btrfs: remove unnecessary tmp variable in btrfs_assign_next_active_device()
[linux-block.git] / fs / btrfs / volumes.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/sched/mm.h>
8#include <linux/bio.h>
9#include <linux/slab.h>
10#include <linux/blkdev.h>
11#include <linux/ratelimit.h>
12#include <linux/kthread.h>
13#include <linux/raid/pq.h>
14#include <linux/semaphore.h>
15#include <linux/uuid.h>
16#include <linux/list_sort.h>
17#include "misc.h"
18#include "ctree.h"
19#include "extent_map.h"
20#include "disk-io.h"
21#include "transaction.h"
22#include "print-tree.h"
23#include "volumes.h"
24#include "raid56.h"
25#include "async-thread.h"
26#include "check-integrity.h"
27#include "rcu-string.h"
28#include "dev-replace.h"
29#include "sysfs.h"
30#include "tree-checker.h"
31#include "space-info.h"
32#include "block-group.h"
33#include "discard.h"
34
35const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
36 [BTRFS_RAID_RAID10] = {
37 .sub_stripes = 2,
38 .dev_stripes = 1,
39 .devs_max = 0, /* 0 == as many as possible */
40 .devs_min = 4,
41 .tolerated_failures = 1,
42 .devs_increment = 2,
43 .ncopies = 2,
44 .nparity = 0,
45 .raid_name = "raid10",
46 .bg_flag = BTRFS_BLOCK_GROUP_RAID10,
47 .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
48 },
49 [BTRFS_RAID_RAID1] = {
50 .sub_stripes = 1,
51 .dev_stripes = 1,
52 .devs_max = 2,
53 .devs_min = 2,
54 .tolerated_failures = 1,
55 .devs_increment = 2,
56 .ncopies = 2,
57 .nparity = 0,
58 .raid_name = "raid1",
59 .bg_flag = BTRFS_BLOCK_GROUP_RAID1,
60 .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
61 },
62 [BTRFS_RAID_RAID1C3] = {
63 .sub_stripes = 1,
64 .dev_stripes = 1,
65 .devs_max = 3,
66 .devs_min = 3,
67 .tolerated_failures = 2,
68 .devs_increment = 3,
69 .ncopies = 3,
70 .nparity = 0,
71 .raid_name = "raid1c3",
72 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
73 .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
74 },
75 [BTRFS_RAID_RAID1C4] = {
76 .sub_stripes = 1,
77 .dev_stripes = 1,
78 .devs_max = 4,
79 .devs_min = 4,
80 .tolerated_failures = 3,
81 .devs_increment = 4,
82 .ncopies = 4,
83 .nparity = 0,
84 .raid_name = "raid1c4",
85 .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
86 .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
87 },
88 [BTRFS_RAID_DUP] = {
89 .sub_stripes = 1,
90 .dev_stripes = 2,
91 .devs_max = 1,
92 .devs_min = 1,
93 .tolerated_failures = 0,
94 .devs_increment = 1,
95 .ncopies = 2,
96 .nparity = 0,
97 .raid_name = "dup",
98 .bg_flag = BTRFS_BLOCK_GROUP_DUP,
99 .mindev_error = 0,
100 },
101 [BTRFS_RAID_RAID0] = {
102 .sub_stripes = 1,
103 .dev_stripes = 1,
104 .devs_max = 0,
105 .devs_min = 2,
106 .tolerated_failures = 0,
107 .devs_increment = 1,
108 .ncopies = 1,
109 .nparity = 0,
110 .raid_name = "raid0",
111 .bg_flag = BTRFS_BLOCK_GROUP_RAID0,
112 .mindev_error = 0,
113 },
114 [BTRFS_RAID_SINGLE] = {
115 .sub_stripes = 1,
116 .dev_stripes = 1,
117 .devs_max = 1,
118 .devs_min = 1,
119 .tolerated_failures = 0,
120 .devs_increment = 1,
121 .ncopies = 1,
122 .nparity = 0,
123 .raid_name = "single",
124 .bg_flag = 0,
125 .mindev_error = 0,
126 },
127 [BTRFS_RAID_RAID5] = {
128 .sub_stripes = 1,
129 .dev_stripes = 1,
130 .devs_max = 0,
131 .devs_min = 2,
132 .tolerated_failures = 1,
133 .devs_increment = 1,
134 .ncopies = 1,
135 .nparity = 1,
136 .raid_name = "raid5",
137 .bg_flag = BTRFS_BLOCK_GROUP_RAID5,
138 .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
139 },
140 [BTRFS_RAID_RAID6] = {
141 .sub_stripes = 1,
142 .dev_stripes = 1,
143 .devs_max = 0,
144 .devs_min = 3,
145 .tolerated_failures = 2,
146 .devs_increment = 1,
147 .ncopies = 1,
148 .nparity = 2,
149 .raid_name = "raid6",
150 .bg_flag = BTRFS_BLOCK_GROUP_RAID6,
151 .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
152 },
153};
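
/*
 * Illustrative note (not part of the original file): reading one entry of
 * the table above, e.g. RAID10: ncopies = 2 means every block has two
 * copies, sub_stripes = 2 means stripes are mirrored in pairs,
 * devs_increment = 2 means devices can only be added two at a time, and
 * tolerated_failures = 1 means the array survives losing one device.
 */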
154
155const char *btrfs_bg_type_to_raid_name(u64 flags)
156{
157 const int index = btrfs_bg_flags_to_raid_index(flags);
158
159 if (index >= BTRFS_NR_RAID_TYPES)
160 return NULL;
161
162 return btrfs_raid_array[index].raid_name;
163}
164
165/*
166 * Fill @buf with textual description of @bg_flags, no more than @size_buf
167 * bytes including terminating null byte.
168 */
169void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
170{
171 int i;
172 int ret;
173 char *bp = buf;
174 u64 flags = bg_flags;
175 u32 size_bp = size_buf;
176
177 if (!flags) {
178 strcpy(bp, "NONE");
179 return;
180 }
181
182#define DESCRIBE_FLAG(flag, desc) \
183 do { \
184 if (flags & (flag)) { \
185 ret = snprintf(bp, size_bp, "%s|", (desc)); \
186 if (ret < 0 || ret >= size_bp) \
187 goto out_overflow; \
188 size_bp -= ret; \
189 bp += ret; \
190 flags &= ~(flag); \
191 } \
192 } while (0)
193
194 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
195 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
196 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
197
198 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
199 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
200 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
201 btrfs_raid_array[i].raid_name);
202#undef DESCRIBE_FLAG
203
204 if (flags) {
205 ret = snprintf(bp, size_bp, "0x%llx|", flags);
206 size_bp -= ret;
207 }
208
209 if (size_bp < size_buf)
210 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
211
212 /*
213 * The text is trimmed; it's up to the caller to provide a sufficiently
214 * large buffer.
215 */
216out_overflow:;
217}
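
/*
 * A minimal usage sketch (not part of the original file), assuming a
 * caller with a local buffer:
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *
 * buf then holds "data|raid1"; any bits not matched by the flag table
 * would have been appended in "0x%llx" form, and zero flags yield "NONE".
 */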
218
219static int init_first_rw_device(struct btrfs_trans_handle *trans);
220static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
221static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
222static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
223static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
224 enum btrfs_map_op op,
225 u64 logical, u64 *length,
226 struct btrfs_bio **bbio_ret,
227 int mirror_num, int need_raid_map);
228
229/*
230 * Device locking
231 * ==============
232 *
233 * There are several mutexes that protect manipulation of devices and low-level
234 * structures like chunks but not block groups, extents or files
235 *
236 * uuid_mutex (global lock)
237 * ------------------------
238 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
239 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
240 * device) or requested by the device= mount option
241 *
242 * the mutex can be very coarse and can cover long-running operations
243 *
244 * protects: updates to fs_devices counters like missing devices, rw devices,
245 * seeding, structure cloning, opening/closing devices at mount/umount time
246 *
247 * global::fs_devs - add, remove, updates to the global list
248 *
249 * does not protect: manipulation of the fs_devices::devices list in general
250 * but in mount context it could be used to exclude list modifications by eg.
251 * scan ioctl
252 *
253 * btrfs_device::name - renames (write side), read is RCU
254 *
255 * fs_devices::device_list_mutex (per-fs, with RCU)
256 * ------------------------------------------------
257 * protects updates to fs_devices::devices, ie. adding and deleting
258 *
259 * simple list traversal with read-only actions can be done with RCU protection
260 *
261 * may be used to exclude some operations from running concurrently without any
262 * modifications to the list (see write_all_supers)
263 *
264 * Is not required at mount and close times, because our device list is
265 * protected by the uuid_mutex at that point.
266 *
267 * balance_mutex
268 * -------------
269 * protects balance structures (status, state) and context accessed from
270 * several places (internally, ioctl)
271 *
272 * chunk_mutex
273 * -----------
274 * protects chunks, adding or removing during allocation, trim or when a new
275 * device is added/removed. Additionally it also protects post_commit_list of
276 * individual devices, since they can be added to the transaction's
277 * post_commit_list only with chunk_mutex held.
278 *
279 * cleaner_mutex
280 * -------------
281 * a big lock that is held by the cleaner thread and prevents running subvolume
282 * cleaning together with relocation or delayed iputs
283 *
284 *
285 * Lock nesting
286 * ============
287 *
288 * uuid_mutex
289 * device_list_mutex
290 * chunk_mutex
291 * balance_mutex
292 *
293 *
294 * Exclusive operations
295 * ====================
296 *
297 * Maintains the exclusivity of the following operations that apply to the
298 * whole filesystem and cannot run in parallel.
299 *
300 * - Balance (*)
301 * - Device add
302 * - Device remove
303 * - Device replace (*)
304 * - Resize
305 *
306 * The device operations (as above) can be in one of the following states:
307 *
308 * - Running state
309 * - Paused state
310 * - Completed state
311 *
312 * Only device operations marked with (*) can go into the Paused state for the
313 * following reasons:
314 *
315 * - ioctl (only Balance can be Paused through ioctl)
316 * - filesystem remounted as read-only
317 * - filesystem unmounted and mounted as read-only
318 * - system power-cycle and filesystem mounted as read-only
319 * - filesystem or device errors leading to forced read-only
320 *
321 * The status of exclusive operation is set and cleared atomically.
322 * During the course of Paused state, fs_info::exclusive_operation remains set.
323 * A device operation in Paused or Running state can be canceled or resumed
324 * either by ioctl (Balance only) or when remounted as read-write.
325 * The exclusive status is cleared when the device operation is canceled or
326 * completed.
327 */
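
/*
 * Illustrative sketch of the nesting order documented above (not part of
 * the original file); a hypothetical path that needed all of these locks
 * at once would have to acquire them outermost-first:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	mutex_lock(&fs_info->balance_mutex);
 *	...
 *	mutex_unlock(&fs_info->balance_mutex);
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */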
328
329DEFINE_MUTEX(uuid_mutex);
330static LIST_HEAD(fs_uuids);
331struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
332{
333 return &fs_uuids;
334}
335
336/*
337 * alloc_fs_devices - allocate struct btrfs_fs_devices
338 * @fsid: if not NULL, copy the UUID to fs_devices::fsid
339 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
340 *
341 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
342 * The returned struct is not linked onto any lists and can be destroyed with
343 * kfree() right away.
344 */
345static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
346 const u8 *metadata_fsid)
347{
348 struct btrfs_fs_devices *fs_devs;
349
350 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
351 if (!fs_devs)
352 return ERR_PTR(-ENOMEM);
353
354 mutex_init(&fs_devs->device_list_mutex);
355
356 INIT_LIST_HEAD(&fs_devs->devices);
357 INIT_LIST_HEAD(&fs_devs->alloc_list);
358 INIT_LIST_HEAD(&fs_devs->fs_list);
359 INIT_LIST_HEAD(&fs_devs->seed_list);
360 if (fsid)
361 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
362
363 if (metadata_fsid)
364 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
365 else if (fsid)
366 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
367
368 return fs_devs;
369}
370
371void btrfs_free_device(struct btrfs_device *device)
372{
373 WARN_ON(!list_empty(&device->post_commit_list));
374 rcu_string_free(device->name);
375 extent_io_tree_release(&device->alloc_state);
376 bio_put(device->flush_bio);
377 kfree(device);
378}
379
380static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
381{
382 struct btrfs_device *device;
383 WARN_ON(fs_devices->opened);
384 while (!list_empty(&fs_devices->devices)) {
385 device = list_entry(fs_devices->devices.next,
386 struct btrfs_device, dev_list);
387 list_del(&device->dev_list);
388 btrfs_free_device(device);
389 }
390 kfree(fs_devices);
391}
392
393void __exit btrfs_cleanup_fs_uuids(void)
394{
395 struct btrfs_fs_devices *fs_devices;
396
397 while (!list_empty(&fs_uuids)) {
398 fs_devices = list_entry(fs_uuids.next,
399 struct btrfs_fs_devices, fs_list);
400 list_del(&fs_devices->fs_list);
401 free_fs_devices(fs_devices);
402 }
403}
404
405/*
406 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
407 * Returned struct is not linked onto any lists and must be destroyed using
408 * btrfs_free_device.
409 */
410static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
411{
412 struct btrfs_device *dev;
413
414 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
415 if (!dev)
416 return ERR_PTR(-ENOMEM);
417
418 /*
419 * Preallocate a bio that's always going to be used for flushing device
420 * barriers and matches the device lifespan
421 */
422 dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
423 if (!dev->flush_bio) {
424 kfree(dev);
425 return ERR_PTR(-ENOMEM);
426 }
427
428 INIT_LIST_HEAD(&dev->dev_list);
429 INIT_LIST_HEAD(&dev->dev_alloc_list);
430 INIT_LIST_HEAD(&dev->post_commit_list);
431
432 atomic_set(&dev->reada_in_flight, 0);
433 atomic_set(&dev->dev_stats_ccnt, 0);
434 btrfs_device_data_ordered_init(dev);
435 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
436 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
437 extent_io_tree_init(fs_info, &dev->alloc_state,
438 IO_TREE_DEVICE_ALLOC_STATE, NULL);
439
440 return dev;
441}
442
443static noinline struct btrfs_fs_devices *find_fsid(
444 const u8 *fsid, const u8 *metadata_fsid)
445{
446 struct btrfs_fs_devices *fs_devices;
447
448 ASSERT(fsid);
449
450 /* Handle non-split brain cases */
451 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
452 if (metadata_fsid) {
453 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
454 && memcmp(metadata_fsid, fs_devices->metadata_uuid,
455 BTRFS_FSID_SIZE) == 0)
456 return fs_devices;
457 } else {
458 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
459 return fs_devices;
460 }
461 }
462 return NULL;
463}
464
465static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
466 struct btrfs_super_block *disk_super)
467{
468
469 struct btrfs_fs_devices *fs_devices;
470
471 /*
472 * Handle scanned device having completed its fsid change but
473 * belonging to a fs_devices that was created by first scanning
474 * a device which didn't have its fsid/metadata_uuid changed
475 * at all and the CHANGING_FSID_V2 flag set.
476 */
477 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
478 if (fs_devices->fsid_change &&
479 memcmp(disk_super->metadata_uuid, fs_devices->fsid,
480 BTRFS_FSID_SIZE) == 0 &&
481 memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
482 BTRFS_FSID_SIZE) == 0) {
483 return fs_devices;
484 }
485 }
486 /*
487 * Handle scanned device having completed its fsid change but
488 * belonging to a fs_devices that was created by a device that
489 * has an outdated pair of fsid/metadata_uuid and
490 * CHANGING_FSID_V2 flag set.
491 */
492 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
493 if (fs_devices->fsid_change &&
494 memcmp(fs_devices->metadata_uuid,
495 fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
496 memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
497 BTRFS_FSID_SIZE) == 0) {
498 return fs_devices;
499 }
500 }
501
502 return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
503}
504
505
506static int
507btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
508 int flush, struct block_device **bdev,
509 struct btrfs_super_block **disk_super)
510{
511 int ret;
512
513 *bdev = blkdev_get_by_path(device_path, flags, holder);
514
515 if (IS_ERR(*bdev)) {
516 ret = PTR_ERR(*bdev);
517 goto error;
518 }
519
520 if (flush)
521 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
522 ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
523 if (ret) {
524 blkdev_put(*bdev, flags);
525 goto error;
526 }
527 invalidate_bdev(*bdev);
528 *disk_super = btrfs_read_dev_super(*bdev);
529 if (IS_ERR(*disk_super)) {
530 ret = PTR_ERR(*disk_super);
531 blkdev_put(*bdev, flags);
532 goto error;
533 }
534
535 return 0;
536
537error:
538 *bdev = NULL;
539 return ret;
540}
541
542static bool device_path_matched(const char *path, struct btrfs_device *device)
543{
544 int found;
545
546 rcu_read_lock();
547 found = strcmp(rcu_str_deref(device->name), path);
548 rcu_read_unlock();
549
550 return found == 0;
551}
552
553/*
554 * Search and remove all stale (devices which are not mounted) devices.
555 * When both inputs are NULL, it will search and release all stale devices.
556 * path: Optional. When provided, it will release only the unmounted
557 * devices matching this path.
558 * skip_dev: Optional. Will skip this device when searching for the stale
559 * devices.
560 * Return: 0 for success or if @path is NULL.
561 * -EBUSY if @path is a mounted device.
562 * -ENOENT if @path does not match any device in the list.
563 */
564static int btrfs_free_stale_devices(const char *path,
565 struct btrfs_device *skip_device)
566{
567 struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
568 struct btrfs_device *device, *tmp_device;
569 int ret = 0;
570
571 if (path)
572 ret = -ENOENT;
573
574 list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
575
576 mutex_lock(&fs_devices->device_list_mutex);
577 list_for_each_entry_safe(device, tmp_device,
578 &fs_devices->devices, dev_list) {
579 if (skip_device && skip_device == device)
580 continue;
581 if (path && !device->name)
582 continue;
583 if (path && !device_path_matched(path, device))
584 continue;
585 if (fs_devices->opened) {
586 /* for an already deleted device return 0 */
587 if (path && ret != 0)
588 ret = -EBUSY;
589 break;
590 }
591
592 /* delete the stale device */
593 fs_devices->num_devices--;
594 list_del(&device->dev_list);
595 btrfs_free_device(device);
596
597 ret = 0;
598 }
599 mutex_unlock(&fs_devices->device_list_mutex);
600
601 if (fs_devices->num_devices == 0) {
602 btrfs_sysfs_remove_fsid(fs_devices);
603 list_del(&fs_devices->fs_list);
604 free_fs_devices(fs_devices);
605 }
606 }
607
608 return ret;
609}
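
/*
 * Usage sketch (not part of the original file): btrfs_forget_devices()
 * below calls btrfs_free_stale_devices(path, NULL) to drop one specific
 * unmounted device, while btrfs_free_stale_devices(NULL, NULL) drops
 * every registered device that is not currently mounted.
 */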
610
611/*
612 * This is only used on mount, and we are protected from competing things
613 * messing with our fs_devices by the uuid_mutex, thus we do not need the
614 * fs_devices->device_list_mutex here.
615 */
616static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
617 struct btrfs_device *device, fmode_t flags,
618 void *holder)
619{
620 struct request_queue *q;
621 struct block_device *bdev;
622 struct btrfs_super_block *disk_super;
623 u64 devid;
624 int ret;
625
626 if (device->bdev)
627 return -EINVAL;
628 if (!device->name)
629 return -EINVAL;
630
631 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
632 &bdev, &disk_super);
633 if (ret)
634 return ret;
635
636 devid = btrfs_stack_device_id(&disk_super->dev_item);
637 if (devid != device->devid)
638 goto error_free_page;
639
640 if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
641 goto error_free_page;
642
643 device->generation = btrfs_super_generation(disk_super);
644
645 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
646 if (btrfs_super_incompat_flags(disk_super) &
647 BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
648 pr_err(
649 "BTRFS: Invalid seeding and uuid-changed device detected\n");
650 goto error_free_page;
651 }
652
653 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
654 fs_devices->seeding = true;
655 } else {
656 if (bdev_read_only(bdev))
657 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
658 else
659 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
660 }
661
662 q = bdev_get_queue(bdev);
663 if (!blk_queue_nonrot(q))
664 fs_devices->rotating = true;
665
666 device->bdev = bdev;
667 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
668 device->mode = flags;
669
670 fs_devices->open_devices++;
671 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
672 device->devid != BTRFS_DEV_REPLACE_DEVID) {
673 fs_devices->rw_devices++;
674 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
675 }
676 btrfs_release_disk_super(disk_super);
677
678 return 0;
679
680error_free_page:
681 btrfs_release_disk_super(disk_super);
682 blkdev_put(bdev, flags);
683
684 return -EINVAL;
685}
686
687/*
688 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
689 * being created with a disk that has already completed its fsid change. Such a
690 * disk can belong to an fs which has its FSID changed or to one which doesn't.
691 * Handle both cases here.
692 */
693static struct btrfs_fs_devices *find_fsid_inprogress(
694 struct btrfs_super_block *disk_super)
695{
696 struct btrfs_fs_devices *fs_devices;
697
698 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
699 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
700 BTRFS_FSID_SIZE) != 0 &&
701 memcmp(fs_devices->metadata_uuid, disk_super->fsid,
702 BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
703 return fs_devices;
704 }
705 }
706
707 return find_fsid(disk_super->fsid, NULL);
708}
709
710
711static struct btrfs_fs_devices *find_fsid_changed(
712 struct btrfs_super_block *disk_super)
713{
714 struct btrfs_fs_devices *fs_devices;
715
716 /*
717 * Handles the case where scanned device is part of an fs that had
718 * multiple successful changes of FSID but currently the device didn't
719 * observe it. Meaning our fsid will be different from theirs. We need
720 * to handle two subcases:
721 * 1 - The fs still continues to have different METADATA/FSID uuids.
722 * 2 - The fs is switched back to its original FSID (METADATA/FSID
723 * are equal).
724 */
725 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
726 /* Changed UUIDs */
727 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
728 BTRFS_FSID_SIZE) != 0 &&
729 memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
730 BTRFS_FSID_SIZE) == 0 &&
731 memcmp(fs_devices->fsid, disk_super->fsid,
732 BTRFS_FSID_SIZE) != 0)
733 return fs_devices;
734
735 /* Unchanged UUIDs */
736 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
737 BTRFS_FSID_SIZE) == 0 &&
738 memcmp(fs_devices->fsid, disk_super->metadata_uuid,
739 BTRFS_FSID_SIZE) == 0)
740 return fs_devices;
741 }
742
743 return NULL;
744}
745
746static struct btrfs_fs_devices *find_fsid_reverted_metadata(
747 struct btrfs_super_block *disk_super)
748{
749 struct btrfs_fs_devices *fs_devices;
750
751 /*
752 * Handle the case where the scanned device is part of an fs whose last
753 * metadata UUID change reverted it to the original FSID. At the same
754 * time, fs_devices was first created by another constituent device
755 * which didn't fully observe the operation. This results in a
756 * btrfs_fs_devices created with metadata/fsid different AND
757 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
758 * fs_devices equal to the FSID of the disk.
759 */
760 list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
761 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
762 BTRFS_FSID_SIZE) != 0 &&
763 memcmp(fs_devices->metadata_uuid, disk_super->fsid,
764 BTRFS_FSID_SIZE) == 0 &&
765 fs_devices->fsid_change)
766 return fs_devices;
767 }
768
769 return NULL;
770}
771/*
772 * Add new device to list of registered devices
773 *
774 * Returns:
775 * device pointer which was just added or updated when successful
776 * error pointer when failed
777 */
778static noinline struct btrfs_device *device_list_add(const char *path,
779 struct btrfs_super_block *disk_super,
780 bool *new_device_added)
781{
782 struct btrfs_device *device;
783 struct btrfs_fs_devices *fs_devices = NULL;
784 struct rcu_string *name;
785 u64 found_transid = btrfs_super_generation(disk_super);
786 u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
787 bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
788 BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
789 bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
790 BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
791
792 if (fsid_change_in_progress) {
793 if (!has_metadata_uuid)
794 fs_devices = find_fsid_inprogress(disk_super);
795 else
796 fs_devices = find_fsid_changed(disk_super);
797 } else if (has_metadata_uuid) {
798 fs_devices = find_fsid_with_metadata_uuid(disk_super);
799 } else {
800 fs_devices = find_fsid_reverted_metadata(disk_super);
801 if (!fs_devices)
802 fs_devices = find_fsid(disk_super->fsid, NULL);
803 }
804
805
806 if (!fs_devices) {
807 if (has_metadata_uuid)
808 fs_devices = alloc_fs_devices(disk_super->fsid,
809 disk_super->metadata_uuid);
810 else
811 fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
812
813 if (IS_ERR(fs_devices))
814 return ERR_CAST(fs_devices);
815
816 fs_devices->fsid_change = fsid_change_in_progress;
817
818 mutex_lock(&fs_devices->device_list_mutex);
819 list_add(&fs_devices->fs_list, &fs_uuids);
820
821 device = NULL;
822 } else {
823 mutex_lock(&fs_devices->device_list_mutex);
824 device = btrfs_find_device(fs_devices, devid,
825 disk_super->dev_item.uuid, NULL, false);
826
827 /*
828 * If this disk has been pulled into an fs_devices created by
829 * a device which had the CHANGING_FSID_V2 flag then replace the
830 * metadata_uuid/fsid values of the fs_devices.
831 */
832 if (fs_devices->fsid_change &&
833 found_transid > fs_devices->latest_generation) {
834 memcpy(fs_devices->fsid, disk_super->fsid,
835 BTRFS_FSID_SIZE);
836
837 if (has_metadata_uuid)
838 memcpy(fs_devices->metadata_uuid,
839 disk_super->metadata_uuid,
840 BTRFS_FSID_SIZE);
841 else
842 memcpy(fs_devices->metadata_uuid,
843 disk_super->fsid, BTRFS_FSID_SIZE);
844
845 fs_devices->fsid_change = false;
846 }
847 }
848
849 if (!device) {
850 if (fs_devices->opened) {
851 mutex_unlock(&fs_devices->device_list_mutex);
852 return ERR_PTR(-EBUSY);
853 }
854
855 device = btrfs_alloc_device(NULL, &devid,
856 disk_super->dev_item.uuid);
857 if (IS_ERR(device)) {
858 mutex_unlock(&fs_devices->device_list_mutex);
859 /* we can safely leave the fs_devices entry around */
860 return device;
861 }
862
863 name = rcu_string_strdup(path, GFP_NOFS);
864 if (!name) {
865 btrfs_free_device(device);
866 mutex_unlock(&fs_devices->device_list_mutex);
867 return ERR_PTR(-ENOMEM);
868 }
869 rcu_assign_pointer(device->name, name);
870
871 list_add_rcu(&device->dev_list, &fs_devices->devices);
872 fs_devices->num_devices++;
873
874 device->fs_devices = fs_devices;
875 *new_device_added = true;
876
877 if (disk_super->label[0])
878 pr_info(
879 "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
880 disk_super->label, devid, found_transid, path,
881 current->comm, task_pid_nr(current));
882 else
883 pr_info(
884 "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
885 disk_super->fsid, devid, found_transid, path,
886 current->comm, task_pid_nr(current));
887
888 } else if (!device->name || strcmp(device->name->str, path)) {
889 /*
890 * When FS is already mounted.
891 * 1. If you are here and if the device->name is NULL that
892 * means this device was missing at time of FS mount.
893 * 2. If you are here and if the device->name is different
894 * from 'path' that means either
895 * a. The same device disappeared and reappeared with
896 * different name. or
897 * b. The missing-disk-which-was-replaced, has
898 * reappeared now.
899 *
900 * We must allow 1 and 2a above. But 2b would be spurious
901 * and unintentional.
902 *
903 * Further in case of 1 and 2a above, the disk at 'path'
904 * would have missed some transaction when it was away and
905 * in case of 2a the stale bdev has to be updated as well.
906 * 2b must not be allowed at any time.
907 */
908
909 /*
910 * For now, we do allow update to btrfs_fs_device through the
911 * btrfs dev scan cli after FS has been mounted. We're still
912 * tracking a problem where systems fail mount by subvolume id
913 * when we reject replacement on a mounted FS.
914 */
915 if (!fs_devices->opened && found_transid < device->generation) {
916 /*
917 * That is, if the FS is _not_ mounted and if you
918 * are here, that means there is more than one
919 * disk with the same uuid and devid. We keep the one
920 * with the larger generation number or the last-in if
921 * generations are equal.
922 */
923 mutex_unlock(&fs_devices->device_list_mutex);
924 return ERR_PTR(-EEXIST);
925 }
926
927 /*
928 * We are going to replace the device path for a given devid,
929 * make sure it's the same device if the device is mounted
930 */
931 if (device->bdev) {
932 struct block_device *path_bdev;
933
934 path_bdev = lookup_bdev(path);
935 if (IS_ERR(path_bdev)) {
936 mutex_unlock(&fs_devices->device_list_mutex);
937 return ERR_CAST(path_bdev);
938 }
939
940 if (device->bdev != path_bdev) {
941 bdput(path_bdev);
942 mutex_unlock(&fs_devices->device_list_mutex);
943 btrfs_warn_in_rcu(device->fs_info,
944 "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
945 path, devid, found_transid,
946 current->comm,
947 task_pid_nr(current));
948 return ERR_PTR(-EEXIST);
949 }
950 bdput(path_bdev);
951 btrfs_info_in_rcu(device->fs_info,
952 "devid %llu device path %s changed to %s scanned by %s (%d)",
953 devid, rcu_str_deref(device->name),
954 path, current->comm,
955 task_pid_nr(current));
956 }
957
958 name = rcu_string_strdup(path, GFP_NOFS);
959 if (!name) {
960 mutex_unlock(&fs_devices->device_list_mutex);
961 return ERR_PTR(-ENOMEM);
962 }
963 rcu_string_free(device->name);
964 rcu_assign_pointer(device->name, name);
965 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
966 fs_devices->missing_devices--;
967 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
968 }
969 }
970
971 /*
972 * Unmount does not free the btrfs_device struct but would zero
973 * generation along with most of the other members. So just update
974 * it back. We need it to pick the disk with largest generation
975 * (as above).
976 */
977 if (!fs_devices->opened) {
978 device->generation = found_transid;
979 fs_devices->latest_generation = max_t(u64, found_transid,
980 fs_devices->latest_generation);
981 }
982
983 fs_devices->total_devices = btrfs_super_num_devices(disk_super);
984
985 mutex_unlock(&fs_devices->device_list_mutex);
986 return device;
987}
988
989static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
990{
991 struct btrfs_fs_devices *fs_devices;
992 struct btrfs_device *device;
993 struct btrfs_device *orig_dev;
994 int ret = 0;
995
996 fs_devices = alloc_fs_devices(orig->fsid, NULL);
997 if (IS_ERR(fs_devices))
998 return fs_devices;
999
1000 mutex_lock(&orig->device_list_mutex);
1001 fs_devices->total_devices = orig->total_devices;
1002
1003 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1004 struct rcu_string *name;
1005
1006 device = btrfs_alloc_device(NULL, &orig_dev->devid,
1007 orig_dev->uuid);
1008 if (IS_ERR(device)) {
1009 ret = PTR_ERR(device);
1010 goto error;
1011 }
1012
1013 /*
1014 * This is ok to do without the RCU read lock held because we hold the
1015 * uuid_mutex so nothing we touch in here is going to disappear.
1016 */
1017 if (orig_dev->name) {
1018 name = rcu_string_strdup(orig_dev->name->str,
1019 GFP_KERNEL);
1020 if (!name) {
1021 btrfs_free_device(device);
1022 ret = -ENOMEM;
1023 goto error;
1024 }
1025 rcu_assign_pointer(device->name, name);
1026 }
1027
1028 list_add(&device->dev_list, &fs_devices->devices);
1029 device->fs_devices = fs_devices;
1030 fs_devices->num_devices++;
1031 }
1032 mutex_unlock(&orig->device_list_mutex);
1033 return fs_devices;
1034error:
1035 mutex_unlock(&orig->device_list_mutex);
1036 free_fs_devices(fs_devices);
1037 return ERR_PTR(ret);
1038}
1039
1040static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1041 int step, struct btrfs_device **latest_dev)
1042{
1043 struct btrfs_device *device, *next;
1044
1045 /* This is the initialized path, it is safe to release the devices. */
1046 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1047 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1048 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1049 &device->dev_state) &&
1050 !test_bit(BTRFS_DEV_STATE_MISSING,
1051 &device->dev_state) &&
1052 (!*latest_dev ||
1053 device->generation > (*latest_dev)->generation)) {
1054 *latest_dev = device;
1055 }
1056 continue;
1057 }
1058
1059 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
1060 /*
1061 * In the first step, keep the device which has
1062 * the correct fsid and the devid that is used
1063 * for the dev_replace procedure.
1064 * In the second step, the dev_replace state is
1065 * read from the device tree and it is known
1066 * whether the procedure is really active or
1067 * not, which means whether this device is
1068 * used or whether it should be removed.
1069 */
1070 if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1071 &device->dev_state)) {
1072 continue;
1073 }
1074 }
1075 if (device->bdev) {
1076 blkdev_put(device->bdev, device->mode);
1077 device->bdev = NULL;
1078 fs_devices->open_devices--;
1079 }
1080 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1081 list_del_init(&device->dev_alloc_list);
1082 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1083 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1084 &device->dev_state))
1085 fs_devices->rw_devices--;
1086 }
1087 list_del_init(&device->dev_list);
1088 fs_devices->num_devices--;
1089 btrfs_free_device(device);
1090 }
1091
1092}
1093
1094/*
1095 * After we have read the system tree and know devids belonging to this
1096 * filesystem, remove any device which does not belong there.
1097 */
1098void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
1099{
1100 struct btrfs_device *latest_dev = NULL;
1101 struct btrfs_fs_devices *seed_dev;
1102
1103 mutex_lock(&uuid_mutex);
1104 __btrfs_free_extra_devids(fs_devices, step, &latest_dev);
1105
1106 list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1107 __btrfs_free_extra_devids(seed_dev, step, &latest_dev);
1108
1109 fs_devices->latest_bdev = latest_dev->bdev;
1110
1111 mutex_unlock(&uuid_mutex);
1112}
1113
1114static void btrfs_close_bdev(struct btrfs_device *device)
1115{
1116 if (!device->bdev)
1117 return;
1118
1119 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1120 sync_blockdev(device->bdev);
1121 invalidate_bdev(device->bdev);
1122 }
1123
1124 blkdev_put(device->bdev, device->mode);
1125}
1126
1127static void btrfs_close_one_device(struct btrfs_device *device)
1128{
1129 struct btrfs_fs_devices *fs_devices = device->fs_devices;
1130
1131 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1132 device->devid != BTRFS_DEV_REPLACE_DEVID) {
1133 list_del_init(&device->dev_alloc_list);
1134 fs_devices->rw_devices--;
1135 }
1136
1137 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1138 fs_devices->missing_devices--;
1139
1140 btrfs_close_bdev(device);
1141 if (device->bdev) {
1142 fs_devices->open_devices--;
1143 device->bdev = NULL;
1144 }
1145 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1146
1147 device->fs_info = NULL;
1148 atomic_set(&device->dev_stats_ccnt, 0);
1149 extent_io_tree_release(&device->alloc_state);
1150
1151 /* Verify the device is back in a pristine state */
1152 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1153 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1154 ASSERT(list_empty(&device->dev_alloc_list));
1155 ASSERT(list_empty(&device->post_commit_list));
1156 ASSERT(atomic_read(&device->reada_in_flight) == 0);
1157}
1158
1159static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1160{
1161 struct btrfs_device *device, *tmp;
1162
1163 lockdep_assert_held(&uuid_mutex);
1164
1165 if (--fs_devices->opened > 0)
1166 return;
1167
1168 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1169 btrfs_close_one_device(device);
1170
1171 WARN_ON(fs_devices->open_devices);
1172 WARN_ON(fs_devices->rw_devices);
1173 fs_devices->opened = 0;
1174 fs_devices->seeding = false;
1175 fs_devices->fs_info = NULL;
1176}
1177
1178void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1179{
1180 LIST_HEAD(list);
1181 struct btrfs_fs_devices *tmp;
1182
1183 mutex_lock(&uuid_mutex);
1184 close_fs_devices(fs_devices);
1185 if (!fs_devices->opened)
1186 list_splice_init(&fs_devices->seed_list, &list);
1187
1188 list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1189 close_fs_devices(fs_devices);
1190 list_del(&fs_devices->seed_list);
1191 free_fs_devices(fs_devices);
1192 }
1193 mutex_unlock(&uuid_mutex);
1194}
1195
1196static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1197 fmode_t flags, void *holder)
1198{
1199 struct btrfs_device *device;
1200 struct btrfs_device *latest_dev = NULL;
1201
1202 flags |= FMODE_EXCL;
1203
1204 list_for_each_entry(device, &fs_devices->devices, dev_list) {
1205 /* Just open everything we can; ignore failures here */
1206 if (btrfs_open_one_device(fs_devices, device, flags, holder))
1207 continue;
1208
1209 if (!latest_dev ||
1210 device->generation > latest_dev->generation)
1211 latest_dev = device;
1212 }
1213 if (fs_devices->open_devices == 0)
1214 return -EINVAL;
1215
1216 fs_devices->opened = 1;
1217 fs_devices->latest_bdev = latest_dev->bdev;
1218 fs_devices->total_rw_bytes = 0;
1219 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1220
1221 return 0;
1222}
1223
1224static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
1225{
1226 struct btrfs_device *dev1, *dev2;
1227
1228 dev1 = list_entry(a, struct btrfs_device, dev_list);
1229 dev2 = list_entry(b, struct btrfs_device, dev_list);
1230
1231 if (dev1->devid < dev2->devid)
1232 return -1;
1233 else if (dev1->devid > dev2->devid)
1234 return 1;
1235 return 0;
1236}
1237
1238int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1239 fmode_t flags, void *holder)
1240{
1241 int ret;
1242
1243 lockdep_assert_held(&uuid_mutex);
1244 /*
1245 * The device_list_mutex cannot be taken here in case opening the
1246 * underlying device takes further locks like bd_mutex.
1247 *
1248 * We also don't need the lock here as this is called during mount and
1249 * exclusion is provided by uuid_mutex
1250 */
1251
1252 if (fs_devices->opened) {
1253 fs_devices->opened++;
1254 ret = 0;
1255 } else {
1256 list_sort(NULL, &fs_devices->devices, devid_cmp);
1257 ret = open_fs_devices(fs_devices, flags, holder);
1258 }
1259
1260 return ret;
1261}
1262
1263void btrfs_release_disk_super(struct btrfs_super_block *super)
1264{
1265 struct page *page = virt_to_page(super);
1266
1267 put_page(page);
1268}
1269
1270static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1271 u64 bytenr)
1272{
1273 struct btrfs_super_block *disk_super;
1274 struct page *page;
1275 void *p;
1276 pgoff_t index;
1277
1278 /* make sure our super fits in the device */
1279 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1280 return ERR_PTR(-EINVAL);
1281
1282 /* make sure our super fits in the page */
1283 if (sizeof(*disk_super) > PAGE_SIZE)
1284 return ERR_PTR(-EINVAL);
1285
1286 /* make sure our super doesn't straddle pages on disk */
1287 index = bytenr >> PAGE_SHIFT;
1288 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1289 return ERR_PTR(-EINVAL);
1290
1291 /* pull in the page with our super */
1292 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1293
1294 if (IS_ERR(page))
1295 return ERR_CAST(page);
1296
1297 p = page_address(page);
1298
1299 /* align our pointer to the offset of the super block */
1300 disk_super = p + offset_in_page(bytenr);
1301
1302 if (btrfs_super_bytenr(disk_super) != bytenr ||
1303 btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1304 btrfs_release_disk_super(p);
1305 return ERR_PTR(-EINVAL);
1306 }
1307
1308 if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1309 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1310
1311 return disk_super;
1312}
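
/*
 * Worked example (not part of the original file), assuming 4 KiB pages:
 * the primary superblock lives at bytenr 65536 (btrfs_sb_offset(0)), so
 * index = 65536 >> PAGE_SHIFT = 16 and offset_in_page(65536) = 0, i.e.
 * the superblock starts exactly at the beginning of page 16.
 */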
1313
1314int btrfs_forget_devices(const char *path)
1315{
1316 int ret;
1317
1318 mutex_lock(&uuid_mutex);
1319 ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1320 mutex_unlock(&uuid_mutex);
1321
1322 return ret;
1323}
1324
1325/*
1326 * Look for a btrfs signature on a device. This may be called out of the mount path
1327 * and we are not allowed to call set_blocksize during the scan. The superblock
1328 * is read via the pagecache.
1329 */
1330struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1331 void *holder)
1332{
1333 struct btrfs_super_block *disk_super;
1334 bool new_device_added = false;
1335 struct btrfs_device *device = NULL;
1336 struct block_device *bdev;
1337 u64 bytenr;
1338
1339 lockdep_assert_held(&uuid_mutex);
1340
1341 /*
1342 * we would like to check all the supers, but that would make
1343 * a btrfs mount succeed after a mkfs from a different FS.
1344 * So, we need to add a special mount option to scan for
1345 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1346 */
1347 bytenr = btrfs_sb_offset(0);
1348 flags |= FMODE_EXCL;
1349
1350 bdev = blkdev_get_by_path(path, flags, holder);
1351 if (IS_ERR(bdev))
1352 return ERR_CAST(bdev);
1353
1354 disk_super = btrfs_read_disk_super(bdev, bytenr);
1355 if (IS_ERR(disk_super)) {
1356 device = ERR_CAST(disk_super);
1357 goto error_bdev_put;
1358 }
1359
1360 device = device_list_add(path, disk_super, &new_device_added);
1361 if (!IS_ERR(device)) {
1362 if (new_device_added)
1363 btrfs_free_stale_devices(path, device);
1364 }
1365
1366 btrfs_release_disk_super(disk_super);
1367
1368error_bdev_put:
1369 blkdev_put(bdev, flags);
1370
1371 return device;
1372}
1373
1374/*
1375 * Try to find a chunk that intersects the [start, start + len] range and when
1376 * one such chunk is found, record the end of it in *start
1377 */
1378static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1379 u64 len)
1380{
1381 u64 physical_start, physical_end;
1382
1383 lockdep_assert_held(&device->fs_info->chunk_mutex);
1384
1385 if (!find_first_extent_bit(&device->alloc_state, *start,
1386 &physical_start, &physical_end,
1387 CHUNK_ALLOCATED, NULL)) {
1388
1389 if (in_range(physical_start, *start, len) ||
1390 in_range(*start, physical_start,
1391 physical_end - physical_start)) {
1392 *start = physical_end + 1;
1393 return true;
1394 }
1395 }
1396 return false;
1397}
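
/*
 * Worked example (not part of the original file): with a pending chunk
 * recorded as CHUNK_ALLOCATED over [16 MiB, 32 MiB - 1] and a caller
 * probing *start = 20 MiB, the second in_range() check fires and *start
 * is advanced to physical_end + 1 = 32 MiB.
 */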
1398
1399static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1400{
1401 switch (device->fs_devices->chunk_alloc_policy) {
1402 case BTRFS_CHUNK_ALLOC_REGULAR:
1403 /*
1404 * We don't want to overwrite the superblock on the drive nor
1405 * any area used by the boot loader (grub for example), so we
1406 * make sure to start at an offset of at least 1MB.
1407 */
1408 return max_t(u64, start, SZ_1M);
1409 default:
1410 BUG();
1411 }
1412}
1413
1414/**
1415 * dev_extent_hole_check - check if specified hole is suitable for allocation
1416 * @device: the device which we have the hole
1417 * @hole_start: starting position of the hole
1418 * @hole_size: the size of the hole
1419 * @num_bytes: the size of the free space that we need
1420 *
1421 * This function may modify @hole_start and @hole_size to reflect the suitable
1422 * position for allocation. Returns true if hole position is updated, false otherwise.
1423 */
1424static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1425 u64 *hole_size, u64 num_bytes)
1426{
1427 bool changed = false;
1428 u64 hole_end = *hole_start + *hole_size;
1429
1430 /*
1431 * Check before we set max_hole_start, otherwise we could end up
1432 * sending back this offset anyway.
1433 */
1434 if (contains_pending_extent(device, hole_start, *hole_size)) {
1435 if (hole_end >= *hole_start)
1436 *hole_size = hole_end - *hole_start;
1437 else
1438 *hole_size = 0;
1439 changed = true;
1440 }
1441
1442 switch (device->fs_devices->chunk_alloc_policy) {
1443 case BTRFS_CHUNK_ALLOC_REGULAR:
1444 /* No extra check */
1445 break;
1446 default:
1447 BUG();
1448 }
1449
1450 return changed;
1451}
1452
1453/*
1454 * find_free_dev_extent_start - find free space in the specified device
1455 * @device: the device which we search the free space in
1456 * @num_bytes: the size of the free space that we need
1457 * @search_start: the position from which to begin the search
1458 * @start: store the start of the free space.
1459 * @len: the size of the free space that we find, or the size
1460 * of the max free space if we don't find suitable free space
1461 *
1462 * this uses a pretty simple search, the expectation is that it is
1463 * called very infrequently and that a given device has a small number
1464 * of extents
1465 *
1466 * @start is used to store the start of the free space if we find it. But if we
1467 * don't find suitable free space, it will be used to store the start position
1468 * of the max free space.
1469 *
1470 * @len is used to store the size of the free space that we find.
1471 * But if we don't find suitable free space, it is used to store the size of
1472 * the max free space.
1473 *
1474 * NOTE: This function will search *commit* root of device tree, and does extra
1475 * check to ensure dev extents are not double allocated.
1476 * This makes the function safe to allocate dev extents but may not report
1477 * correct usable device space, as device extent freed in current transaction
1478 * is not reported as available.
1479 */
1480static int find_free_dev_extent_start(struct btrfs_device *device,
1481 u64 num_bytes, u64 search_start, u64 *start,
1482 u64 *len)
1483{
1484 struct btrfs_fs_info *fs_info = device->fs_info;
1485 struct btrfs_root *root = fs_info->dev_root;
1486 struct btrfs_key key;
1487 struct btrfs_dev_extent *dev_extent;
1488 struct btrfs_path *path;
1489 u64 hole_size;
1490 u64 max_hole_start;
1491 u64 max_hole_size;
1492 u64 extent_end;
1493 u64 search_end = device->total_bytes;
1494 int ret;
1495 int slot;
1496 struct extent_buffer *l;
1497
1498 search_start = dev_extent_search_start(device, search_start);
1499
1500 path = btrfs_alloc_path();
1501 if (!path)
1502 return -ENOMEM;
1503
1504 max_hole_start = search_start;
1505 max_hole_size = 0;
1506
1507again:
1508 if (search_start >= search_end ||
1509 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1510 ret = -ENOSPC;
1511 goto out;
1512 }
1513
1514 path->reada = READA_FORWARD;
1515 path->search_commit_root = 1;
1516 path->skip_locking = 1;
1517
1518 key.objectid = device->devid;
1519 key.offset = search_start;
1520 key.type = BTRFS_DEV_EXTENT_KEY;
1521
1522 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1523 if (ret < 0)
1524 goto out;
1525 if (ret > 0) {
1526 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1527 if (ret < 0)
1528 goto out;
1529 }
1530
1531 while (1) {
1532 l = path->nodes[0];
1533 slot = path->slots[0];
1534 if (slot >= btrfs_header_nritems(l)) {
1535 ret = btrfs_next_leaf(root, path);
1536 if (ret == 0)
1537 continue;
1538 if (ret < 0)
1539 goto out;
1540
1541 break;
1542 }
1543 btrfs_item_key_to_cpu(l, &key, slot);
1544
1545 if (key.objectid < device->devid)
1546 goto next;
1547
1548 if (key.objectid > device->devid)
1549 break;
1550
1551 if (key.type != BTRFS_DEV_EXTENT_KEY)
1552 goto next;
1553
1554 if (key.offset > search_start) {
1555 hole_size = key.offset - search_start;
1556 dev_extent_hole_check(device, &search_start, &hole_size,
1557 num_bytes);
1558
1559 if (hole_size > max_hole_size) {
1560 max_hole_start = search_start;
1561 max_hole_size = hole_size;
1562 }
1563
1564 /*
1565 * If this free space is greater than what we need,
1566 * it must be the max free space that we have found
1567 * until now, so max_hole_start must point to the start
1568 * of this free space and the length of this free space
1569 * is stored in max_hole_size. Thus, we return
1570 * max_hole_start and max_hole_size and go back to the
1571 * caller.
1572 */
1573 if (hole_size >= num_bytes) {
1574 ret = 0;
1575 goto out;
1576 }
1577 }
1578
1579 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1580 extent_end = key.offset + btrfs_dev_extent_length(l,
1581 dev_extent);
1582 if (extent_end > search_start)
1583 search_start = extent_end;
1584next:
1585 path->slots[0]++;
1586 cond_resched();
1587 }
1588
1589 /*
1590 * At this point, search_start should be the end of
1591 * allocated dev extents, and when shrinking the device,
1592 * search_end may be smaller than search_start.
1593 */
1594 if (search_end > search_start) {
1595 hole_size = search_end - search_start;
1596 if (dev_extent_hole_check(device, &search_start, &hole_size,
1597 num_bytes)) {
1598 btrfs_release_path(path);
1599 goto again;
1600 }
1601
1602 if (hole_size > max_hole_size) {
1603 max_hole_start = search_start;
1604 max_hole_size = hole_size;
1605 }
1606 }
1607
1608 /* See above. */
1609 if (max_hole_size < num_bytes)
1610 ret = -ENOSPC;
1611 else
1612 ret = 0;
1613
1614out:
1615 btrfs_free_path(path);
1616 *start = max_hole_start;
1617 if (len)
1618 *len = max_hole_size;
1619 return ret;
1620}
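
/*
 * Worked example (not part of the original file): on a device with dev
 * extents at [1 MiB, 5 MiB) and [9 MiB, 13 MiB), a search for a 2 MiB
 * hole starts at max(0, 1 MiB) = 1 MiB, skips to the end of the first
 * extent (5 MiB), finds the 4 MiB hole before the second extent, and
 * returns *start = 5 MiB with ret = 0.
 */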
1621
1622int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1623 u64 *start, u64 *len)
1624{
1625 /* FIXME use last free of some kind */
1626 return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1627}
1628
1629static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1630 struct btrfs_device *device,
1631 u64 start, u64 *dev_extent_len)
1632{
1633 struct btrfs_fs_info *fs_info = device->fs_info;
1634 struct btrfs_root *root = fs_info->dev_root;
1635 int ret;
1636 struct btrfs_path *path;
1637 struct btrfs_key key;
1638 struct btrfs_key found_key;
1639 struct extent_buffer *leaf = NULL;
1640 struct btrfs_dev_extent *extent = NULL;
1641
1642 path = btrfs_alloc_path();
1643 if (!path)
1644 return -ENOMEM;
1645
1646 key.objectid = device->devid;
1647 key.offset = start;
1648 key.type = BTRFS_DEV_EXTENT_KEY;
1649again:
1650 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1651 if (ret > 0) {
1652 ret = btrfs_previous_item(root, path, key.objectid,
1653 BTRFS_DEV_EXTENT_KEY);
1654 if (ret)
1655 goto out;
1656 leaf = path->nodes[0];
1657 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1658 extent = btrfs_item_ptr(leaf, path->slots[0],
1659 struct btrfs_dev_extent);
1660 BUG_ON(found_key.offset > start || found_key.offset +
1661 btrfs_dev_extent_length(leaf, extent) < start);
1662 key = found_key;
1663 btrfs_release_path(path);
1664 goto again;
1665 } else if (ret == 0) {
1666 leaf = path->nodes[0];
1667 extent = btrfs_item_ptr(leaf, path->slots[0],
1668 struct btrfs_dev_extent);
1669 } else {
1670 btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1671 goto out;
1672 }
1673
1674 *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1675
1676 ret = btrfs_del_item(trans, root, path);
1677 if (ret) {
1678 btrfs_handle_fs_error(fs_info, ret,
1679 "Failed to remove dev extent item");
1680 } else {
1681 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1682 }
1683out:
1684 btrfs_free_path(path);
1685 return ret;
1686}
1687
1688static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1689 struct btrfs_device *device,
1690 u64 chunk_offset, u64 start, u64 num_bytes)
1691{
1692 int ret;
1693 struct btrfs_path *path;
1694 struct btrfs_fs_info *fs_info = device->fs_info;
1695 struct btrfs_root *root = fs_info->dev_root;
1696 struct btrfs_dev_extent *extent;
1697 struct extent_buffer *leaf;
1698 struct btrfs_key key;
1699
1700 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1701 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1702 path = btrfs_alloc_path();
1703 if (!path)
1704 return -ENOMEM;
1705
1706 key.objectid = device->devid;
1707 key.offset = start;
1708 key.type = BTRFS_DEV_EXTENT_KEY;
1709 ret = btrfs_insert_empty_item(trans, root, path, &key,
1710 sizeof(*extent));
1711 if (ret)
1712 goto out;
1713
1714 leaf = path->nodes[0];
1715 extent = btrfs_item_ptr(leaf, path->slots[0],
1716 struct btrfs_dev_extent);
1717 btrfs_set_dev_extent_chunk_tree(leaf, extent,
1718 BTRFS_CHUNK_TREE_OBJECTID);
1719 btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1720 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1721 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1722
1723 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1724 btrfs_mark_buffer_dirty(leaf);
1725out:
1726 btrfs_free_path(path);
1727 return ret;
1728}
1729
1730static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1731{
1732 struct extent_map_tree *em_tree;
1733 struct extent_map *em;
1734 struct rb_node *n;
1735 u64 ret = 0;
1736
1737 em_tree = &fs_info->mapping_tree;
1738 read_lock(&em_tree->lock);
1739 n = rb_last(&em_tree->map.rb_root);
1740 if (n) {
1741 em = rb_entry(n, struct extent_map, rb_node);
1742 ret = em->start + em->len;
1743 }
1744 read_unlock(&em_tree->lock);
1745
1746 return ret;
1747}
1748
1749static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1750 u64 *devid_ret)
1751{
1752 int ret;
1753 struct btrfs_key key;
1754 struct btrfs_key found_key;
1755 struct btrfs_path *path;
1756
1757 path = btrfs_alloc_path();
1758 if (!path)
1759 return -ENOMEM;
1760
1761 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1762 key.type = BTRFS_DEV_ITEM_KEY;
1763 key.offset = (u64)-1;
1764
1765 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1766 if (ret < 0)
1767 goto error;
1768
1769 if (ret == 0) {
1770 /* Corruption */
1771 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1772 ret = -EUCLEAN;
1773 goto error;
1774 }
1775
1776 ret = btrfs_previous_item(fs_info->chunk_root, path,
1777 BTRFS_DEV_ITEMS_OBJECTID,
1778 BTRFS_DEV_ITEM_KEY);
1779 if (ret) {
1780 *devid_ret = 1;
1781 } else {
1782 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1783 path->slots[0]);
1784 *devid_ret = found_key.offset + 1;
1785 }
1786 ret = 0;
1787error:
1788 btrfs_free_path(path);
1789 return ret;
1790}
1791
1792/*
1793 * the device information is stored in the chunk root
1794 * the btrfs_device struct should be fully filled in
1795 */
1796static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1797 struct btrfs_device *device)
1798{
1799 int ret;
1800 struct btrfs_path *path;
1801 struct btrfs_dev_item *dev_item;
1802 struct extent_buffer *leaf;
1803 struct btrfs_key key;
1804 unsigned long ptr;
1805
1806 path = btrfs_alloc_path();
1807 if (!path)
1808 return -ENOMEM;
1809
1810 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1811 key.type = BTRFS_DEV_ITEM_KEY;
1812 key.offset = device->devid;
1813
1814 ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1815 &key, sizeof(*dev_item));
1816 if (ret)
1817 goto out;
1818
1819 leaf = path->nodes[0];
1820 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1821
1822 btrfs_set_device_id(leaf, dev_item, device->devid);
1823 btrfs_set_device_generation(leaf, dev_item, 0);
1824 btrfs_set_device_type(leaf, dev_item, device->type);
1825 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1826 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1827 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1828 btrfs_set_device_total_bytes(leaf, dev_item,
1829 btrfs_device_get_disk_total_bytes(device));
1830 btrfs_set_device_bytes_used(leaf, dev_item,
1831 btrfs_device_get_bytes_used(device));
1832 btrfs_set_device_group(leaf, dev_item, 0);
1833 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1834 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1835 btrfs_set_device_start_offset(leaf, dev_item, 0);
1836
1837 ptr = btrfs_device_uuid(dev_item);
1838 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1839 ptr = btrfs_device_fsid(dev_item);
1840 write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1841 ptr, BTRFS_FSID_SIZE);
1842 btrfs_mark_buffer_dirty(leaf);
1843
1844 ret = 0;
1845out:
1846 btrfs_free_path(path);
1847 return ret;
1848}
1849
1850/*
1851 * Function to update ctime/mtime for a given device path.
1852 * Mainly used for ctime/mtime based probe like libblkid.
1853 */
1854static void update_dev_time(const char *path_name)
1855{
1856 struct file *filp;
1857
1858 filp = filp_open(path_name, O_RDWR, 0);
1859 if (IS_ERR(filp))
1860 return;
1861 file_update_time(filp);
1862 filp_close(filp, NULL);
1863}
1864
1865static int btrfs_rm_dev_item(struct btrfs_device *device)
1866{
1867 struct btrfs_root *root = device->fs_info->chunk_root;
1868 int ret;
1869 struct btrfs_path *path;
1870 struct btrfs_key key;
1871 struct btrfs_trans_handle *trans;
1872
1873 path = btrfs_alloc_path();
1874 if (!path)
1875 return -ENOMEM;
1876
1877 trans = btrfs_start_transaction(root, 0);
1878 if (IS_ERR(trans)) {
1879 btrfs_free_path(path);
1880 return PTR_ERR(trans);
1881 }
1882 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1883 key.type = BTRFS_DEV_ITEM_KEY;
1884 key.offset = device->devid;
1885
1886 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1887 if (ret) {
1888 if (ret > 0)
1889 ret = -ENOENT;
1890 btrfs_abort_transaction(trans, ret);
1891 btrfs_end_transaction(trans);
1892 goto out;
1893 }
1894
1895 ret = btrfs_del_item(trans, root, path);
1896 if (ret) {
1897 btrfs_abort_transaction(trans, ret);
1898 btrfs_end_transaction(trans);
1899 }
1900
1901out:
1902 btrfs_free_path(path);
1903 if (!ret)
1904 ret = btrfs_commit_transaction(trans);
1905 return ret;
1906}
1907
1908/*
1909 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1910 * filesystem. It's up to the caller to adjust that number regarding eg. device
1911 * replace.
1912 */
1913static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1914 u64 num_devices)
1915{
1916 u64 all_avail;
1917 unsigned seq;
1918 int i;
1919
1920 do {
1921 seq = read_seqbegin(&fs_info->profiles_lock);
1922
1923 all_avail = fs_info->avail_data_alloc_bits |
1924 fs_info->avail_system_alloc_bits |
1925 fs_info->avail_metadata_alloc_bits;
1926 } while (read_seqretry(&fs_info->profiles_lock, seq));
1927
1928 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1929 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1930 continue;
1931
1932 if (num_devices < btrfs_raid_array[i].devs_min) {
1933 int ret = btrfs_raid_array[i].mindev_error;
1934
1935 if (ret)
1936 return ret;
1937 }
1938 }
1939
1940 return 0;
1941}
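
/*
 * Editorial sketch, not part of the kernel source: a worked example of
 * the check above. On a two-device filesystem using the raid1 profile
 * (devs_min == 2 in btrfs_raid_array), removing one device would leave
 * num_devices == 1:
 *
 *	ret = btrfs_check_raid_min_devices(fs_info, 1);
 *
 * The loop finds the raid1 entry among the available alloc bits, sees
 * 1 < 2 and returns BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, so the removal
 * is refused before any data is touched.
 */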
1942
1943static struct btrfs_device *btrfs_find_next_active_device(
1944 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1945{
1946 struct btrfs_device *next_device;
1947
1948 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1949 if (next_device != device &&
1950     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state) &&
1951     next_device->bdev)
1952 return next_device;
1953 }
1954
1955 return NULL;
1956}
1957
1958/*
1959 * Helper function to check if the given device is part of s_bdev / latest_bdev
1960 * and replace it with the provided or the next active device. In the context
1961 * where this function is called, there should always be another device (or
1962 * next_device) which is active.
1963 */
1964void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1965 struct btrfs_device *next_device)
1966{
1967 struct btrfs_fs_info *fs_info = device->fs_info;
1968
1969 if (!next_device)
1970 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1971 device);
1972 ASSERT(next_device);
1973
1974 if (fs_info->sb->s_bdev &&
1975 (fs_info->sb->s_bdev == device->bdev))
1976 fs_info->sb->s_bdev = next_device->bdev;
1977
1978 if (fs_info->fs_devices->latest_bdev == device->bdev)
1979 fs_info->fs_devices->latest_bdev = next_device->bdev;
1980}
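
/*
 * Editorial sketch, not part of the kernel source: both call sites in
 * this file pass NULL and let btrfs_find_next_active_device() pick the
 * replacement, e.g. from btrfs_rm_device():
 *
 *	btrfs_assign_next_active_device(device, NULL);
 *
 * The device replace path can instead pass the new target device
 * explicitly as @next_device, so s_bdev/latest_bdev move to it.
 */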
1981
1982/*
1983 * Return btrfs_fs_devices::num_devices excluding the device that's being
1984 * currently replaced.
1985 */
1986static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
1987{
1988 u64 num_devices = fs_info->fs_devices->num_devices;
1989
1990 down_read(&fs_info->dev_replace.rwsem);
1991 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1992 ASSERT(num_devices > 1);
1993 num_devices--;
1994 }
1995 up_read(&fs_info->dev_replace.rwsem);
1996
1997 return num_devices;
1998}
1999
2000void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2001 struct block_device *bdev,
2002 const char *device_path)
2003{
2004 struct btrfs_super_block *disk_super;
2005 int copy_num;
2006
2007 if (!bdev)
2008 return;
2009
2010 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2011 struct page *page;
2012 int ret;
2013
2014 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2015 if (IS_ERR(disk_super))
2016 continue;
2017
2018 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2019
2020 page = virt_to_page(disk_super);
2021 set_page_dirty(page);
2022 lock_page(page);
2023 /* write_one_page() unlocks the page */
2024 ret = write_one_page(page);
2025 if (ret)
2026 btrfs_warn(fs_info,
2027 "error clearing superblock number %d (%d)",
2028 copy_num, ret);
2029 btrfs_release_disk_super(disk_super);
2031 }
2032
2033 /* Notify udev that device has changed */
2034 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2035
2036 /* Update ctime/mtime for device path for libblkid */
2037 update_dev_time(device_path);
2038}
2039
2040int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2041 u64 devid)
2042{
2043 struct btrfs_device *device;
2044 struct btrfs_fs_devices *cur_devices;
2045 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2046 u64 num_devices;
2047 int ret = 0;
2048
2049 mutex_lock(&uuid_mutex);
2050
2051 num_devices = btrfs_num_devices(fs_info);
2052
2053 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2054 if (ret)
2055 goto out;
2056
2057 device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2058
2059 if (IS_ERR(device)) {
2060 if (PTR_ERR(device) == -ENOENT &&
2061 strcmp(device_path, "missing") == 0)
2062 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2063 else
2064 ret = PTR_ERR(device);
2065 goto out;
2066 }
2067
2068 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2069 btrfs_warn_in_rcu(fs_info,
2070 "cannot remove device %s (devid %llu) due to active swapfile",
2071 rcu_str_deref(device->name), device->devid);
2072 ret = -ETXTBSY;
2073 goto out;
2074 }
2075
2076 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2077 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2078 goto out;
2079 }
2080
2081 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2082 fs_info->fs_devices->rw_devices == 1) {
2083 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2084 goto out;
2085 }
2086
2087 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2088 mutex_lock(&fs_info->chunk_mutex);
2089 list_del_init(&device->dev_alloc_list);
2090 device->fs_devices->rw_devices--;
2091 mutex_unlock(&fs_info->chunk_mutex);
2092 }
2093
2094 mutex_unlock(&uuid_mutex);
2095 ret = btrfs_shrink_device(device, 0);
2096 mutex_lock(&uuid_mutex);
2097 if (ret)
2098 goto error_undo;
2099
2100 /*
2101 * TODO: the superblock still includes this device in its num_devices
2102 * counter although write_all_supers() is not locked out. This
2103 * could give a filesystem state which requires a degraded mount.
2104 */
2105 ret = btrfs_rm_dev_item(device);
2106 if (ret)
2107 goto error_undo;
2108
2109 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2110 btrfs_scrub_cancel_dev(device);
2111
2112 /*
2113 * The device list mutex makes sure that we don't change
2114 * the device list while someone else is writing out all
2115 * the device supers. Whoever is writing all supers should
2116 * lock the device list mutex before getting the number of
2117 * devices in the super block (super_copy). Conversely,
2118 * whoever updates the number of devices in the super block
2119 * (super_copy) should hold the device list mutex.
2120 */
2121
2122 /*
2123 * In normal cases cur_devices == fs_devices. But when deleting a seed
2124 * device, cur_devices points to the seed's own fs_devices, which is
2125 * linked in via fs_devices->seed_list.
2126 */
2127 cur_devices = device->fs_devices;
2128 mutex_lock(&fs_devices->device_list_mutex);
2129 list_del_rcu(&device->dev_list);
2130
2131 cur_devices->num_devices--;
2132 cur_devices->total_devices--;
2133 /* Update total_devices of the parent fs_devices if it's seed */
2134 if (cur_devices != fs_devices)
2135 fs_devices->total_devices--;
2136
2137 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2138 cur_devices->missing_devices--;
2139
2140 btrfs_assign_next_active_device(device, NULL);
2141
2142 if (device->bdev) {
2143 cur_devices->open_devices--;
2144 /* remove sysfs entry */
2145 btrfs_sysfs_remove_device(device);
2146 }
2147
2148 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2149 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2150 mutex_unlock(&fs_devices->device_list_mutex);
2151
2152 /*
2153 * at this point, the device is zero sized and detached from
2154 * the devices list. All that's left is to zero out the old
2155 * supers and free the device.
2156 */
2157 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2158 btrfs_scratch_superblocks(fs_info, device->bdev,
2159 device->name->str);
2160
2161 btrfs_close_bdev(device);
2162 synchronize_rcu();
2163 btrfs_free_device(device);
2164
2165 if (cur_devices->open_devices == 0) {
2166 list_del_init(&cur_devices->seed_list);
2167 close_fs_devices(cur_devices);
2168 free_fs_devices(cur_devices);
2169 }
2170
2171out:
2172 mutex_unlock(&uuid_mutex);
2173 return ret;
2174
2175error_undo:
2176 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2177 mutex_lock(&fs_info->chunk_mutex);
2178 list_add(&device->dev_alloc_list,
2179 &fs_devices->alloc_list);
2180 device->fs_devices->rw_devices++;
2181 mutex_unlock(&fs_info->chunk_mutex);
2182 }
2183 goto out;
2184}
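
/*
 * Editorial sketch, not part of the kernel source: this is the backend
 * of the device-remove ioctl, with hypothetical arguments shown. Either
 * a path or a non-zero devid selects the victim:
 *
 *	ret = btrfs_rm_device(fs_info, "/dev/sdb", 0);
 *	ret = btrfs_rm_device(fs_info, "missing", 0);
 *
 * Passing the literal path "missing" removes the first device that is
 * recorded in the metadata but has no block device attached; if none
 * exists, BTRFS_ERROR_DEV_MISSING_NOT_FOUND is returned.
 */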
2185
2186void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2187{
2188 struct btrfs_fs_devices *fs_devices;
2189
2190 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2191
2192 /*
2193 * In case of a filesystem with no seed, srcdev->fs_devices will point
2194 * to the fs_devices of fs_info. However, when the device being replaced
2195 * is a seed device, it will point to the seed's local fs_devices. In
2196 * short, srcdev will have its correct fs_devices in both cases.
2197 */
2198 fs_devices = srcdev->fs_devices;
2199
2200 list_del_rcu(&srcdev->dev_list);
2201 list_del(&srcdev->dev_alloc_list);
2202 fs_devices->num_devices--;
2203 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2204 fs_devices->missing_devices--;
2205
2206 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2207 fs_devices->rw_devices--;
2208
2209 if (srcdev->bdev)
2210 fs_devices->open_devices--;
2211}
2212
2213void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2214{
2215 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2216
2217 mutex_lock(&uuid_mutex);
2218
2219 btrfs_close_bdev(srcdev);
2220 synchronize_rcu();
2221 btrfs_free_device(srcdev);
2222
2223 /* If there are no devices left, delete the fs_devices as well. */
2224 if (!fs_devices->num_devices) {
2225 /*
2226 * On a mounted FS, num_devices can't be zero unless it's a
2227 * seed. When a seed device is being replaced, the replace
2228 * target is added to the sprout FS, so there will be no
2229 * device left under the seed FS.
2230 */
2231 ASSERT(fs_devices->seeding);
2232
2233 list_del_init(&fs_devices->seed_list);
2234 close_fs_devices(fs_devices);
2235 free_fs_devices(fs_devices);
2236 }
2237 mutex_unlock(&uuid_mutex);
2238}
2239
2240void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2241{
2242 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2243
2244 mutex_lock(&fs_devices->device_list_mutex);
2245
2246 btrfs_sysfs_remove_device(tgtdev);
2247
2248 if (tgtdev->bdev)
2249 fs_devices->open_devices--;
2250
2251 fs_devices->num_devices--;
2252
2253 btrfs_assign_next_active_device(tgtdev, NULL);
2254
2255 list_del_rcu(&tgtdev->dev_list);
2256
2257 mutex_unlock(&fs_devices->device_list_mutex);
2258
2259 /*
2260 * The update_dev_time() within btrfs_scratch_superblocks()
2261 * may lead to a call to btrfs_show_devname() which will try
2262 * to hold device_list_mutex. Here this device is already
2263 * off the device list, so we don't have to hold the
2264 * device_list_mutex lock.
2265 */
2266 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2267 tgtdev->name->str);
2268
2269 btrfs_close_bdev(tgtdev);
2270 synchronize_rcu();
2271 btrfs_free_device(tgtdev);
2272}
2273
2274static struct btrfs_device *btrfs_find_device_by_path(
2275 struct btrfs_fs_info *fs_info, const char *device_path)
2276{
2277 int ret = 0;
2278 struct btrfs_super_block *disk_super;
2279 u64 devid;
2280 u8 *dev_uuid;
2281 struct block_device *bdev;
2282 struct btrfs_device *device;
2283
2284 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2285 fs_info->bdev_holder, 0, &bdev, &disk_super);
2286 if (ret)
2287 return ERR_PTR(ret);
2288
2289 devid = btrfs_stack_device_id(&disk_super->dev_item);
2290 dev_uuid = disk_super->dev_item.uuid;
2291 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2292 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2293 disk_super->metadata_uuid, true);
2294 else
2295 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2296 disk_super->fsid, true);
2297
2298 btrfs_release_disk_super(disk_super);
2299 if (!device)
2300 device = ERR_PTR(-ENOENT);
2301 blkdev_put(bdev, FMODE_READ);
2302 return device;
2303}
2304
2305/*
2306 * Lookup a device given by device id, or the path if the id is 0.
2307 */
2308struct btrfs_device *btrfs_find_device_by_devspec(
2309 struct btrfs_fs_info *fs_info, u64 devid,
2310 const char *device_path)
2311{
2312 struct btrfs_device *device;
2313
2314 if (devid) {
2315 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2316 NULL, true);
2317 if (!device)
2318 return ERR_PTR(-ENOENT);
2319 return device;
2320 }
2321
2322 if (!device_path || !device_path[0])
2323 return ERR_PTR(-EINVAL);
2324
2325 if (strcmp(device_path, "missing") == 0) {
2326 /* Find first missing device */
2327 list_for_each_entry(device, &fs_info->fs_devices->devices,
2328 dev_list) {
2329 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2330 &device->dev_state) && !device->bdev)
2331 return device;
2332 }
2333 return ERR_PTR(-ENOENT);
2334 }
2335
2336 return btrfs_find_device_by_path(fs_info, device_path);
2337}
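
/*
 * Editorial sketch, not part of the kernel source, with hypothetical
 * arguments. The three ways a devspec resolves, in order of precedence:
 *
 *	dev = btrfs_find_device_by_devspec(fs_info, 2, NULL);
 *		(by id; the path is ignored when devid != 0)
 *	dev = btrfs_find_device_by_devspec(fs_info, 0, "missing");
 *		(first device present in metadata but without a bdev)
 *	dev = btrfs_find_device_by_devspec(fs_info, 0, "/dev/sdc");
 *		(read the superblock at that path and match devid/uuid)
 *
 * All three return ERR_PTR(-ENOENT) when nothing matches, so callers
 * must check IS_ERR() rather than for NULL.
 */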
2338
2339/*
2340 * Does all the dirty work required for changing the filesystem's UUID.
2341 */
2342static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2343{
2344 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2345 struct btrfs_fs_devices *old_devices;
2346 struct btrfs_fs_devices *seed_devices;
2347 struct btrfs_super_block *disk_super = fs_info->super_copy;
2348 struct btrfs_device *device;
2349 u64 super_flags;
2350
2351 lockdep_assert_held(&uuid_mutex);
2352 if (!fs_devices->seeding)
2353 return -EINVAL;
2354
2355 /*
2356 * Private copy of the seed devices, anchored at
2357 * fs_info->fs_devices->seed_list
2358 */
2359 seed_devices = alloc_fs_devices(NULL, NULL);
2360 if (IS_ERR(seed_devices))
2361 return PTR_ERR(seed_devices);
2362
2363 /*
2364 * It's necessary to retain a copy of the original seed fs_devices in
2365 * fs_uuids so that filesystems which have been seeded can successfully
2366 * reference the seed device from open_seed_devices. This also supports
2367 * multiple seed filesystems.
2368 */
2369 old_devices = clone_fs_devices(fs_devices);
2370 if (IS_ERR(old_devices)) {
2371 kfree(seed_devices);
2372 return PTR_ERR(old_devices);
2373 }
2374
2375 list_add(&old_devices->fs_list, &fs_uuids);
2376
2377 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2378 seed_devices->opened = 1;
2379 INIT_LIST_HEAD(&seed_devices->devices);
2380 INIT_LIST_HEAD(&seed_devices->alloc_list);
2381 mutex_init(&seed_devices->device_list_mutex);
2382
2383 mutex_lock(&fs_devices->device_list_mutex);
2384 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2385 synchronize_rcu);
2386 list_for_each_entry(device, &seed_devices->devices, dev_list)
2387 device->fs_devices = seed_devices;
2388
2389 fs_devices->seeding = false;
2390 fs_devices->num_devices = 0;
2391 fs_devices->open_devices = 0;
2392 fs_devices->missing_devices = 0;
2393 fs_devices->rotating = false;
2394 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2395
2396 generate_random_uuid(fs_devices->fsid);
2397 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2398 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2399 mutex_unlock(&fs_devices->device_list_mutex);
2400
2401 super_flags = btrfs_super_flags(disk_super) &
2402 ~BTRFS_SUPER_FLAG_SEEDING;
2403 btrfs_set_super_flags(disk_super, super_flags);
2404
2405 return 0;
2406}
2407
2408/*
2409 * Store the expected generation for seed devices in device items.
2410 */
2411static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2412{
2413 struct btrfs_fs_info *fs_info = trans->fs_info;
2414 struct btrfs_root *root = fs_info->chunk_root;
2415 struct btrfs_path *path;
2416 struct extent_buffer *leaf;
2417 struct btrfs_dev_item *dev_item;
2418 struct btrfs_device *device;
2419 struct btrfs_key key;
2420 u8 fs_uuid[BTRFS_FSID_SIZE];
2421 u8 dev_uuid[BTRFS_UUID_SIZE];
2422 u64 devid;
2423 int ret;
2424
2425 path = btrfs_alloc_path();
2426 if (!path)
2427 return -ENOMEM;
2428
2429 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2430 key.offset = 0;
2431 key.type = BTRFS_DEV_ITEM_KEY;
2432
2433 while (1) {
2434 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2435 if (ret < 0)
2436 goto error;
2437
2438 leaf = path->nodes[0];
2439next_slot:
2440 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2441 ret = btrfs_next_leaf(root, path);
2442 if (ret > 0)
2443 break;
2444 if (ret < 0)
2445 goto error;
2446 leaf = path->nodes[0];
2447 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2448 btrfs_release_path(path);
2449 continue;
2450 }
2451
2452 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2453 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2454 key.type != BTRFS_DEV_ITEM_KEY)
2455 break;
2456
2457 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2458 struct btrfs_dev_item);
2459 devid = btrfs_device_id(leaf, dev_item);
2460 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2461 BTRFS_UUID_SIZE);
2462 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2463 BTRFS_FSID_SIZE);
2464 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2465 fs_uuid, true);
2466 BUG_ON(!device); /* Logic error */
2467
2468 if (device->fs_devices->seeding) {
2469 btrfs_set_device_generation(leaf, dev_item,
2470 device->generation);
2471 btrfs_mark_buffer_dirty(leaf);
2472 }
2473
2474 path->slots[0]++;
2475 goto next_slot;
2476 }
2477 ret = 0;
2478error:
2479 btrfs_free_path(path);
2480 return ret;
2481}
2482
2483int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2484{
2485 struct btrfs_root *root = fs_info->dev_root;
2486 struct request_queue *q;
2487 struct btrfs_trans_handle *trans;
2488 struct btrfs_device *device;
2489 struct block_device *bdev;
2490 struct super_block *sb = fs_info->sb;
2491 struct rcu_string *name;
2492 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2493 u64 orig_super_total_bytes;
2494 u64 orig_super_num_devices;
2495 int seeding_dev = 0;
2496 int ret = 0;
2497 bool locked = false;
2498
2499 if (sb_rdonly(sb) && !fs_devices->seeding)
2500 return -EROFS;
2501
2502 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2503 fs_info->bdev_holder);
2504 if (IS_ERR(bdev))
2505 return PTR_ERR(bdev);
2506
2507 if (fs_devices->seeding) {
2508 seeding_dev = 1;
2509 down_write(&sb->s_umount);
2510 mutex_lock(&uuid_mutex);
2511 locked = true;
2512 }
2513
2514 sync_blockdev(bdev);
2515
2516 rcu_read_lock();
2517 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2518 if (device->bdev == bdev) {
2519 ret = -EEXIST;
2520 rcu_read_unlock();
2521 goto error;
2522 }
2523 }
2524 rcu_read_unlock();
2525
2526 device = btrfs_alloc_device(fs_info, NULL, NULL);
2527 if (IS_ERR(device)) {
2528 /* we can safely leave the fs_devices entry around */
2529 ret = PTR_ERR(device);
2530 goto error;
2531 }
2532
2533 name = rcu_string_strdup(device_path, GFP_KERNEL);
2534 if (!name) {
2535 ret = -ENOMEM;
2536 goto error_free_device;
2537 }
2538 rcu_assign_pointer(device->name, name);
2539
2540 trans = btrfs_start_transaction(root, 0);
2541 if (IS_ERR(trans)) {
2542 ret = PTR_ERR(trans);
2543 goto error_free_device;
2544 }
2545
2546 q = bdev_get_queue(bdev);
2547 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2548 device->generation = trans->transid;
2549 device->io_width = fs_info->sectorsize;
2550 device->io_align = fs_info->sectorsize;
2551 device->sector_size = fs_info->sectorsize;
2552 device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2553 fs_info->sectorsize);
2554 device->disk_total_bytes = device->total_bytes;
2555 device->commit_total_bytes = device->total_bytes;
2556 device->fs_info = fs_info;
2557 device->bdev = bdev;
2558 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2559 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2560 device->mode = FMODE_EXCL;
2561 device->dev_stats_valid = 1;
2562 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2563
2564 if (seeding_dev) {
2565 sb->s_flags &= ~SB_RDONLY;
2566 ret = btrfs_prepare_sprout(fs_info);
2567 if (ret) {
2568 btrfs_abort_transaction(trans, ret);
2569 goto error_trans;
2570 }
2571 }
2572
2573 device->fs_devices = fs_devices;
2574
2575 mutex_lock(&fs_devices->device_list_mutex);
2576 mutex_lock(&fs_info->chunk_mutex);
2577 list_add_rcu(&device->dev_list, &fs_devices->devices);
2578 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2579 fs_devices->num_devices++;
2580 fs_devices->open_devices++;
2581 fs_devices->rw_devices++;
2582 fs_devices->total_devices++;
2583 fs_devices->total_rw_bytes += device->total_bytes;
2584
2585 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2586
2587 if (!blk_queue_nonrot(q))
2588 fs_devices->rotating = true;
2589
2590 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2591 btrfs_set_super_total_bytes(fs_info->super_copy,
2592 round_down(orig_super_total_bytes + device->total_bytes,
2593 fs_info->sectorsize));
2594
2595 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2596 btrfs_set_super_num_devices(fs_info->super_copy,
2597 orig_super_num_devices + 1);
2598
2599 /*
2600 * we've got more storage, clear any full flags on the space
2601 * infos
2602 */
2603 btrfs_clear_space_info_full(fs_info);
2604
2605 mutex_unlock(&fs_info->chunk_mutex);
2606
2607 /* Add sysfs device entry */
2608 btrfs_sysfs_add_device(device);
2609
2610 mutex_unlock(&fs_devices->device_list_mutex);
2611
2612 if (seeding_dev) {
2613 mutex_lock(&fs_info->chunk_mutex);
2614 ret = init_first_rw_device(trans);
2615 mutex_unlock(&fs_info->chunk_mutex);
2616 if (ret) {
2617 btrfs_abort_transaction(trans, ret);
2618 goto error_sysfs;
2619 }
2620 }
2621
2622 ret = btrfs_add_dev_item(trans, device);
2623 if (ret) {
2624 btrfs_abort_transaction(trans, ret);
2625 goto error_sysfs;
2626 }
2627
2628 if (seeding_dev) {
2629 ret = btrfs_finish_sprout(trans);
2630 if (ret) {
2631 btrfs_abort_transaction(trans, ret);
2632 goto error_sysfs;
2633 }
2634
2635 /*
2636 * fs_devices now represents the newly sprouted filesystem and
2637 * its fsid has been changed by btrfs_prepare_sprout
2638 */
2639 btrfs_sysfs_update_sprout_fsid(fs_devices);
2640 }
2641
2642 ret = btrfs_commit_transaction(trans);
2643
2644 if (seeding_dev) {
2645 mutex_unlock(&uuid_mutex);
2646 up_write(&sb->s_umount);
2647 locked = false;
2648
2649 if (ret) /* transaction commit */
2650 return ret;
2651
2652 ret = btrfs_relocate_sys_chunks(fs_info);
2653 if (ret < 0)
2654 btrfs_handle_fs_error(fs_info, ret,
2655 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2656 trans = btrfs_attach_transaction(root);
2657 if (IS_ERR(trans)) {
2658 if (PTR_ERR(trans) == -ENOENT)
2659 return 0;
2660 ret = PTR_ERR(trans);
2661 trans = NULL;
2662 goto error_sysfs;
2663 }
2664 ret = btrfs_commit_transaction(trans);
2665 }
2666
2667 /*
2668 * Now that we have written a new super block to this device, check all
2669 * other fs_devices lists to see whether device_path alienates any
2670 * other scanned device.
2671 * We can ignore the return value as it typically returns -EINVAL and
2672 * only succeeds if the device was an alien.
2673 */
2674 btrfs_forget_devices(device_path);
2675
2676 /* Update ctime/mtime for blkid or udev */
2677 update_dev_time(device_path);
2678
2679 return ret;
2680
2681error_sysfs:
2682 btrfs_sysfs_remove_device(device);
2683 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2684 mutex_lock(&fs_info->chunk_mutex);
2685 list_del_rcu(&device->dev_list);
2686 list_del(&device->dev_alloc_list);
2687 fs_info->fs_devices->num_devices--;
2688 fs_info->fs_devices->open_devices--;
2689 fs_info->fs_devices->rw_devices--;
2690 fs_info->fs_devices->total_devices--;
2691 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2692 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2693 btrfs_set_super_total_bytes(fs_info->super_copy,
2694 orig_super_total_bytes);
2695 btrfs_set_super_num_devices(fs_info->super_copy,
2696 orig_super_num_devices);
2697 mutex_unlock(&fs_info->chunk_mutex);
2698 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2699error_trans:
2700 if (seeding_dev)
2701 sb->s_flags |= SB_RDONLY;
2702 if (trans)
2703 btrfs_end_transaction(trans);
2704error_free_device:
2705 btrfs_free_device(device);
2706error:
2707 blkdev_put(bdev, FMODE_EXCL);
2708 if (locked) {
2709 mutex_unlock(&uuid_mutex);
2710 up_write(&sb->s_umount);
2711 }
2712 return ret;
2713}
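
/*
 * Editorial sketch, not part of the kernel source: the device-add ioctl
 * is the typical caller, handing in just the path of the new disk
 * (hypothetical value shown):
 *
 *	ret = btrfs_init_new_device(fs_info, "/dev/sdd");
 *
 * On a seeding filesystem this same call is what turns the mount into a
 * writable sprout: btrfs_prepare_sprout() generates the new fsid before
 * the first chunks are created on the added device.
 */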
2714
2715static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2716 struct btrfs_device *device)
2717{
2718 int ret;
2719 struct btrfs_path *path;
2720 struct btrfs_root *root = device->fs_info->chunk_root;
2721 struct btrfs_dev_item *dev_item;
2722 struct extent_buffer *leaf;
2723 struct btrfs_key key;
2724
2725 path = btrfs_alloc_path();
2726 if (!path)
2727 return -ENOMEM;
2728
2729 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2730 key.type = BTRFS_DEV_ITEM_KEY;
2731 key.offset = device->devid;
2732
2733 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2734 if (ret < 0)
2735 goto out;
2736
2737 if (ret > 0) {
2738 ret = -ENOENT;
2739 goto out;
2740 }
2741
2742 leaf = path->nodes[0];
2743 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2744
2745 btrfs_set_device_id(leaf, dev_item, device->devid);
2746 btrfs_set_device_type(leaf, dev_item, device->type);
2747 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2748 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2749 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2750 btrfs_set_device_total_bytes(leaf, dev_item,
2751 btrfs_device_get_disk_total_bytes(device));
2752 btrfs_set_device_bytes_used(leaf, dev_item,
2753 btrfs_device_get_bytes_used(device));
2754 btrfs_mark_buffer_dirty(leaf);
2755
2756out:
2757 btrfs_free_path(path);
2758 return ret;
2759}
2760
2761int btrfs_grow_device(struct btrfs_trans_handle *trans,
2762 struct btrfs_device *device, u64 new_size)
2763{
2764 struct btrfs_fs_info *fs_info = device->fs_info;
2765 struct btrfs_super_block *super_copy = fs_info->super_copy;
2766 u64 old_total;
2767 u64 diff;
2768
2769 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2770 return -EACCES;
2771
2772 new_size = round_down(new_size, fs_info->sectorsize);
2773
2774 mutex_lock(&fs_info->chunk_mutex);
2775 old_total = btrfs_super_total_bytes(super_copy);
2776 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2777
2778 if (new_size <= device->total_bytes ||
2779 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2780 mutex_unlock(&fs_info->chunk_mutex);
2781 return -EINVAL;
2782 }
2783
2784 btrfs_set_super_total_bytes(super_copy,
2785 round_down(old_total + diff, fs_info->sectorsize));
2786 device->fs_devices->total_rw_bytes += diff;
2787
2788 btrfs_device_set_total_bytes(device, new_size);
2789 btrfs_device_set_disk_total_bytes(device, new_size);
2790 btrfs_clear_space_info_full(device->fs_info);
2791 if (list_empty(&device->post_commit_list))
2792 list_add_tail(&device->post_commit_list,
2793 &trans->transaction->dev_update_list);
2794 mutex_unlock(&fs_info->chunk_mutex);
2795
2796 return btrfs_update_device(trans, device);
2797}
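
/*
 * Editorial sketch, not part of the kernel source: a worked example of
 * the size math above, assuming a 4096 byte sectorsize. Growing a
 * device from 8 GiB to a requested 10 GiB + 100 bytes:
 *
 *	new_size = round_down(10 GiB + 100, 4096)   = 10 GiB
 *	diff     = round_down(10 GiB - 8 GiB, 4096) = 2 GiB
 *
 * The superblock total_bytes grows by exactly diff, and the device's
 * total_bytes/disk_total_bytes both become the rounded new_size, so
 * everything stays sectorsize aligned.
 */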
2798
2799static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2800{
2801 struct btrfs_fs_info *fs_info = trans->fs_info;
2802 struct btrfs_root *root = fs_info->chunk_root;
2803 int ret;
2804 struct btrfs_path *path;
2805 struct btrfs_key key;
2806
2807 path = btrfs_alloc_path();
2808 if (!path)
2809 return -ENOMEM;
2810
2811 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2812 key.offset = chunk_offset;
2813 key.type = BTRFS_CHUNK_ITEM_KEY;
2814
2815 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2816 if (ret < 0)
2817 goto out;
2818 if (ret > 0) { /* Logic error or corruption */
2819 btrfs_handle_fs_error(fs_info, -ENOENT,
2820 "Failed lookup while freeing chunk.");
2821 ret = -ENOENT;
2822 goto out;
2823 }
2824
2825 ret = btrfs_del_item(trans, root, path);
2826 if (ret < 0)
2827 btrfs_handle_fs_error(fs_info, ret,
2828 "Failed to delete chunk item.");
2829out:
2830 btrfs_free_path(path);
2831 return ret;
2832}
2833
2834static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2835{
2836 struct btrfs_super_block *super_copy = fs_info->super_copy;
2837 struct btrfs_disk_key *disk_key;
2838 struct btrfs_chunk *chunk;
2839 u8 *ptr;
2840 int ret = 0;
2841 u32 num_stripes;
2842 u32 array_size;
2843 u32 len = 0;
2844 u32 cur;
2845 struct btrfs_key key;
2846
2847 mutex_lock(&fs_info->chunk_mutex);
2848 array_size = btrfs_super_sys_array_size(super_copy);
2849
2850 ptr = super_copy->sys_chunk_array;
2851 cur = 0;
2852
2853 while (cur < array_size) {
2854 disk_key = (struct btrfs_disk_key *)ptr;
2855 btrfs_disk_key_to_cpu(&key, disk_key);
2856
2857 len = sizeof(*disk_key);
2858
2859 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2860 chunk = (struct btrfs_chunk *)(ptr + len);
2861 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2862 len += btrfs_chunk_item_size(num_stripes);
2863 } else {
2864 ret = -EIO;
2865 break;
2866 }
2867 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2868 key.offset == chunk_offset) {
2869 memmove(ptr, ptr + len, array_size - (cur + len));
2870 array_size -= len;
2871 btrfs_set_super_sys_array_size(super_copy, array_size);
2872 } else {
2873 ptr += len;
2874 cur += len;
2875 }
2876 }
2877 mutex_unlock(&fs_info->chunk_mutex);
2878 return ret;
2879}
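
/*
 * Editorial sketch, not part of the kernel source: the layout that the
 * loop above walks. sys_chunk_array is a packed sequence of pairs:
 *
 *	[disk_key 0][chunk 0 + stripes][disk_key 1][chunk 1 + stripes]...
 *
 * Each step advances by sizeof(*disk_key) plus
 * btrfs_chunk_item_size(num_stripes). When the key matches
 * chunk_offset, the memmove() slides the remaining pairs down over the
 * deleted one and array_size shrinks by that pair's length.
 */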
2880
2881/*
2882 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2883 * @fs_info: Filesystem to search the mapping tree of.
2884 * @logical: Logical block offset in bytes.
2885 * @length: Length of extent in bytes.
2886 * Return: Chunk mapping or ERR_PTR.
2887 */
2888struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2889 u64 logical, u64 length)
2890{
2891 struct extent_map_tree *em_tree;
2892 struct extent_map *em;
2893
2894 em_tree = &fs_info->mapping_tree;
2895 read_lock(&em_tree->lock);
2896 em = lookup_extent_mapping(em_tree, logical, length);
2897 read_unlock(&em_tree->lock);
2898
2899 if (!em) {
2900 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2901 logical, length);
2902 return ERR_PTR(-EINVAL);
2903 }
2904
2905 if (em->start > logical || em->start + em->len < logical) {
2906 btrfs_crit(fs_info,
2907 "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2908 logical, length, em->start, em->start + em->len);
2909 free_extent_map(em);
2910 return ERR_PTR(-EINVAL);
2911 }
2912
2913 /* callers are responsible for dropping em's ref. */
2914 return em;
2915}
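
/*
 * Editorial sketch, not part of the kernel source: typical usage, as in
 * btrfs_remove_chunk() below. A length of 1 is enough to find the
 * mapping that covers a known chunk start:
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);	(drop the reference taken by the lookup)
 */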
2916
2917int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2918{
2919 struct btrfs_fs_info *fs_info = trans->fs_info;
2920 struct extent_map *em;
2921 struct map_lookup *map;
2922 u64 dev_extent_len = 0;
2923 int i, ret = 0;
2924 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2925
2926 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2927 if (IS_ERR(em)) {
2928 /*
2929 * This is a logic error, but we don't want to just rely on the
2930 * user having built with ASSERT enabled, so if ASSERT doesn't
2931 * do anything we still error out.
2932 */
2933 ASSERT(0);
2934 return PTR_ERR(em);
2935 }
2936 map = em->map_lookup;
2937 mutex_lock(&fs_info->chunk_mutex);
2938 check_system_chunk(trans, map->type);
2939 mutex_unlock(&fs_info->chunk_mutex);
2940
2941 /*
2942 * Take the device list mutex to prevent races with the final phase of
2943 * a device replace operation that replaces the device object associated
2944 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2945 */
2946 mutex_lock(&fs_devices->device_list_mutex);
2947 for (i = 0; i < map->num_stripes; i++) {
2948 struct btrfs_device *device = map->stripes[i].dev;
2949 ret = btrfs_free_dev_extent(trans, device,
2950 map->stripes[i].physical,
2951 &dev_extent_len);
2952 if (ret) {
2953 mutex_unlock(&fs_devices->device_list_mutex);
2954 btrfs_abort_transaction(trans, ret);
2955 goto out;
2956 }
2957
2958 if (device->bytes_used > 0) {
2959 mutex_lock(&fs_info->chunk_mutex);
2960 btrfs_device_set_bytes_used(device,
2961 device->bytes_used - dev_extent_len);
2962 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2963 btrfs_clear_space_info_full(fs_info);
2964 mutex_unlock(&fs_info->chunk_mutex);
2965 }
2966
2967 ret = btrfs_update_device(trans, device);
2968 if (ret) {
2969 mutex_unlock(&fs_devices->device_list_mutex);
2970 btrfs_abort_transaction(trans, ret);
2971 goto out;
2972 }
2973 }
2974 mutex_unlock(&fs_devices->device_list_mutex);
2975
2976 ret = btrfs_free_chunk(trans, chunk_offset);
2977 if (ret) {
2978 btrfs_abort_transaction(trans, ret);
2979 goto out;
2980 }
2981
2982 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2983
2984 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2985 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2986 if (ret) {
2987 btrfs_abort_transaction(trans, ret);
2988 goto out;
2989 }
2990 }
2991
2992 ret = btrfs_remove_block_group(trans, chunk_offset, em);
2993 if (ret) {
2994 btrfs_abort_transaction(trans, ret);
2995 goto out;
2996 }
2997
2998out:
2999 /* once for us */
3000 free_extent_map(em);
3001 return ret;
3002}
3003
3004static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3005{
3006 struct btrfs_root *root = fs_info->chunk_root;
3007 struct btrfs_trans_handle *trans;
3008 struct btrfs_block_group *block_group;
3009 int ret;
3010
3011 /*
3012 * Prevent races with automatic removal of unused block groups.
3013 * After we relocate and before we remove the chunk with offset
3014 * chunk_offset, automatic removal of the block group can kick in,
3015 * resulting in a failure when calling btrfs_remove_chunk() below.
3016 *
3017 * Make sure to acquire this mutex before doing a tree search (dev
3018 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3019 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3020 * we release the path used to search the chunk/dev tree and before
3021 * the current task acquires this mutex and calls us.
3022 */
3023 lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3024
3025 /* step one, relocate all the extents inside this chunk */
3026 btrfs_scrub_pause(fs_info);
3027 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3028 btrfs_scrub_continue(fs_info);
3029 if (ret)
3030 return ret;
3031
3032 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3033 if (!block_group)
3034 return -ENOENT;
3035 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3036 btrfs_put_block_group(block_group);
3037
3038 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3039 chunk_offset);
3040 if (IS_ERR(trans)) {
3041 ret = PTR_ERR(trans);
3042 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3043 return ret;
3044 }
3045
3046 /*
3047 * step two, delete the device extents and the
3048 * chunk tree entries
3049 */
3050 ret = btrfs_remove_chunk(trans, chunk_offset);
3051 btrfs_end_transaction(trans);
3052 return ret;
3053}
3054
3055static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3056{
3057 struct btrfs_root *chunk_root = fs_info->chunk_root;
3058 struct btrfs_path *path;
3059 struct extent_buffer *leaf;
3060 struct btrfs_chunk *chunk;
3061 struct btrfs_key key;
3062 struct btrfs_key found_key;
3063 u64 chunk_type;
3064 bool retried = false;
3065 int failed = 0;
3066 int ret;
3067
3068 path = btrfs_alloc_path();
3069 if (!path)
3070 return -ENOMEM;
3071
3072again:
3073 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3074 key.offset = (u64)-1;
3075 key.type = BTRFS_CHUNK_ITEM_KEY;
3076
3077 while (1) {
3078 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3079 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3080 if (ret < 0) {
3081 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3082 goto error;
3083 }
3084 BUG_ON(ret == 0); /* Corruption */
3085
3086 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3087 key.type);
3088 if (ret)
3089 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3090 if (ret < 0)
3091 goto error;
3092 if (ret > 0)
3093 break;
3094
3095 leaf = path->nodes[0];
3096 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3097
3098 chunk = btrfs_item_ptr(leaf, path->slots[0],
3099 struct btrfs_chunk);
3100 chunk_type = btrfs_chunk_type(leaf, chunk);
3101 btrfs_release_path(path);
3102
3103 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3104 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3105 if (ret == -ENOSPC)
3106 failed++;
3107 else
3108 BUG_ON(ret);
3109 }
3110 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3111
3112 if (found_key.offset == 0)
3113 break;
3114 key.offset = found_key.offset - 1;
3115 }
3116 ret = 0;
3117 if (failed && !retried) {
3118 failed = 0;
3119 retried = true;
3120 goto again;
3121 } else if (WARN_ON(failed && retried)) {
3122 ret = -ENOSPC;
3123 }
3124error:
3125 btrfs_free_path(path);
3126 return ret;
3127}
3128
3129/*
3130 * return 1 : allocated a data chunk successfully,
3131 * return <0: error during data chunk allocation,
3132 * return 0 : no need to allocate a data chunk.
3133 */
3134static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3135 u64 chunk_offset)
3136{
3137 struct btrfs_block_group *cache;
3138 u64 bytes_used;
3139 u64 chunk_type;
3140
3141 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3142 ASSERT(cache);
3143 chunk_type = cache->flags;
3144 btrfs_put_block_group(cache);
3145
3146 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3147 return 0;
3148
3149 spin_lock(&fs_info->data_sinfo->lock);
3150 bytes_used = fs_info->data_sinfo->bytes_used;
3151 spin_unlock(&fs_info->data_sinfo->lock);
3152
3153 if (!bytes_used) {
3154 struct btrfs_trans_handle *trans;
3155 int ret;
3156
3157 trans = btrfs_join_transaction(fs_info->tree_root);
3158 if (IS_ERR(trans))
3159 return PTR_ERR(trans);
3160
3161 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3162 btrfs_end_transaction(trans);
3163 if (ret < 0)
3164 return ret;
3165 return 1;
3166 }
3167
3168 return 0;
3169}
3170
3171static int insert_balance_item(struct btrfs_fs_info *fs_info,
3172 struct btrfs_balance_control *bctl)
3173{
3174 struct btrfs_root *root = fs_info->tree_root;
3175 struct btrfs_trans_handle *trans;
3176 struct btrfs_balance_item *item;
3177 struct btrfs_disk_balance_args disk_bargs;
3178 struct btrfs_path *path;
3179 struct extent_buffer *leaf;
3180 struct btrfs_key key;
3181 int ret, err;
3182
3183 path = btrfs_alloc_path();
3184 if (!path)
3185 return -ENOMEM;
3186
3187 trans = btrfs_start_transaction(root, 0);
3188 if (IS_ERR(trans)) {
3189 btrfs_free_path(path);
3190 return PTR_ERR(trans);
3191 }
3192
3193 key.objectid = BTRFS_BALANCE_OBJECTID;
3194 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3195 key.offset = 0;
3196
3197 ret = btrfs_insert_empty_item(trans, root, path, &key,
3198 sizeof(*item));
3199 if (ret)
3200 goto out;
3201
3202 leaf = path->nodes[0];
3203 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3204
3205 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3206
3207 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3208 btrfs_set_balance_data(leaf, item, &disk_bargs);
3209 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3210 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3211 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3212 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3213
3214 btrfs_set_balance_flags(leaf, item, bctl->flags);
3215
3216 btrfs_mark_buffer_dirty(leaf);
3217out:
3218 btrfs_free_path(path);
3219 err = btrfs_commit_transaction(trans);
3220 if (err && !ret)
3221 ret = err;
3222 return ret;
3223}
3224
3225static int del_balance_item(struct btrfs_fs_info *fs_info)
3226{
3227 struct btrfs_root *root = fs_info->tree_root;
3228 struct btrfs_trans_handle *trans;
3229 struct btrfs_path *path;
3230 struct btrfs_key key;
3231 int ret, err;
3232
3233 path = btrfs_alloc_path();
3234 if (!path)
3235 return -ENOMEM;
3236
3237 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3238 if (IS_ERR(trans)) {
3239 btrfs_free_path(path);
3240 return PTR_ERR(trans);
3241 }
3242
3243 key.objectid = BTRFS_BALANCE_OBJECTID;
3244 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3245 key.offset = 0;
3246
3247 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3248 if (ret < 0)
3249 goto out;
3250 if (ret > 0) {
3251 ret = -ENOENT;
3252 goto out;
3253 }
3254
3255 ret = btrfs_del_item(trans, root, path);
3256out:
3257 btrfs_free_path(path);
3258 err = btrfs_commit_transaction(trans);
3259 if (err && !ret)
3260 ret = err;
3261 return ret;
3262}
3263
3264/*
3265 * This is a heuristic used to reduce the number of chunks balanced on
3266 * resume after balance was interrupted.
3267 */
3268static void update_balance_args(struct btrfs_balance_control *bctl)
3269{
3270 /*
3271 * Turn on soft mode for chunk types that were being converted.
3272 */
3273 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3274 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3275 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3276 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3277 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3278 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3279
3280 /*
3281 * Turn on the usage filter if it is not already used. The idea is
3282 * that chunks that we have already balanced should be
3283 * reasonably full. Don't do it for chunks that are being
3284 * converted - that will keep us from relocating unconverted
3285 * (albeit full) chunks.
3286 */
3287 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3288 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3289 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3290 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3291 bctl->data.usage = 90;
3292 }
3293 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3294 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3295 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3296 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3297 bctl->sys.usage = 90;
3298 }
3299 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3300 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3301 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3302 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3303 bctl->meta.usage = 90;
3304 }
3305}
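
/*
 * Editorial sketch, not part of the kernel source: the effect on a
 * resumed balance. Suppose a balance was started as "convert=raid1" for
 * data and then interrupted. On resume the saved args are rewritten to
 * the equivalent of:
 *
 *	convert=raid1,soft
 *
 * while a plain filter-less data balance would resume as:
 *
 *	usage=90
 *
 * so chunks that were already rewritten (and are therefore nearly full
 * or already converted) are not balanced a second time.
 */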
3306
3307/*
3308 * Clear the balance status in fs_info and delete the balance item from disk.
3309 */
3310static void reset_balance_state(struct btrfs_fs_info *fs_info)
3311{
3312 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3313 int ret;
3314
3315 BUG_ON(!fs_info->balance_ctl);
3316
3317 spin_lock(&fs_info->balance_lock);
3318 fs_info->balance_ctl = NULL;
3319 spin_unlock(&fs_info->balance_lock);
3320
3321 kfree(bctl);
3322 ret = del_balance_item(fs_info);
3323 if (ret)
3324 btrfs_handle_fs_error(fs_info, ret, NULL);
3325}
3326
3327/*
3328 * Balance filters. Return 1 if chunk should be filtered out
3329 * (should not be balanced).
3330 */
3331static int chunk_profiles_filter(u64 chunk_type,
3332 struct btrfs_balance_args *bargs)
3333{
3334 chunk_type = chunk_to_extended(chunk_type) &
3335 BTRFS_EXTENDED_PROFILE_MASK;
3336
3337 if (bargs->profiles & chunk_type)
3338 return 0;
3339
3340 return 1;
3341}
3342
3343static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3344 struct btrfs_balance_args *bargs)
3345{
3346 struct btrfs_block_group *cache;
3347 u64 chunk_used;
3348 u64 user_thresh_min;
3349 u64 user_thresh_max;
3350 int ret = 1;
3351
3352 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3353 chunk_used = cache->used;
3354
3355 if (bargs->usage_min == 0)
3356 user_thresh_min = 0;
3357 else
3358 user_thresh_min = div_factor_fine(cache->length,
3359 bargs->usage_min);
3360
3361 if (bargs->usage_max == 0)
3362 user_thresh_max = 1;
3363 else if (bargs->usage_max > 100)
3364 user_thresh_max = cache->length;
3365 else
3366 user_thresh_max = div_factor_fine(cache->length,
3367 bargs->usage_max);
3368
3369 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3370 ret = 0;
3371
3372 btrfs_put_block_group(cache);
3373 return ret;
3374}
3375
3376static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3377 u64 chunk_offset, struct btrfs_balance_args *bargs)
3378{
3379 struct btrfs_block_group *cache;
3380 u64 chunk_used, user_thresh;
3381 int ret = 1;
3382
3383 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3384 chunk_used = cache->used;
3385
3386 if (bargs->usage_min == 0)
3387 user_thresh = 1;
3388 else if (bargs->usage > 100)
3389 user_thresh = cache->length;
3390 else
3391 user_thresh = div_factor_fine(cache->length, bargs->usage);
3392
3393 if (chunk_used < user_thresh)
3394 ret = 0;
3395
3396 btrfs_put_block_group(cache);
3397 return ret;
3398}
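
/*
 * Editorial sketch, not part of the kernel source: worked numbers for
 * the filter above, using a hypothetical 1 GiB chunk and usage=50.
 * div_factor_fine() computes length * factor / 100, so:
 *
 *	user_thresh = 1 GiB * 50 / 100 = 512 MiB
 *
 * A chunk with 300 MiB used is below the threshold, ret becomes 0 and
 * the chunk is balanced; one with 700 MiB used stays at ret == 1 and is
 * filtered out.
 */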
3399
3400static int chunk_devid_filter(struct extent_buffer *leaf,
3401 struct btrfs_chunk *chunk,
3402 struct btrfs_balance_args *bargs)
3403{
3404 struct btrfs_stripe *stripe;
3405 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3406 int i;
3407
3408 for (i = 0; i < num_stripes; i++) {
3409 stripe = btrfs_stripe_nr(chunk, i);
3410 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3411 return 0;
3412 }
3413
3414 return 1;
3415}
3416
3417static u64 calc_data_stripes(u64 type, int num_stripes)
3418{
3419 const int index = btrfs_bg_flags_to_raid_index(type);
3420 const int ncopies = btrfs_raid_array[index].ncopies;
3421 const int nparity = btrfs_raid_array[index].nparity;
3422
3423 if (nparity)
3424 return num_stripes - nparity;
3425 else
3426 return num_stripes / ncopies;
3427}
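
/*
 * Editorial sketch, not part of the kernel source: a few worked values
 * from btrfs_raid_array. For raid5 (nparity == 1) a 4-stripe chunk has
 * 4 - 1 = 3 data stripes; for raid6 (nparity == 2) a 6-stripe chunk has
 * 6 - 2 = 4. Mirrored profiles have no parity, so raid1 (ncopies == 2)
 * with 2 stripes yields 2 / 2 = 1 data stripe, and raid0 (ncopies == 1)
 * with 3 stripes yields all 3.
 */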
3428
3429/* [pstart, pend) */
3430static int chunk_drange_filter(struct extent_buffer *leaf,
3431 struct btrfs_chunk *chunk,
3432 struct btrfs_balance_args *bargs)
3433{
3434 struct btrfs_stripe *stripe;
3435 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3436 u64 stripe_offset;
3437 u64 stripe_length;
3438 u64 type;
3439 int factor;
3440 int i;
3441
3442 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3443 return 0;
3444
3445 type = btrfs_chunk_type(leaf, chunk);
3446 factor = calc_data_stripes(type, num_stripes);
3447
3448 for (i = 0; i < num_stripes; i++) {
3449 stripe = btrfs_stripe_nr(chunk, i);
3450 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3451 continue;
3452
3453 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3454 stripe_length = btrfs_chunk_length(leaf, chunk);
3455 stripe_length = div_u64(stripe_length, factor);
3456
3457 if (stripe_offset < bargs->pend &&
3458 stripe_offset + stripe_length > bargs->pstart)
3459 return 0;
3460 }
3461
3462 return 1;
3463}
3464
3465/* [vstart, vend) */
3466static int chunk_vrange_filter(struct extent_buffer *leaf,
3467 struct btrfs_chunk *chunk,
3468 u64 chunk_offset,
3469 struct btrfs_balance_args *bargs)
3470{
3471 if (chunk_offset < bargs->vend &&
3472 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3473 /* at least part of the chunk is inside this vrange */
3474 return 0;
3475
3476 return 1;
3477}
3478
3479static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3480 struct btrfs_chunk *chunk,
3481 struct btrfs_balance_args *bargs)
3482{
3483 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3484
3485 if (bargs->stripes_min <= num_stripes
3486 && num_stripes <= bargs->stripes_max)
3487 return 0;
3488
3489 return 1;
3490}
3491
3492static int chunk_soft_convert_filter(u64 chunk_type,
3493 struct btrfs_balance_args *bargs)
3494{
3495 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3496 return 0;
3497
3498 chunk_type = chunk_to_extended(chunk_type) &
3499 BTRFS_EXTENDED_PROFILE_MASK;
3500
3501 if (bargs->target == chunk_type)
3502 return 1;
3503
3504 return 0;
3505}
3506
3507static int should_balance_chunk(struct extent_buffer *leaf,
3508 struct btrfs_chunk *chunk, u64 chunk_offset)
3509{
3510 struct btrfs_fs_info *fs_info = leaf->fs_info;
3511 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3512 struct btrfs_balance_args *bargs = NULL;
3513 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3514
3515 /* type filter */
3516 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3517 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3518 return 0;
3519 }
3520
3521 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3522 bargs = &bctl->data;
3523 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3524 bargs = &bctl->sys;
3525 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3526 bargs = &bctl->meta;
3527
3528 /* profiles filter */
3529 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3530 chunk_profiles_filter(chunk_type, bargs)) {
3531 return 0;
3532 }
3533
3534 /* usage filter */
3535 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3536 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3537 return 0;
3538 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3539 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3540 return 0;
3541 }
3542
3543 /* devid filter */
3544 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3545 chunk_devid_filter(leaf, chunk, bargs)) {
3546 return 0;
3547 }
3548
3549 /* drange filter, makes sense only with devid filter */
3550 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3551 chunk_drange_filter(leaf, chunk, bargs)) {
3552 return 0;
3553 }
3554
3555 /* vrange filter */
3556 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3557 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3558 return 0;
3559 }
3560
3561 /* stripes filter */
3562 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3563 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3564 return 0;
3565 }
3566
3567 /* soft profile changing mode */
3568 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3569 chunk_soft_convert_filter(chunk_type, bargs)) {
3570 return 0;
3571 }
3572
3573 /*
3574 * limited by count, must be the last filter
3575 */
3576 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3577 if (bargs->limit == 0)
3578 return 0;
3579 else
3580 bargs->limit--;
3581 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3582 /*
3583 * Same logic as the 'limit' filter; the minimum cannot be
3584 * determined here because we do not have the global information
3585 * about the count of all chunks that satisfy the filters.
3586 */
3587 if (bargs->limit_max == 0)
3588 return 0;
3589 else
3590 bargs->limit_max--;
3591 }
3592
3593 return 1;
3594}
3595
3596static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3597{
3598 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3599 struct btrfs_root *chunk_root = fs_info->chunk_root;
3600 u64 chunk_type;
3601 struct btrfs_chunk *chunk;
3602 struct btrfs_path *path = NULL;
3603 struct btrfs_key key;
3604 struct btrfs_key found_key;
3605 struct extent_buffer *leaf;
3606 int slot;
3607 int ret;
3608 int enospc_errors = 0;
3609 bool counting = true;
3610 /* The single value limit and min/max limits use the same bytes in the balance args union; save them before the counting pass */
3611 u64 limit_data = bctl->data.limit;
3612 u64 limit_meta = bctl->meta.limit;
3613 u64 limit_sys = bctl->sys.limit;
3614 u32 count_data = 0;
3615 u32 count_meta = 0;
3616 u32 count_sys = 0;
3617 int chunk_reserved = 0;
3618
3619 path = btrfs_alloc_path();
3620 if (!path) {
3621 ret = -ENOMEM;
3622 goto error;
3623 }
3624
3625 /* zero out stat counters */
3626 spin_lock(&fs_info->balance_lock);
3627 memset(&bctl->stat, 0, sizeof(bctl->stat));
3628 spin_unlock(&fs_info->balance_lock);
3629again:
3630 if (!counting) {
3631 /*
3632 * The single value limit and min/max limits use the same bytes
3633 * in the balance args union; restore the saved values here.
3634 */
3635 bctl->data.limit = limit_data;
3636 bctl->meta.limit = limit_meta;
3637 bctl->sys.limit = limit_sys;
3638 }
3639 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3640 key.offset = (u64)-1;
3641 key.type = BTRFS_CHUNK_ITEM_KEY;
3642
3643 while (1) {
3644 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3645 atomic_read(&fs_info->balance_cancel_req)) {
3646 ret = -ECANCELED;
3647 goto error;
3648 }
3649
3650 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3651 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3652 if (ret < 0) {
3653 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3654 goto error;
3655 }
3656
3657 /*
3658 * This shouldn't happen; it means the last relocation
3659 * failed.
3660 */
3661 if (ret == 0)
3662 BUG(); /* FIXME break ? */
3663
3664 ret = btrfs_previous_item(chunk_root, path, 0,
3665 BTRFS_CHUNK_ITEM_KEY);
3666 if (ret) {
3667 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3668 ret = 0;
3669 break;
3670 }
3671
3672 leaf = path->nodes[0];
3673 slot = path->slots[0];
3674 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3675
3676 if (found_key.objectid != key.objectid) {
3677 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3678 break;
3679 }
3680
3681 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3682 chunk_type = btrfs_chunk_type(leaf, chunk);
3683
3684 if (!counting) {
3685 spin_lock(&fs_info->balance_lock);
3686 bctl->stat.considered++;
3687 spin_unlock(&fs_info->balance_lock);
3688 }
3689
3690 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3691
3692 btrfs_release_path(path);
3693 if (!ret) {
3694 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3695 goto loop;
3696 }
3697
3698 if (counting) {
3699 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3700 spin_lock(&fs_info->balance_lock);
3701 bctl->stat.expected++;
3702 spin_unlock(&fs_info->balance_lock);
3703
3704 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3705 count_data++;
3706 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3707 count_sys++;
3708 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3709 count_meta++;
3710
3711 goto loop;
3712 }
3713
3714 /*
3715 * Apply limit_min filter, no need to check if the LIMITS
3716 * filter is used, limit_min is 0 by default
3717 */
3718 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3719 count_data < bctl->data.limit_min)
3720 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3721 count_meta < bctl->meta.limit_min)
3722 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3723 count_sys < bctl->sys.limit_min)) {
3724 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3725 goto loop;
3726 }
3727
3728 if (!chunk_reserved) {
3729 /*
3730 * We may be relocating the only data chunk we have,
3731 * which could potentially end up losing the data raid
3732 * profile, so let's allocate an empty one in
3733 * advance.
3734 */
3735 ret = btrfs_may_alloc_data_chunk(fs_info,
3736 found_key.offset);
3737 if (ret < 0) {
3738 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3739 goto error;
3740 } else if (ret == 1) {
3741 chunk_reserved = 1;
3742 }
3743 }
3744
3745 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3746 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3747 if (ret == -ENOSPC) {
3748 enospc_errors++;
3749 } else if (ret == -ETXTBSY) {
3750 btrfs_info(fs_info,
3751 "skipping relocation of block group %llu due to active swapfile",
3752 found_key.offset);
3753 ret = 0;
3754 } else if (ret) {
3755 goto error;
3756 } else {
3757 spin_lock(&fs_info->balance_lock);
3758 bctl->stat.completed++;
3759 spin_unlock(&fs_info->balance_lock);
3760 }
3761loop:
3762 if (found_key.offset == 0)
3763 break;
3764 key.offset = found_key.offset - 1;
3765 }
3766
3767 if (counting) {
3768 btrfs_release_path(path);
3769 counting = false;
3770 goto again;
3771 }
3772error:
3773 btrfs_free_path(path);
3774 if (enospc_errors) {
3775 btrfs_info(fs_info, "%d enospc errors during balance",
3776 enospc_errors);
3777 if (!ret)
3778 ret = -ENOSPC;
3779 }
3780
3781 return ret;
3782}
3783
3784/**
3785 * alloc_profile_is_valid - see if a given profile is valid and reduced
3786 * @flags: profile to validate
3787 * @extended: if true @flags is treated as an extended profile
3788 */
3789static int alloc_profile_is_valid(u64 flags, int extended)
3790{
3791 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3792 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3793
3794 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3795
3796 /* 1) check that all other bits are zeroed */
3797 if (flags & ~mask)
3798 return 0;
3799
3800 /* 2) see if profile is reduced */
3801 if (flags == 0)
3802 return !extended; /* "0" is valid for usual profiles */
3803
3804 return has_single_bit_set(flags);
3805}
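
/*
 * Editorial sketch, not part of the kernel source: examples of the
 * checks above.
 *
 *	alloc_profile_is_valid(BTRFS_BLOCK_GROUP_DATA |
 *			       BTRFS_BLOCK_GROUP_RAID1, 0)    -> 1
 *	alloc_profile_is_valid(BTRFS_BLOCK_GROUP_RAID1 |
 *			       BTRFS_BLOCK_GROUP_RAID10, 0)   -> 0 (not reduced)
 *	alloc_profile_is_valid(BTRFS_BLOCK_GROUP_DATA, 0)     -> 1 (0 == single)
 *	alloc_profile_is_valid(0, 1)                          -> 0 (extended
 *				profiles must carry an explicit profile bit)
 */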
3806
3807static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3808{
3809 /* cancel requested || normal exit path */
3810 return atomic_read(&fs_info->balance_cancel_req) ||
3811 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3812 atomic_read(&fs_info->balance_cancel_req) == 0);
3813}
3814
3815/*
3816 * Validate target profile against allowed profiles and return true if it's OK.
3817 * Otherwise print an error message and return false.
3818 */
3819static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3820 const struct btrfs_balance_args *bargs,
3821 u64 allowed, const char *type)
3822{
3823 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3824 return true;
3825
3826 /* Profile is valid and does not have bits outside of the allowed set */
3827 if (alloc_profile_is_valid(bargs->target, 1) &&
3828 (bargs->target & ~allowed) == 0)
3829 return true;
3830
3831 btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3832 type, btrfs_bg_type_to_raid_name(bargs->target));
3833 return false;
3834}
3835
3836/*
3837 * Fill @buf with textual description of balance filter flags @bargs, up to
3838 * @size_buf including the terminating null. The output may be trimmed if it
3839 * does not fit into the provided buffer.
3840 */
3841static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3842 u32 size_buf)
3843{
3844 int ret;
3845 u32 size_bp = size_buf;
3846 char *bp = buf;
3847 u64 flags = bargs->flags;
3848 char tmp_buf[128] = {'\0'};
3849
3850 if (!flags)
3851 return;
3852
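/*
 * Append helpers: snprintf() returns the length the output would have
 * had, so ret >= size_bp means the text was truncated and we bail out
 * to out_overflow.
 */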
3853#define CHECK_APPEND_NOARG(a) \
3854 do { \
3855 ret = snprintf(bp, size_bp, (a)); \
3856 if (ret < 0 || ret >= size_bp) \
3857 goto out_overflow; \
3858 size_bp -= ret; \
3859 bp += ret; \
3860 } while (0)
3861
3862#define CHECK_APPEND_1ARG(a, v1) \
3863 do { \
3864 ret = snprintf(bp, size_bp, (a), (v1)); \
3865 if (ret < 0 || ret >= size_bp) \
3866 goto out_overflow; \
3867 size_bp -= ret; \
3868 bp += ret; \
3869 } while (0)
3870
3871#define CHECK_APPEND_2ARG(a, v1, v2) \
3872 do { \
3873 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
3874 if (ret < 0 || ret >= size_bp) \
3875 goto out_overflow; \
3876 size_bp -= ret; \
3877 bp += ret; \
3878 } while (0)
3879
3880 if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3881 CHECK_APPEND_1ARG("convert=%s,",
3882 btrfs_bg_type_to_raid_name(bargs->target));
3883
3884 if (flags & BTRFS_BALANCE_ARGS_SOFT)
3885 CHECK_APPEND_NOARG("soft,");
3886
3887 if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3888 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3889 sizeof(tmp_buf));
3890 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3891 }
3892
3893 if (flags & BTRFS_BALANCE_ARGS_USAGE)
3894 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3895
3896 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3897 CHECK_APPEND_2ARG("usage=%u..%u,",
3898 bargs->usage_min, bargs->usage_max);
3899
3900 if (flags & BTRFS_BALANCE_ARGS_DEVID)
3901 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3902
3903 if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3904 CHECK_APPEND_2ARG("drange=%llu..%llu,",
3905 bargs->pstart, bargs->pend);
3906
3907 if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3908 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3909 bargs->vstart, bargs->vend);
3910
3911 if (flags & BTRFS_BALANCE_ARGS_LIMIT)
3912 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
3913
3914 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
3915 CHECK_APPEND_2ARG("limit=%u..%u,",
3916 bargs->limit_min, bargs->limit_max);
3917
3918 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
3919 CHECK_APPEND_2ARG("stripes=%u..%u,",
3920 bargs->stripes_min, bargs->stripes_max);
3921
3922#undef CHECK_APPEND_2ARG
3923#undef CHECK_APPEND_1ARG
3924#undef CHECK_APPEND_NOARG
3925
3926out_overflow:
3927
3928 if (size_bp < size_buf)
3929 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
3930 else
3931 buf[0] = '\0';
3932}
3933
3934static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
3935{
3936 u32 size_buf = 1024;
3937 char tmp_buf[192] = {'\0'};
3938 char *buf;
3939 char *bp;
3940 u32 size_bp = size_buf;
3941 int ret;
3942 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3943
3944 buf = kzalloc(size_buf, GFP_KERNEL);
3945 if (!buf)
3946 return;
3947
3948 bp = buf;
3949
3950#define CHECK_APPEND_1ARG(a, v1) \
3951 do { \
3952 ret = snprintf(bp, size_bp, (a), (v1)); \
3953 if (ret < 0 || ret >= size_bp) \
3954 goto out_overflow; \
3955 size_bp -= ret; \
3956 bp += ret; \
3957 } while (0)
3958
3959 if (bctl->flags & BTRFS_BALANCE_FORCE)
3960 CHECK_APPEND_1ARG("%s", "-f ");
3961
3962 if (bctl->flags & BTRFS_BALANCE_DATA) {
3963 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
3964 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
3965 }
3966
3967 if (bctl->flags & BTRFS_BALANCE_METADATA) {
3968 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
3969 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
3970 }
3971
3972 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
3973 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
3974 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
3975 }
3976
3977#undef CHECK_APPEND_1ARG
3978
3979out_overflow:
3980
3981 if (size_bp < size_buf)
3982 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
3983 btrfs_info(fs_info, "balance: %s %s",
3984 (bctl->flags & BTRFS_BALANCE_RESUME) ?
3985 "resume" : "start", buf);
3986
3987 kfree(buf);
3988}
3989
3990/*
3991 * Should be called with the balance mutex held
3992 */
3993int btrfs_balance(struct btrfs_fs_info *fs_info,
3994 struct btrfs_balance_control *bctl,
3995 struct btrfs_ioctl_balance_args *bargs)
3996{
3997 u64 meta_target, data_target;
3998 u64 allowed;
3999 int mixed = 0;
4000 int ret;
4001 u64 num_devices;
4002 unsigned seq;
4003 bool reducing_redundancy;
4004 int i;
4005
4006 if (btrfs_fs_closing(fs_info) ||
4007 atomic_read(&fs_info->balance_pause_req) ||
4008 btrfs_should_cancel_balance(fs_info)) {
4009 ret = -EINVAL;
4010 goto out;
4011 }
4012
4013 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4014 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4015 mixed = 1;
4016
4017 /*
4018 * In case of mixed groups both data and metadata must be balanced,
4019 * and identical options must be given for both of them.
4020 */
4021 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4022 if (mixed && (bctl->flags & allowed)) {
4023 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4024 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4025 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4026 btrfs_err(fs_info,
4027 "balance: mixed groups data and metadata options must be the same");
4028 ret = -EINVAL;
4029 goto out;
4030 }
4031 }
4032
4033 /*
4034 * rw_devices will not change at the moment, as device
4035 * add/delete/replace are exclusive operations.
4036 */
4037 num_devices = fs_info->fs_devices->rw_devices;
4038
4039 /*
4040 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4041 * special bit for it, to make it easier to distinguish. Thus we need
4042 * to set it manually, or balance would refuse the profile.
4043 */
4044 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4045 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4046 if (num_devices >= btrfs_raid_array[i].devs_min)
4047 allowed |= btrfs_raid_array[i].bg_flag;
4048
4049 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4050 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4051 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4052 ret = -EINVAL;
4053 goto out;
4054 }
4055
4056 /*
4057 * Allow reducing metadata or system integrity only if force is set,
4058 * for profiles with redundancy (copies, parity).
4059 */
4060 allowed = 0;
4061 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4062 if (btrfs_raid_array[i].ncopies >= 2 ||
4063 btrfs_raid_array[i].tolerated_failures >= 1)
4064 allowed |= btrfs_raid_array[i].bg_flag;
4065 }
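/*
 * Sample the available alloc bits under the profiles seqlock so that
 * the redundancy check and the chosen target profiles are consistent
 * with each other.
 */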
4066 do {
4067 seq = read_seqbegin(&fs_info->profiles_lock);
4068
4069 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4070 (fs_info->avail_system_alloc_bits & allowed) &&
4071 !(bctl->sys.target & allowed)) ||
4072 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4073 (fs_info->avail_metadata_alloc_bits & allowed) &&
4074 !(bctl->meta.target & allowed)))
4075 reducing_redundancy = true;
4076 else
4077 reducing_redundancy = false;
4078
4079 /* if we're not converting, the target field is uninitialized */
4080 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4081 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4082 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4083 bctl->data.target : fs_info->avail_data_alloc_bits;
4084 } while (read_seqretry(&fs_info->profiles_lock, seq));
4085
4086 if (reducing_redundancy) {
4087 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4088 btrfs_info(fs_info,
4089 "balance: force reducing metadata redundancy");
4090 } else {
4091 btrfs_err(fs_info,
4092 "balance: reduces metadata redundancy, use --force if you want this");
4093 ret = -EINVAL;
4094 goto out;
4095 }
4096 }
4097
4098 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4099 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4100 btrfs_warn(fs_info,
4101 "balance: metadata profile %s has lower redundancy than data profile %s",
4102 btrfs_bg_type_to_raid_name(meta_target),
4103 btrfs_bg_type_to_raid_name(data_target));
4104 }
4105
4106 if (fs_info->send_in_progress) {
4107 btrfs_warn_rl(fs_info,
4108"cannot run balance while send operations are in progress (%d in progress)",
4109 fs_info->send_in_progress);
4110 ret = -EAGAIN;
4111 goto out;
4112 }
4113
4114 ret = insert_balance_item(fs_info, bctl);
4115 if (ret && ret != -EEXIST)
4116 goto out;
4117
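/*
 * -EEXIST means the balance item is already stored on disk, which is
 * expected only when resuming a balance; the BUG_ONs below assert that
 * the resume flag and the item state agree.
 */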
4118 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4119 BUG_ON(ret == -EEXIST);
4120 BUG_ON(fs_info->balance_ctl);
4121 spin_lock(&fs_info->balance_lock);
4122 fs_info->balance_ctl = bctl;
4123 spin_unlock(&fs_info->balance_lock);
4124 } else {
4125 BUG_ON(ret != -EEXIST);
4126 spin_lock(&fs_info->balance_lock);
4127 update_balance_args(bctl);
4128 spin_unlock(&fs_info->balance_lock);
4129 }
4130
4131 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4132 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4133 describe_balance_start_or_resume(fs_info);
4134 mutex_unlock(&fs_info->balance_mutex);
4135
4136 ret = __btrfs_balance(fs_info);
4137
4138 mutex_lock(&fs_info->balance_mutex);
4139 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4140 btrfs_info(fs_info, "balance: paused");
4141 /*
4142 * Balance can be canceled by:
4143 *
4144 * - Regular cancel request
4145 * Then ret == -ECANCELED and balance_cancel_req > 0
4146 *
4147 * - Fatal signal to "btrfs" process
4148 * Either the signal was caught by wait_reserve_ticket() and the
4149 * callers got -EINTR, or it was caught by
4150 * btrfs_should_cancel_balance() and they got -ECANCELED.
4151 * Either way, in this case balance_cancel_req = 0, and
4152 * ret == -EINTR or ret == -ECANCELED.
4153 *
4154 * So here we only check the return value to catch canceled balance.
4155 */
4156 else if (ret == -ECANCELED || ret == -EINTR)
4157 btrfs_info(fs_info, "balance: canceled");
4158 else
4159 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4160
4161 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4162
4163 if (bargs) {
4164 memset(bargs, 0, sizeof(*bargs));
4165 btrfs_update_ioctl_balance_args(fs_info, bargs);
4166 }
4167
4168 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4169 balance_need_close(fs_info)) {
4170 reset_balance_state(fs_info);
4171 btrfs_exclop_finish(fs_info);
4172 }
4173
4174 wake_up(&fs_info->balance_wait_q);
4175
4176 return ret;
4177out:
4178 if (bctl->flags & BTRFS_BALANCE_RESUME)
4179 reset_balance_state(fs_info);
4180 else
4181 kfree(bctl);
4182 btrfs_exclop_finish(fs_info);
4183
4184 return ret;
4185}
4186
4187static int balance_kthread(void *data)
4188{
4189 struct btrfs_fs_info *fs_info = data;
4190 int ret = 0;
4191
4192 mutex_lock(&fs_info->balance_mutex);
4193 if (fs_info->balance_ctl)
4194 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4195 mutex_unlock(&fs_info->balance_mutex);
4196
4197 return ret;
4198}
4199
4200int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4201{
4202 struct task_struct *tsk;
4203
4204 mutex_lock(&fs_info->balance_mutex);
4205 if (!fs_info->balance_ctl) {
4206 mutex_unlock(&fs_info->balance_mutex);
4207 return 0;
4208 }
4209 mutex_unlock(&fs_info->balance_mutex);
4210
4211 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4212 btrfs_info(fs_info, "balance: resume skipped");
4213 return 0;
4214 }
4215
4216 /*
4217 * A ro->rw remount sequence should continue with the paused balance
4218 * regardless of who paused it, the system or the user, so set
4219 * the resume flag.
4220 */
4221 spin_lock(&fs_info->balance_lock);
4222 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4223 spin_unlock(&fs_info->balance_lock);
4224
4225 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4226 return PTR_ERR_OR_ZERO(tsk);
4227}
4228
4229int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4230{
4231 struct btrfs_balance_control *bctl;
4232 struct btrfs_balance_item *item;
4233 struct btrfs_disk_balance_args disk_bargs;
4234 struct btrfs_path *path;
4235 struct extent_buffer *leaf;
4236 struct btrfs_key key;
4237 int ret;
4238
4239 path = btrfs_alloc_path();
4240 if (!path)
4241 return -ENOMEM;
4242
4243 key.objectid = BTRFS_BALANCE_OBJECTID;
4244 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4245 key.offset = 0;
4246
4247 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4248 if (ret < 0)
4249 goto out;
4250 if (ret > 0) { /* ret = -ENOENT; */
4251 ret = 0;
4252 goto out;
4253 }
4254
4255 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4256 if (!bctl) {
4257 ret = -ENOMEM;
4258 goto out;
4259 }
4260
4261 leaf = path->nodes[0];
4262 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4263
4264 bctl->flags = btrfs_balance_flags(leaf, item);
4265 bctl->flags |= BTRFS_BALANCE_RESUME;
4266
4267 btrfs_balance_data(leaf, item, &disk_bargs);
4268 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4269 btrfs_balance_meta(leaf, item, &disk_bargs);
4270 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4271 btrfs_balance_sys(leaf, item, &disk_bargs);
4272 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4273
4274 /*
4275 * This should never happen, as the paused balance state is recovered
4276 * during mount without any chance of other exclusive ops to collide.
4277 *
4278 * This gives the exclusive op status to balance and keeps it in paused
4279 * state until user intervention (cancel or umount). If the ownership
4280 * cannot be assigned, show a message but do not fail. The balance
4281 * is in a paused state and must have fs_info::balance_ctl properly
4282 * set up.
4283 */
4284 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4285 btrfs_warn(fs_info,
4286 "balance: cannot set exclusive op status, resume manually");
4287
4288 mutex_lock(&fs_info->balance_mutex);
4289 BUG_ON(fs_info->balance_ctl);
4290 spin_lock(&fs_info->balance_lock);
4291 fs_info->balance_ctl = bctl;
4292 spin_unlock(&fs_info->balance_lock);
4293 mutex_unlock(&fs_info->balance_mutex);
4294out:
4295 btrfs_free_path(path);
4296 return ret;
4297}
4298
4299int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4300{
4301 int ret = 0;
4302
4303 mutex_lock(&fs_info->balance_mutex);
4304 if (!fs_info->balance_ctl) {
4305 mutex_unlock(&fs_info->balance_mutex);
4306 return -ENOTCONN;
4307 }
4308
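/*
 * A raised balance_pause_req makes the running balance stop at the
 * next opportunity; wait until BTRFS_FS_BALANCE_RUNNING is cleared.
 */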
4309 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4310 atomic_inc(&fs_info->balance_pause_req);
4311 mutex_unlock(&fs_info->balance_mutex);
4312
4313 wait_event(fs_info->balance_wait_q,
4314 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4315
4316 mutex_lock(&fs_info->balance_mutex);
4317 /* we are good with balance_ctl ripped off from under us */
4318 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4319 atomic_dec(&fs_info->balance_pause_req);
4320 } else {
4321 ret = -ENOTCONN;
4322 }
4323
4324 mutex_unlock(&fs_info->balance_mutex);
4325 return ret;
4326}
4327
4328int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4329{
4330 mutex_lock(&fs_info->balance_mutex);
4331 if (!fs_info->balance_ctl) {
4332 mutex_unlock(&fs_info->balance_mutex);
4333 return -ENOTCONN;
4334 }
4335
4336 /*
4337 * A paused balance with the item stored on disk can be resumed at
4338 * mount time if the mount is read-write. Otherwise it's still paused
4339 * and we must not allow cancelling as it deletes the item.
4340 */
4341 if (sb_rdonly(fs_info->sb)) {
4342 mutex_unlock(&fs_info->balance_mutex);
4343 return -EROFS;
4344 }
4345
4346 atomic_inc(&fs_info->balance_cancel_req);
4347 /*
4348 * If balance is running, just wait for it to finish; the balance
4349 * item is deleted by btrfs_balance() in that case.
4350 */
4351 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4352 mutex_unlock(&fs_info->balance_mutex);
4353 wait_event(fs_info->balance_wait_q,
4354 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4355 mutex_lock(&fs_info->balance_mutex);
4356 } else {
4357 mutex_unlock(&fs_info->balance_mutex);
4358 /*
4359 * The lock was released to allow other waiters to continue,
4360 * so reexamine the status here.
4361 */
4362 mutex_lock(&fs_info->balance_mutex);
4363
4364 if (fs_info->balance_ctl) {
4365 reset_balance_state(fs_info);
4366 btrfs_exclop_finish(fs_info);
4367 btrfs_info(fs_info, "balance: canceled");
4368 }
4369 }
4370
4371 BUG_ON(fs_info->balance_ctl ||
4372 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4373 atomic_dec(&fs_info->balance_cancel_req);
4374 mutex_unlock(&fs_info->balance_mutex);
4375 return 0;
4376}
4377
4378int btrfs_uuid_scan_kthread(void *data)
4379{
4380 struct btrfs_fs_info *fs_info = data;
4381 struct btrfs_root *root = fs_info->tree_root;
4382 struct btrfs_key key;
4383 struct btrfs_path *path = NULL;
4384 int ret = 0;
4385 struct extent_buffer *eb;
4386 int slot;
4387 struct btrfs_root_item root_item;
4388 u32 item_size;
4389 struct btrfs_trans_handle *trans = NULL;
4390 bool closing = false;
4391
4392 path = btrfs_alloc_path();
4393 if (!path) {
4394 ret = -ENOMEM;
4395 goto out;
4396 }
4397
4398 key.objectid = 0;
4399 key.type = BTRFS_ROOT_ITEM_KEY;
4400 key.offset = 0;
4401
4402 while (1) {
4403 if (btrfs_fs_closing(fs_info)) {
4404 closing = true;
4405 break;
4406 }
4407 ret = btrfs_search_forward(root, &key, path,
4408 BTRFS_OLDEST_GENERATION);
4409 if (ret) {
4410 if (ret > 0)
4411 ret = 0;
4412 break;
4413 }
4414
4415 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4416 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4417 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4418 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4419 goto skip;
4420
4421 eb = path->nodes[0];
4422 slot = path->slots[0];
4423 item_size = btrfs_item_size_nr(eb, slot);
4424 if (item_size < sizeof(root_item))
4425 goto skip;
4426
4427 read_extent_buffer(eb, &root_item,
4428 btrfs_item_ptr_offset(eb, slot),
4429 (int)sizeof(root_item));
4430 if (btrfs_root_refs(&root_item) == 0)
4431 goto skip;
4432
4433 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4434 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4435 if (trans)
4436 goto update_tree;
4437
4438 btrfs_release_path(path);
4439 /*
4440 * 1 - subvol uuid item
4441 * 1 - received_subvol uuid item
4442 */
4443 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4444 if (IS_ERR(trans)) {
4445 ret = PTR_ERR(trans);
4446 break;
4447 }
4448 continue;
4449 } else {
4450 goto skip;
4451 }
4452update_tree:
4453 btrfs_release_path(path);
4454 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4455 ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4456 BTRFS_UUID_KEY_SUBVOL,
4457 key.objectid);
4458 if (ret < 0) {
4459 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4460 ret);
4461 break;
4462 }
4463 }
4464
4465 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4466 ret = btrfs_uuid_tree_add(trans,
4467 root_item.received_uuid,
4468 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4469 key.objectid);
4470 if (ret < 0) {
4471 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4472 ret);
4473 break;
4474 }
4475 }
4476
4477skip:
4478 btrfs_release_path(path);
4479 if (trans) {
4480 ret = btrfs_end_transaction(trans);
4481 trans = NULL;
4482 if (ret)
4483 break;
4484 }
4485
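/*
 * Advance the search key: first the offset, then the type, then the
 * objectid, and stop once all of them are exhausted.
 */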
4486 if (key.offset < (u64)-1) {
4487 key.offset++;
4488 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4489 key.offset = 0;
4490 key.type = BTRFS_ROOT_ITEM_KEY;
4491 } else if (key.objectid < (u64)-1) {
4492 key.offset = 0;
4493 key.type = BTRFS_ROOT_ITEM_KEY;
4494 key.objectid++;
4495 } else {
4496 break;
4497 }
4498 cond_resched();
4499 }
4500
4501out:
4502 btrfs_free_path(path);
4503 if (trans && !IS_ERR(trans))
4504 btrfs_end_transaction(trans);
4505 if (ret)
4506 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4507 else if (!closing)
4508 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4509 up(&fs_info->uuid_tree_rescan_sem);
4510 return 0;
4511}
4512
4513int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4514{
4515 struct btrfs_trans_handle *trans;
4516 struct btrfs_root *tree_root = fs_info->tree_root;
4517 struct btrfs_root *uuid_root;
4518 struct task_struct *task;
4519 int ret;
4520
4521 /*
4522 * 1 - root node
4523 * 1 - root item
4524 */
4525 trans = btrfs_start_transaction(tree_root, 2);
4526 if (IS_ERR(trans))
4527 return PTR_ERR(trans);
4528
4529 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4530 if (IS_ERR(uuid_root)) {
4531 ret = PTR_ERR(uuid_root);
4532 btrfs_abort_transaction(trans, ret);
4533 btrfs_end_transaction(trans);
4534 return ret;
4535 }
4536
4537 fs_info->uuid_root = uuid_root;
4538
4539 ret = btrfs_commit_transaction(trans);
4540 if (ret)
4541 return ret;
4542
4543 down(&fs_info->uuid_tree_rescan_sem);
4544 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4545 if (IS_ERR(task)) {
4546 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4547 btrfs_warn(fs_info, "failed to start uuid_scan task");
4548 up(&fs_info->uuid_tree_rescan_sem);
4549 return PTR_ERR(task);
4550 }
4551
4552 return 0;
4553}
4554
4555/*
4556 * Shrinking a device means finding all of the device extents past
4557 * the new size, and then following the back refs to the chunks.
4558 * The chunk relocation code actually frees the device extents.
4559 */
4560int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4561{
4562 struct btrfs_fs_info *fs_info = device->fs_info;
4563 struct btrfs_root *root = fs_info->dev_root;
4564 struct btrfs_trans_handle *trans;
4565 struct btrfs_dev_extent *dev_extent = NULL;
4566 struct btrfs_path *path;
4567 u64 length;
4568 u64 chunk_offset;
4569 int ret;
4570 int slot;
4571 int failed = 0;
4572 bool retried = false;
4573 struct extent_buffer *l;
4574 struct btrfs_key key;
4575 struct btrfs_super_block *super_copy = fs_info->super_copy;
4576 u64 old_total = btrfs_super_total_bytes(super_copy);
4577 u64 old_size = btrfs_device_get_total_bytes(device);
4578 u64 diff;
4579 u64 start;
4580
4581 new_size = round_down(new_size, fs_info->sectorsize);
4582 start = new_size;
4583 diff = round_down(old_size - new_size, fs_info->sectorsize);
4584
4585 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4586 return -EINVAL;
4587
4588 path = btrfs_alloc_path();
4589 if (!path)
4590 return -ENOMEM;
4591
4592 path->reada = READA_BACK;
4593
4594 trans = btrfs_start_transaction(root, 0);
4595 if (IS_ERR(trans)) {
4596 btrfs_free_path(path);
4597 return PTR_ERR(trans);
4598 }
4599
4600 mutex_lock(&fs_info->chunk_mutex);
4601
4602 btrfs_device_set_total_bytes(device, new_size);
4603 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4604 device->fs_devices->total_rw_bytes -= diff;
4605 atomic64_sub(diff, &fs_info->free_chunk_space);
4606 }
4607
4608 /*
4609 * Once the device's size has been set to the new size, ensure all
4610 * in-memory chunks are synced to disk so that the loop below sees them
4611 * and relocates them accordingly.
4612 */
4613 if (contains_pending_extent(device, &start, diff)) {
4614 mutex_unlock(&fs_info->chunk_mutex);
4615 ret = btrfs_commit_transaction(trans);
4616 if (ret)
4617 goto done;
4618 } else {
4619 mutex_unlock(&fs_info->chunk_mutex);
4620 btrfs_end_transaction(trans);
4621 }
4622
4623again:
4624 key.objectid = device->devid;
4625 key.offset = (u64)-1;
4626 key.type = BTRFS_DEV_EXTENT_KEY;
4627
4628 do {
4629 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4630 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4631 if (ret < 0) {
4632 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4633 goto done;
4634 }
4635
4636 ret = btrfs_previous_item(root, path, 0, key.type);
4637 if (ret)
4638 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4639 if (ret < 0)
4640 goto done;
4641 if (ret) {
4642 ret = 0;
4643 btrfs_release_path(path);
4644 break;
4645 }
4646
4647 l = path->nodes[0];
4648 slot = path->slots[0];
4649 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4650
4651 if (key.objectid != device->devid) {
4652 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4653 btrfs_release_path(path);
4654 break;
4655 }
4656
4657 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4658 length = btrfs_dev_extent_length(l, dev_extent);
4659
4660 if (key.offset + length <= new_size) {
4661 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4662 btrfs_release_path(path);
4663 break;
4664 }
4665
4666 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4667 btrfs_release_path(path);
4668
4669 /*
4670 * We may be relocating the only data chunk we have,
4671 * which could potentially end up losing the data raid
4672 * profile, so let's allocate an empty one in
4673 * advance.
4674 */
4675 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4676 if (ret < 0) {
4677 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4678 goto done;
4679 }
4680
4681 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4682 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4683 if (ret == -ENOSPC) {
4684 failed++;
4685 } else if (ret) {
4686 if (ret == -ETXTBSY) {
4687 btrfs_warn(fs_info,
4688 "could not shrink block group %llu due to active swapfile",
4689 chunk_offset);
4690 }
4691 goto done;
4692 }
4693 } while (key.offset-- > 0);
4694
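/*
 * Some relocations may have failed with ENOSPC; retry the whole scan
 * once, and only report -ENOSPC if the retry fails as well.
 */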
4695 if (failed && !retried) {
4696 failed = 0;
4697 retried = true;
4698 goto again;
4699 } else if (failed && retried) {
4700 ret = -ENOSPC;
4701 goto done;
4702 }
4703
4704 /* Shrinking succeeded, else we would be at "done". */
4705 trans = btrfs_start_transaction(root, 0);
4706 if (IS_ERR(trans)) {
4707 ret = PTR_ERR(trans);
4708 goto done;
4709 }
4710
4711 mutex_lock(&fs_info->chunk_mutex);
4712 /* Clear all state bits beyond the shrunk device size */
4713 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4714 CHUNK_STATE_MASK);
4715
4716 btrfs_device_set_disk_total_bytes(device, new_size);
4717 if (list_empty(&device->post_commit_list))
4718 list_add_tail(&device->post_commit_list,
4719 &trans->transaction->dev_update_list);
4720
4721 WARN_ON(diff > old_total);
4722 btrfs_set_super_total_bytes(super_copy,
4723 round_down(old_total - diff, fs_info->sectorsize));
4724 mutex_unlock(&fs_info->chunk_mutex);
4725
4726 /* Now btrfs_update_device() will change the on-disk size. */
4727 ret = btrfs_update_device(trans, device);
4728 if (ret < 0) {
4729 btrfs_abort_transaction(trans, ret);
4730 btrfs_end_transaction(trans);
4731 } else {
4732 ret = btrfs_commit_transaction(trans);
4733 }
4734done:
4735 btrfs_free_path(path);
4736 if (ret) {
4737 mutex_lock(&fs_info->chunk_mutex);
4738 btrfs_device_set_total_bytes(device, old_size);
4739 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4740 device->fs_devices->total_rw_bytes += diff;
4741 atomic64_add(diff, &fs_info->free_chunk_space);
4742 mutex_unlock(&fs_info->chunk_mutex);
4743 }
4744 return ret;
4745}
4746
4747static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4748 struct btrfs_key *key,
4749 struct btrfs_chunk *chunk, int item_size)
4750{
4751 struct btrfs_super_block *super_copy = fs_info->super_copy;
4752 struct btrfs_disk_key disk_key;
4753 u32 array_size;
4754 u8 *ptr;
4755
4756 mutex_lock(&fs_info->chunk_mutex);
4757 array_size = btrfs_super_sys_array_size(super_copy);
4758 if (array_size + item_size + sizeof(disk_key)
4759 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4760 mutex_unlock(&fs_info->chunk_mutex);
4761 return -EFBIG;
4762 }
4763
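/* Append the disk key followed by the chunk item at the end of the array */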
4764 ptr = super_copy->sys_chunk_array + array_size;
4765 btrfs_cpu_key_to_disk(&disk_key, key);
4766 memcpy(ptr, &disk_key, sizeof(disk_key));
4767 ptr += sizeof(disk_key);
4768 memcpy(ptr, chunk, item_size);
4769 item_size += sizeof(disk_key);
4770 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4771 mutex_unlock(&fs_info->chunk_mutex);
4772
4773 return 0;
4774}
4775
4776/*
4777 * sort the devices in descending order by max_avail, total_avail
4778 */
4779static int btrfs_cmp_device_info(const void *a, const void *b)
4780{
4781 const struct btrfs_device_info *di_a = a;
4782 const struct btrfs_device_info *di_b = b;
4783
4784 if (di_a->max_avail > di_b->max_avail)
4785 return -1;
4786 if (di_a->max_avail < di_b->max_avail)
4787 return 1;
4788 if (di_a->total_avail > di_b->total_avail)
4789 return -1;
4790 if (di_a->total_avail < di_b->total_avail)
4791 return 1;
4792 return 0;
4793}
4794
4795static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4796{
4797 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4798 return;
4799
4800 btrfs_set_fs_incompat(info, RAID56);
4801}
4802
4803static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4804{
4805 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4806 return;
4807
4808 btrfs_set_fs_incompat(info, RAID1C34);
4809}
4810
4811/*
4812 * Structure used internally by btrfs_alloc_chunk() and its helpers.
4813 * Wraps the needed parameters.
4814 */
4815struct alloc_chunk_ctl {
4816 u64 start;
4817 u64 type;
4818 /* Total number of stripes to allocate */
4819 int num_stripes;
4820 /* sub_stripes info for map */
4821 int sub_stripes;
4822 /* Stripes per device */
4823 int dev_stripes;
4824 /* Maximum number of devices to use */
4825 int devs_max;
4826 /* Minimum number of devices to use */
4827 int devs_min;
4828 /* ndevs has to be a multiple of this */
4829 int devs_increment;
4830 /* Number of copies */
4831 int ncopies;
4832 /* Number of stripes worth of bytes to store parity information */
4833 int nparity;
4834 u64 max_stripe_size;
4835 u64 max_chunk_size;
4836 u64 dev_extent_min;
4837 u64 stripe_size;
4838 u64 chunk_size;
4839 int ndevs;
4840};
4841
4842static void init_alloc_chunk_ctl_policy_regular(
4843 struct btrfs_fs_devices *fs_devices,
4844 struct alloc_chunk_ctl *ctl)
4845{
4846 u64 type = ctl->type;
4847
4848 if (type & BTRFS_BLOCK_GROUP_DATA) {
4849 ctl->max_stripe_size = SZ_1G;
4850 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4851 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4852 /* For larger filesystems, use larger metadata chunks */
4853 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4854 ctl->max_stripe_size = SZ_1G;
4855 else
4856 ctl->max_stripe_size = SZ_256M;
4857 ctl->max_chunk_size = ctl->max_stripe_size;
4858 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4859 ctl->max_stripe_size = SZ_32M;
4860 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4861 ctl->devs_max = min_t(int, ctl->devs_max,
4862 BTRFS_MAX_DEVS_SYS_CHUNK);
4863 } else {
4864 BUG();
4865 }
4866
4867 /* We don't want a chunk larger than 10% of writable space */
4868 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4869 ctl->max_chunk_size);
4870 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4871}
4872
4873static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4874 struct alloc_chunk_ctl *ctl)
4875{
4876 int index = btrfs_bg_flags_to_raid_index(ctl->type);
4877
4878 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4879 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4880 ctl->devs_max = btrfs_raid_array[index].devs_max;
4881 if (!ctl->devs_max)
4882 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
4883 ctl->devs_min = btrfs_raid_array[index].devs_min;
4884 ctl->devs_increment = btrfs_raid_array[index].devs_increment;
4885 ctl->ncopies = btrfs_raid_array[index].ncopies;
4886 ctl->nparity = btrfs_raid_array[index].nparity;
4887 ctl->ndevs = 0;
4888
4889 switch (fs_devices->chunk_alloc_policy) {
4890 case BTRFS_CHUNK_ALLOC_REGULAR:
4891 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
4892 break;
4893 default:
4894 BUG();
4895 }
4896}
4897
4898static int gather_device_info(struct btrfs_fs_devices *fs_devices,
4899 struct alloc_chunk_ctl *ctl,
4900 struct btrfs_device_info *devices_info)
4901{
4902 struct btrfs_fs_info *info = fs_devices->fs_info;
4903 struct btrfs_device *device;
4904 u64 total_avail;
4905 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
4906 int ret;
4907 int ndevs = 0;
4908 u64 max_avail;
4909 u64 dev_offset;
4910
4911 /*
4912 * in the first pass through the devices list, we gather information
4913 * about the available holes on each device.
4914 */
4915 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4916 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4917 WARN(1, KERN_ERR
4918 "BTRFS: read-only device in alloc_list\n");
4919 continue;
4920 }
4921
4922 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4923 &device->dev_state) ||
4924 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4925 continue;
4926
4927 if (device->total_bytes > device->bytes_used)
4928 total_avail = device->total_bytes - device->bytes_used;
4929 else
4930 total_avail = 0;
4931
4932 /* If there is no space on this device, skip it. */
4933 if (total_avail < ctl->dev_extent_min)
4934 continue;
4935
4936 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
4937 &max_avail);
4938 if (ret && ret != -ENOSPC)
4939 return ret;
4940
4941 if (ret == 0)
4942 max_avail = dev_extent_want;
4943
4944 if (max_avail < ctl->dev_extent_min) {
4945 if (btrfs_test_opt(info, ENOSPC_DEBUG))
4946 btrfs_debug(info,
4947 "%s: devid %llu has no free space, have=%llu want=%llu",
4948 __func__, device->devid, max_avail,
4949 ctl->dev_extent_min);
4950 continue;
4951 }
4952
4953 if (ndevs == fs_devices->rw_devices) {
4954 WARN(1, "%s: found more than %llu devices\n",
4955 __func__, fs_devices->rw_devices);
4956 break;
4957 }
4958 devices_info[ndevs].dev_offset = dev_offset;
4959 devices_info[ndevs].max_avail = max_avail;
4960 devices_info[ndevs].total_avail = total_avail;
4961 devices_info[ndevs].dev = device;
4962 ++ndevs;
4963 }
4964 ctl->ndevs = ndevs;
4965
4966 /*
4967 * now sort the devices by hole size / available space
4968 */
4969 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4970 btrfs_cmp_device_info, NULL);
4971
4972 return 0;
4973}
4974
4975static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
4976 struct btrfs_device_info *devices_info)
4977{
4978 /* Number of stripes that count for block group size */
4979 int data_stripes;
4980
4981 /*
4982 * The primary goal is to maximize the number of stripes, so use as
4983 * many devices as possible, even if the stripes are not maximum sized.
4984 *
4985 * The DUP profile stores more than one stripe per device, the
4986 * max_avail is the total size so we have to adjust.
4987 */
4988 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
4989 ctl->dev_stripes);
4990 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
4991
4992 /* This will have to be fixed for RAID1 and RAID10 over more drives */
4993 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
4994
4995 /*
4996 * Use the number of data stripes to figure out how big this chunk is
4997 * really going to be in terms of logical address space, and compare
4998 * that answer with the max chunk size. If it's higher, we try to
4999 * reduce stripe_size.
5000 */
5001 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5002 /*
5003 * Reduce stripe_size, round it up to a 16MB boundary again and
5004 * then use it, unless it ends up being even bigger than the
5005 * previous value we had already.
5006 */
5007 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5008 data_stripes), SZ_16M),
5009 ctl->stripe_size);
5010 }
5011
5012 /* Align to BTRFS_STRIPE_LEN */
5013 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5014 ctl->chunk_size = ctl->stripe_size * data_stripes;
5015
5016 return 0;
5017}
5018
5019static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5020 struct alloc_chunk_ctl *ctl,
5021 struct btrfs_device_info *devices_info)
5022{
5023 struct btrfs_fs_info *info = fs_devices->fs_info;
5024
5025 /*
5026 * Round down to the number of usable stripes. devs_increment can be
5027 * any number, so we can't use round_down(), which requires a power
5028 * of 2, while rounddown() works for any value.
5029 */
5030 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5031
5032 if (ctl->ndevs < ctl->devs_min) {
5033 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5034 btrfs_debug(info,
5035 "%s: not enough devices with free space: have=%d minimum required=%d",
5036 __func__, ctl->ndevs, ctl->devs_min);
5037 }
5038 return -ENOSPC;
5039 }
5040
5041 ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5042
5043 switch (fs_devices->chunk_alloc_policy) {
5044 case BTRFS_CHUNK_ALLOC_REGULAR:
5045 return decide_stripe_size_regular(ctl, devices_info);
5046 default:
5047 BUG();
5048 }
5049}
5050
5051static int create_chunk(struct btrfs_trans_handle *trans,
5052 struct alloc_chunk_ctl *ctl,
5053 struct btrfs_device_info *devices_info)
5054{
5055 struct btrfs_fs_info *info = trans->fs_info;
5056 struct map_lookup *map = NULL;
5057 struct extent_map_tree *em_tree;
5058 struct extent_map *em;
5059 u64 start = ctl->start;
5060 u64 type = ctl->type;
5061 int ret;
5062 int i;
5063 int j;
5064
5065 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5066 if (!map)
5067 return -ENOMEM;
5068 map->num_stripes = ctl->num_stripes;
5069
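/* Lay out ctl->dev_stripes consecutive stripes on each selected device */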
5070 for (i = 0; i < ctl->ndevs; ++i) {
5071 for (j = 0; j < ctl->dev_stripes; ++j) {
5072 int s = i * ctl->dev_stripes + j;
5073 map->stripes[s].dev = devices_info[i].dev;
5074 map->stripes[s].physical = devices_info[i].dev_offset +
5075 j * ctl->stripe_size;
5076 }
5077 }
5078 map->stripe_len = BTRFS_STRIPE_LEN;
5079 map->io_align = BTRFS_STRIPE_LEN;
5080 map->io_width = BTRFS_STRIPE_LEN;
5081 map->type = type;
5082 map->sub_stripes = ctl->sub_stripes;
5083
5084 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5085
5086 em = alloc_extent_map();
5087 if (!em) {
5088 kfree(map);
5089 return -ENOMEM;
5090 }
5091 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5092 em->map_lookup = map;
5093 em->start = start;
5094 em->len = ctl->chunk_size;
5095 em->block_start = 0;
5096 em->block_len = em->len;
5097 em->orig_block_len = ctl->stripe_size;
5098
5099 em_tree = &info->mapping_tree;
5100 write_lock(&em_tree->lock);
5101 ret = add_extent_mapping(em_tree, em, 0);
5102 if (ret) {
5103 write_unlock(&em_tree->lock);
5104 free_extent_map(em);
5105 return ret;
5106 }
5107 write_unlock(&em_tree->lock);
5108
5109 ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5110 if (ret)
5111 goto error_del_extent;
5112
5113 for (i = 0; i < map->num_stripes; i++) {
5114 struct btrfs_device *dev = map->stripes[i].dev;
5115
5116 btrfs_device_set_bytes_used(dev,
5117 dev->bytes_used + ctl->stripe_size);
5118 if (list_empty(&dev->post_commit_list))
5119 list_add_tail(&dev->post_commit_list,
5120 &trans->transaction->dev_update_list);
5121 }
5122
5123 atomic64_sub(ctl->stripe_size * map->num_stripes,
5124 &info->free_chunk_space);
5125
5126 free_extent_map(em);
5127 check_raid56_incompat_flag(info, type);
5128 check_raid1c34_incompat_flag(info, type);
5129
5130 return 0;
5131
5132error_del_extent:
5133 write_lock(&em_tree->lock);
5134 remove_extent_mapping(em_tree, em);
5135 write_unlock(&em_tree->lock);
5136
5137 /* One for our allocation */
5138 free_extent_map(em);
5139 /* One for the tree reference */
5140 free_extent_map(em);
5141
5142 return ret;
5143}
5144
5145int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5146{
5147 struct btrfs_fs_info *info = trans->fs_info;
5148 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5149 struct btrfs_device_info *devices_info = NULL;
5150 struct alloc_chunk_ctl ctl;
5151 int ret;
5152
5153 lockdep_assert_held(&info->chunk_mutex);
5154
5155 if (!alloc_profile_is_valid(type, 0)) {
5156 ASSERT(0);
5157 return -EINVAL;
5158 }
5159
5160 if (list_empty(&fs_devices->alloc_list)) {
5161 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5162 btrfs_debug(info, "%s: no writable device", __func__);
5163 return -ENOSPC;
5164 }
5165
5166 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5167 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5168 ASSERT(0);
5169 return -EINVAL;
5170 }
5171
5172 ctl.start = find_next_chunk(info);
5173 ctl.type = type;
5174 init_alloc_chunk_ctl(fs_devices, &ctl);
5175
5176 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5177 GFP_NOFS);
5178 if (!devices_info)
5179 return -ENOMEM;
5180
5181 ret = gather_device_info(fs_devices, &ctl, devices_info);
5182 if (ret < 0)
5183 goto out;
5184
5185 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5186 if (ret < 0)
5187 goto out;
5188
5189 ret = create_chunk(trans, &ctl, devices_info);
5190
5191out:
5192 kfree(devices_info);
5193 return ret;
5194}
5195
5196/*
5197 * Chunk allocation falls into two parts. The first part does work
5198 * that makes the newly allocated chunk usable, but does not do any operation
5199 * that modifies the chunk tree. The second part does the work that
5200 * requires modifying the chunk tree. This division is important for the
5201 * bootstrap process of adding storage to a seed btrfs.
5202 */
5203int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5204 u64 chunk_offset, u64 chunk_size)
5205{
5206 struct btrfs_fs_info *fs_info = trans->fs_info;
5207 struct btrfs_root *extent_root = fs_info->extent_root;
5208 struct btrfs_root *chunk_root = fs_info->chunk_root;
5209 struct btrfs_key key;
5210 struct btrfs_device *device;
5211 struct btrfs_chunk *chunk;
5212 struct btrfs_stripe *stripe;
5213 struct extent_map *em;
5214 struct map_lookup *map;
5215 size_t item_size;
5216 u64 dev_offset;
5217 u64 stripe_size;
5218 int i = 0;
5219 int ret = 0;
5220
5221 em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5222 if (IS_ERR(em))
5223 return PTR_ERR(em);
5224
5225 map = em->map_lookup;
5226 item_size = btrfs_chunk_item_size(map->num_stripes);
5227 stripe_size = em->orig_block_len;
5228
5229 chunk = kzalloc(item_size, GFP_NOFS);
5230 if (!chunk) {
5231 ret = -ENOMEM;
5232 goto out;
5233 }
5234
5235 /*
5236 * Take the device list mutex to prevent races with the final phase of
5237 * a device replace operation that replaces the device object associated
5238 * with the map's stripes, because the device object's id can change
5239 * at any time during that final phase of the device replace operation
5240 * (dev-replace.c:btrfs_dev_replace_finishing()).
5241 */
5242 mutex_lock(&fs_info->fs_devices->device_list_mutex);
5243 for (i = 0; i < map->num_stripes; i++) {
5244 device = map->stripes[i].dev;
5245 dev_offset = map->stripes[i].physical;
5246
5247 ret = btrfs_update_device(trans, device);
5248 if (ret)
5249 break;
5250 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5251 dev_offset, stripe_size);
5252 if (ret)
5253 break;
5254 }
5255 if (ret) {
5256 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5257 goto out;
5258 }
5259
5260 stripe = &chunk->stripe;
5261 for (i = 0; i < map->num_stripes; i++) {
5262 device = map->stripes[i].dev;
5263 dev_offset = map->stripes[i].physical;
5264
5265 btrfs_set_stack_stripe_devid(stripe, device->devid);
5266 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5267 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5268 stripe++;
5269 }
5270 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5271
5272 btrfs_set_stack_chunk_length(chunk, chunk_size);
5273 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5274 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5275 btrfs_set_stack_chunk_type(chunk, map->type);
5276 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5277 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5278 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5279 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5280 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5281
5282 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5283 key.type = BTRFS_CHUNK_ITEM_KEY;
5284 key.offset = chunk_offset;
5285
5286 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5287 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5288 /*
5289 * TODO: Cleanup of inserted chunk root in case of
5290 * failure.
5291 */
5292 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5293 }
5294
5295out:
5296 kfree(chunk);
5297 free_extent_map(em);
5298 return ret;
5299}
5300
5301static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5302{
5303 struct btrfs_fs_info *fs_info = trans->fs_info;
5304 u64 alloc_profile;
5305 int ret;
5306
5307 alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5308 ret = btrfs_alloc_chunk(trans, alloc_profile);
5309 if (ret)
5310 return ret;
5311
5312 alloc_profile = btrfs_system_alloc_profile(fs_info);
5313 ret = btrfs_alloc_chunk(trans, alloc_profile);
5314 return ret;
5315}
5316
5317static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5318{
5319 const int index = btrfs_bg_flags_to_raid_index(map->type);
5320
5321 return btrfs_raid_array[index].tolerated_failures;
5322}
5323
5324int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5325{
5326 struct extent_map *em;
5327 struct map_lookup *map;
5328 int readonly = 0;
5329 int miss_ndevs = 0;
5330 int i;
5331
5332 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5333 if (IS_ERR(em))
5334 return 1;
5335
5336 map = em->map_lookup;
5337 for (i = 0; i < map->num_stripes; i++) {
5338 if (test_bit(BTRFS_DEV_STATE_MISSING,
5339 &map->stripes[i].dev->dev_state)) {
5340 miss_ndevs++;
5341 continue;
5342 }
5343 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5344 &map->stripes[i].dev->dev_state)) {
5345 readonly = 1;
5346 goto end;
5347 }
5348 }
5349
5350 /*
5351 * If the number of missing devices is larger than max errors,
5352 * we cannot write the data into that chunk successfully, so
5353 * set it readonly.
5354 */
5355 if (miss_ndevs > btrfs_chunk_max_errors(map))
5356 readonly = 1;
5357end:
5358 free_extent_map(em);
5359 return readonly;
5360}
5361
5362void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5363{
5364 struct extent_map *em;
5365
5366 while (1) {
5367 write_lock(&tree->lock);
5368 em = lookup_extent_mapping(tree, 0, (u64)-1);
5369 if (em)
5370 remove_extent_mapping(tree, em);
5371 write_unlock(&tree->lock);
5372 if (!em)
5373 break;
5374 /* once for us */
5375 free_extent_map(em);
5376 /* once for the tree */
5377 free_extent_map(em);
5378 }
5379}
5380
5381int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5382{
5383 struct extent_map *em;
5384 struct map_lookup *map;
5385 int ret;
5386
5387 em = btrfs_get_chunk_map(fs_info, logical, len);
5388 if (IS_ERR(em))
5389 /*
5390 * We could return errors for these cases, but that could get
5391 * ugly and we'd probably handle them the same way anyway, by
5392 * doing nothing else and exiting. Return 1 so the callers
5393 * don't try to use other copies.
5394 */
5395 return 1;
5396
5397 map = em->map_lookup;
5398 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5399 ret = map->num_stripes;
5400 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5401 ret = map->sub_stripes;
5402 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5403 ret = 2;
5404 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5405 /*
5406 * There could be two corrupted data stripes, so we
5407 * need to retry in a loop to rebuild the correct data.
5408 *
5409 * Fail a stripe at a time on every retry except the
5410 * stripe under reconstruction.
5411 */
5412 ret = map->num_stripes;
5413 else
5414 ret = 1;
5415 free_extent_map(em);
5416
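/* During a device replace the target device also holds a copy */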
5417 down_read(&fs_info->dev_replace.rwsem);
5418 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5419 fs_info->dev_replace.tgtdev)
5420 ret++;
5421 up_read(&fs_info->dev_replace.rwsem);
5422
5423 return ret;
5424}
5425
5426unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5427 u64 logical)
5428{
5429 struct extent_map *em;
5430 struct map_lookup *map;
5431 unsigned long len = fs_info->sectorsize;
5432
5433 em = btrfs_get_chunk_map(fs_info, logical, len);
5434
5435 if (!WARN_ON(IS_ERR(em))) {
5436 map = em->map_lookup;
5437 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5438 len = map->stripe_len * nr_data_stripes(map);
5439 free_extent_map(em);
5440 }
5441 return len;
5442}
5443
5444int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5445{
5446 struct extent_map *em;
5447 struct map_lookup *map;
5448 int ret = 0;
5449
5450 em = btrfs_get_chunk_map(fs_info, logical, len);
5451
5452 if (!WARN_ON(IS_ERR(em))) {
5453 map = em->map_lookup;
5454 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5455 ret = 1;
5456 free_extent_map(em);
5457 }
5458 return ret;
5459}
5460
5461static int find_live_mirror(struct btrfs_fs_info *fs_info,
5462 struct map_lookup *map, int first,
5463 int dev_replace_is_ongoing)
5464{
5465 int i;
5466 int num_stripes;
5467 int preferred_mirror;
5468 int tolerance;
5469 struct btrfs_device *srcdev;
5470
5471 ASSERT((map->type &
5472 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5473
5474 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5475 num_stripes = map->sub_stripes;
5476 else
5477 num_stripes = map->num_stripes;
5478
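/* Spread reads across the available mirrors based on the caller's pid */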
5479 preferred_mirror = first + current->pid % num_stripes;
5480
5481 if (dev_replace_is_ongoing &&
5482 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5483 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5484 srcdev = fs_info->dev_replace.srcdev;
5485 else
5486 srcdev = NULL;
5487
5488 /*
5489 * try to avoid the drive that is the source drive for a
5490 * dev-replace procedure, only choose it if no other non-missing
5491 * mirror is available
5492 */
5493 for (tolerance = 0; tolerance < 2; tolerance++) {
5494 if (map->stripes[preferred_mirror].dev->bdev &&
5495 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5496 return preferred_mirror;
5497 for (i = first; i < first + num_stripes; i++) {
5498 if (map->stripes[i].dev->bdev &&
5499 (tolerance || map->stripes[i].dev != srcdev))
5500 return i;
5501 }
5502 }
5503
5504 /* We couldn't find a mirror that doesn't fail. Just return something
5505 * and the I/O error handling code will clean up eventually.
5506 */
5507 return preferred_mirror;
5508}
5509
5510/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5511static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5512{
5513 int i;
5514 int again = 1;
5515
5516 while (again) {
5517 again = 0;
5518 for (i = 0; i < num_stripes - 1; i++) {
5519 /* Swap if parity is on a smaller index */
5520 if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5521 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5522 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5523 again = 1;
5524 }
5525 }
5526 }
5527}
5528
5529static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5530{
5531 struct btrfs_bio *bbio = kzalloc(
5532 /* the size of the btrfs_bio */
5533 sizeof(struct btrfs_bio) +
5534 /* plus the variable array for the stripes */
5535 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5536 /* plus the variable array for the tgt dev */
5537 sizeof(int) * (real_stripes) +
5538 /*
5539 * plus the raid_map, which includes both the tgt dev
5540 * and the stripes
5541 */
5542 sizeof(u64) * (total_stripes),
5543 GFP_NOFS|__GFP_NOFAIL);
5544
5545 atomic_set(&bbio->error, 0);
5546 refcount_set(&bbio->refs, 1);
5547
5548 bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5549 bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5550
5551 return bbio;
5552}
5553
5554void btrfs_get_bbio(struct btrfs_bio *bbio)
5555{
5556 WARN_ON(!refcount_read(&bbio->refs));
5557 refcount_inc(&bbio->refs);
5558}
5559
5560void btrfs_put_bbio(struct btrfs_bio *bbio)
5561{
5562 if (!bbio)
5563 return;
5564 if (refcount_dec_and_test(&bbio->refs))
5565 kfree(bbio);
5566}
5567
5568 /*
5569 * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
5570 * Note that discard won't be sent to the target device of a
5571 * device replace.
5572 */
5573static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5574 u64 logical, u64 *length_ret,
5575 struct btrfs_bio **bbio_ret)
5576{
5577 struct extent_map *em;
5578 struct map_lookup *map;
5579 struct btrfs_bio *bbio;
5580 u64 length = *length_ret;
5581 u64 offset;
5582 u64 stripe_nr;
5583 u64 stripe_nr_end;
5584 u64 stripe_end_offset;
5585 u64 stripe_cnt;
5586 u64 stripe_len;
5587 u64 stripe_offset;
5588 u64 num_stripes;
5589 u32 stripe_index;
5590 u32 factor = 0;
5591 u32 sub_stripes = 0;
5592 u64 stripes_per_dev = 0;
5593 u32 remaining_stripes = 0;
5594 u32 last_stripe = 0;
5595 int ret = 0;
5596 int i;
5597
5598 /* Discard always returns a bbio */
5599 ASSERT(bbio_ret);
5600
5601 em = btrfs_get_chunk_map(fs_info, logical, length);
5602 if (IS_ERR(em))
5603 return PTR_ERR(em);
5604
5605 map = em->map_lookup;
5606 /* we don't discard raid56 yet */
5607 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5608 ret = -EOPNOTSUPP;
5609 goto out;
5610 }
5611
5612 offset = logical - em->start;
5613 length = min_t(u64, em->start + em->len - logical, length);
5614 *length_ret = length;
5615
5616 stripe_len = map->stripe_len;
5617 /*
5618 * stripe_nr counts the total number of stripes we have to stride
5619 * to get to this block
5620 */
5621 stripe_nr = div64_u64(offset, stripe_len);
5622
5623 /* stripe_offset is the offset of this block in its stripe */
5624 stripe_offset = offset - stripe_nr * stripe_len;
5625
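/*
 * stripe_nr_end is the first stripe past the end of the discard range;
 * stripe_end_offset is how far the range ends short of that stripe
 * boundary.
 */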
5626 stripe_nr_end = round_up(offset + length, map->stripe_len);
5627 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5628 stripe_cnt = stripe_nr_end - stripe_nr;
5629 stripe_end_offset = stripe_nr_end * map->stripe_len -
5630 (offset + length);
5631 /*
5632 * after this, stripe_nr is the number of stripes on this
5633 * device we have to walk to find the data, and stripe_index is
5634 * the number of our device in the stripe array
5635 */
5636 num_stripes = 1;
5637 stripe_index = 0;
5638 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5639 BTRFS_BLOCK_GROUP_RAID10)) {
5640 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5641 sub_stripes = 1;
5642 else
5643 sub_stripes = map->sub_stripes;
5644
5645 factor = map->num_stripes / sub_stripes;
5646 num_stripes = min_t(u64, map->num_stripes,
5647 sub_stripes * stripe_cnt);
5648 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5649 stripe_index *= sub_stripes;
5650 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5651 &remaining_stripes);
5652 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5653 last_stripe *= sub_stripes;
5654 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5655 BTRFS_BLOCK_GROUP_DUP)) {
5656 num_stripes = map->num_stripes;
5657 } else {
5658 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5659 &stripe_index);
5660 }
5661
5662 bbio = alloc_btrfs_bio(num_stripes, 0);
5663 if (!bbio) {
5664 ret = -ENOMEM;
5665 goto out;
5666 }
5667
5668 for (i = 0; i < num_stripes; i++) {
5669 bbio->stripes[i].physical =
5670 map->stripes[stripe_index].physical +
5671 stripe_offset + stripe_nr * map->stripe_len;
5672 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5673
5674 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5675 BTRFS_BLOCK_GROUP_RAID10)) {
5676 bbio->stripes[i].length = stripes_per_dev *
5677 map->stripe_len;
5678
5679 if (i / sub_stripes < remaining_stripes)
5680 bbio->stripes[i].length +=
5681 map->stripe_len;
5682
5683 /*
5684 * Special for the first stripe and
5685 * the last stripe:
5686 *
5687 * |-------|...|-------|
5688 * |----------|
5689 * off end_off
5690 */
5691 if (i < sub_stripes)
5692 bbio->stripes[i].length -=
5693 stripe_offset;
5694
5695 if (stripe_index >= last_stripe &&
5696 stripe_index <= (last_stripe +
5697 sub_stripes - 1))
5698 bbio->stripes[i].length -=
5699 stripe_end_offset;
5700
5701 if (i == sub_stripes - 1)
5702 stripe_offset = 0;
5703 } else {
5704 bbio->stripes[i].length = length;
5705 }
5706
5707 stripe_index++;
5708 if (stripe_index == map->num_stripes) {
5709 stripe_index = 0;
5710 stripe_nr++;
5711 }
5712 }
5713
5714 *bbio_ret = bbio;
5715 bbio->map_type = map->type;
5716 bbio->num_stripes = num_stripes;
5717out:
5718 free_extent_map(em);
5719 return ret;
5720}
5721
5722/*
5723 * In the dev-replace case, for the repair case (the only case where the mirror
5724 * is selected explicitly when calling btrfs_map_block), blocks left of the
5725 * left cursor can also be read from the target drive.
5726 *
5727 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5728 * array of stripes.
5729 * For READ, it also needs to be supported using the same mirror number.
5730 *
5731 * If the requested block is not left of the left cursor, EIO is returned. This
5732 * can happen because btrfs_num_copies() returns one more in the dev-replace
5733 * case.
5734 */
5735static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5736 u64 logical, u64 length,
5737 u64 srcdev_devid, int *mirror_num,
5738 u64 *physical)
5739{
5740 struct btrfs_bio *bbio = NULL;
5741 int num_stripes;
5742 int index_srcdev = 0;
5743 int found = 0;
5744 u64 physical_of_found = 0;
5745 int i;
5746 int ret = 0;
5747
5748 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5749 logical, &length, &bbio, 0, 0);
5750 if (ret) {
5751 ASSERT(bbio == NULL);
5752 return ret;
5753 }
5754
5755 num_stripes = bbio->num_stripes;
5756 if (*mirror_num > num_stripes) {
5757 /*
5758 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5759 * that means that the requested area is not left of the left
5760 * cursor
5761 */
5762 btrfs_put_bbio(bbio);
5763 return -EIO;
5764 }
5765
5766 /*
5767 * process the rest of the function using the mirror_num of the source
5768 * drive. Therefore look it up first. At the end, patch the device
5769 * pointer to the one of the target drive.
5770 */
5771 for (i = 0; i < num_stripes; i++) {
5772 if (bbio->stripes[i].dev->devid != srcdev_devid)
5773 continue;
5774
5775 /*
5776 * In case of DUP, in order to keep it simple, only add the
5777 * mirror with the lowest physical address
5778 */
5779 if (found &&
5780 physical_of_found <= bbio->stripes[i].physical)
5781 continue;
5782
5783 index_srcdev = i;
5784 found = 1;
5785 physical_of_found = bbio->stripes[i].physical;
5786 }
5787
5788 btrfs_put_bbio(bbio);
5789
5790 ASSERT(found);
5791 if (!found)
5792 return -EIO;
5793
5794 *mirror_num = index_srcdev + 1;
5795 *physical = physical_of_found;
5796 return ret;
5797}
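
/*
 * Illustrative example of the mirror numbering involved: on a RAID1
 * chunk with two copies, btrfs_num_copies() reports 3 while a replace
 * is running. A repair read with mirror_num == 3 therefore has no
 * regular stripe to map to and ends up in the helper above, which
 * redirects it to the target drive (provided the block is left of the
 * left cursor).
 */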
5798
5799static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5800 struct btrfs_bio **bbio_ret,
5801 struct btrfs_dev_replace *dev_replace,
5802 int *num_stripes_ret, int *max_errors_ret)
5803{
5804 struct btrfs_bio *bbio = *bbio_ret;
5805 u64 srcdev_devid = dev_replace->srcdev->devid;
5806 int tgtdev_indexes = 0;
5807 int num_stripes = *num_stripes_ret;
5808 int max_errors = *max_errors_ret;
5809 int i;
5810
5811 if (op == BTRFS_MAP_WRITE) {
5812 int index_where_to_add;
5813
5814 /*
5815 * duplicate the write operations while the dev replace
5816 * procedure is running. Since the copying of the old disk to
5817 * the new disk takes place at run time while the filesystem is
5818 * mounted writable, the regular write operations to the old
5819 * disk have to be duplicated to go to the new disk as well.
5820 *
5821 * Note that device->missing is handled by the caller, and that
5822 * the write to the old disk is already set up in the stripes
5823 * array.
5824 */
5825 index_where_to_add = num_stripes;
5826 for (i = 0; i < num_stripes; i++) {
5827 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5828 /* write to new disk, too */
5829 struct btrfs_bio_stripe *new =
5830 bbio->stripes + index_where_to_add;
5831 struct btrfs_bio_stripe *old =
5832 bbio->stripes + i;
5833
5834 new->physical = old->physical;
5835 new->length = old->length;
5836 new->dev = dev_replace->tgtdev;
5837 bbio->tgtdev_map[i] = index_where_to_add;
5838 index_where_to_add++;
5839 max_errors++;
5840 tgtdev_indexes++;
5841 }
5842 }
5843 num_stripes = index_where_to_add;
5844 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5845 int index_srcdev = 0;
5846 int found = 0;
5847 u64 physical_of_found = 0;
5848
5849 /*
5850 * During the dev-replace procedure, the target drive can also
5851 * be used to read data in case it is needed to repair a corrupt
5852 * block elsewhere. This is possible if the requested area is
5853 * left of the left cursor. In this area, the target drive is a
5854 * full copy of the source drive.
5855 */
5856 for (i = 0; i < num_stripes; i++) {
5857 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5858 /*
5859 * In case of DUP, in order to keep it simple,
5860 * only add the mirror with the lowest physical
5861 * address
5862 */
5863 if (found &&
5864 physical_of_found <=
5865 bbio->stripes[i].physical)
5866 continue;
5867 index_srcdev = i;
5868 found = 1;
5869 physical_of_found = bbio->stripes[i].physical;
5870 }
5871 }
5872 if (found) {
5873 struct btrfs_bio_stripe *tgtdev_stripe =
5874 bbio->stripes + num_stripes;
5875
5876 tgtdev_stripe->physical = physical_of_found;
5877 tgtdev_stripe->length =
5878 bbio->stripes[index_srcdev].length;
5879 tgtdev_stripe->dev = dev_replace->tgtdev;
5880 bbio->tgtdev_map[index_srcdev] = num_stripes;
5881
5882 tgtdev_indexes++;
5883 num_stripes++;
5884 }
5885 }
5886
5887 *num_stripes_ret = num_stripes;
5888 *max_errors_ret = max_errors;
5889 bbio->num_tgtdevs = tgtdev_indexes;
5890 *bbio_ret = bbio;
5891}
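
/*
 * Sketch of what the BTRFS_MAP_WRITE branch above does to the stripe
 * array (hypothetical two-stripe RAID1 chunk with stripe 0 on the
 * source device of the replace):
 *
 *   before: stripes[] = { src, other }
 *   after:  stripes[] = { src, other, tgt }, tgtdev_map[0] = 2
 *
 * The appended stripe duplicates the write to the new disk, and
 * max_errors is raised by one so that a failure on the copy under
 * construction alone does not fail the whole write.
 */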
5892
5893static bool need_full_stripe(enum btrfs_map_op op)
5894{
5895 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5896}
5897
5898/*
5899 * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5900 * tuple. This information is used to calculate how big a
5901 * particular bio can get before it straddles a stripe.
5902 *
5903 * @fs_info - the filesystem
5904 * @logical - address that we want to figure out the geometry of
5905 * @len - the length of IO we are going to perform, starting at @logical
5906 * @op - type of operation - write or read
5907 * @io_geom - pointer used to return values
5908 *
5909 * Returns < 0 in case a chunk for the given logical address cannot be found,
5910 * which usually shouldn't happen unless @logical is corrupted; 0 otherwise.
5911 */
5912int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5913 u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5914{
5915 struct extent_map *em;
5916 struct map_lookup *map;
5917 u64 offset;
5918 u64 stripe_offset;
5919 u64 stripe_nr;
5920 u64 stripe_len;
5921 u64 raid56_full_stripe_start = (u64)-1;
5922 int data_stripes;
5923 int ret = 0;
5924
5925 ASSERT(op != BTRFS_MAP_DISCARD);
5926
5927 em = btrfs_get_chunk_map(fs_info, logical, len);
5928 if (IS_ERR(em))
5929 return PTR_ERR(em);
5930
5931 map = em->map_lookup;
5932 /* Offset of this logical address in the chunk */
5933 offset = logical - em->start;
5934 /* Len of a stripe in a chunk */
5935 stripe_len = map->stripe_len;
5936 /* Stripe where this block falls in */
5937 stripe_nr = div64_u64(offset, stripe_len);
5938 /* Offset of stripe in the chunk */
5939 stripe_offset = stripe_nr * stripe_len;
5940 if (offset < stripe_offset) {
5941 btrfs_crit(fs_info,
5942"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5943 stripe_offset, offset, em->start, logical, stripe_len);
5944 ret = -EINVAL;
5945 goto out;
5946 }
5947
5948 /* stripe_offset is the offset of this block in its stripe */
5949 stripe_offset = offset - stripe_offset;
5950 data_stripes = nr_data_stripes(map);
5951
5952 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5953 u64 max_len = stripe_len - stripe_offset;
5954
5955 /*
5956 * In case of raid56, we need to know the stripe aligned start
5957 */
5958 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5959 unsigned long full_stripe_len = stripe_len * data_stripes;
5960 raid56_full_stripe_start = offset;
5961
5962 /*
5963 * Allow a write of a full stripe, but make sure we
5964 * don't allow straddling of stripes
5965 */
5966 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5967 full_stripe_len);
5968 raid56_full_stripe_start *= full_stripe_len;
5969
5970 /*
5971 * For writes to RAID[56], allow a full stripeset across
5972 * all disks. For other RAID types and for RAID[56]
5973 * reads, just allow a single stripe (on a single disk).
5974 */
5975 if (op == BTRFS_MAP_WRITE) {
5976 max_len = stripe_len * data_stripes -
5977 (offset - raid56_full_stripe_start);
5978 }
5979 }
5980 len = min_t(u64, em->len - offset, max_len);
5981 } else {
5982 len = em->len - offset;
5983 }
5984
5985 io_geom->len = len;
5986 io_geom->offset = offset;
5987 io_geom->stripe_len = stripe_len;
5988 io_geom->stripe_nr = stripe_nr;
5989 io_geom->stripe_offset = stripe_offset;
5990 io_geom->raid56_stripe_offset = raid56_full_stripe_start;
5991
5992out:
5993 /* once for us */
5994 free_extent_map(em);
5995 return ret;
5996}
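
/*
 * Minimal usage sketch (hypothetical helper, not called anywhere in
 * this file): cap an I/O starting at @logical to geom.len so that the
 * resulting bio never straddles a stripe boundary. Error handling is
 * reduced to returning 0 for brevity.
 */
static inline u64 example_len_to_stripe_boundary(struct btrfs_fs_info *fs_info,
						 u64 logical, u64 len)
{
	struct btrfs_io_geometry geom;

	if (btrfs_get_io_geometry(fs_info, BTRFS_MAP_WRITE, logical, len,
				  &geom) < 0)
		return 0;

	/* geom.len may exceed the requested length, so clamp it. */
	return min(len, geom.len);
}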
5997
5998static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5999 enum btrfs_map_op op,
6000 u64 logical, u64 *length,
6001 struct btrfs_bio **bbio_ret,
6002 int mirror_num, int need_raid_map)
6003{
6004 struct extent_map *em;
6005 struct map_lookup *map;
6006 u64 stripe_offset;
6007 u64 stripe_nr;
6008 u64 stripe_len;
6009 u32 stripe_index;
6010 int data_stripes;
6011 int i;
6012 int ret = 0;
6013 int num_stripes;
6014 int max_errors = 0;
6015 int tgtdev_indexes = 0;
6016 struct btrfs_bio *bbio = NULL;
6017 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6018 int dev_replace_is_ongoing = 0;
6019 int num_alloc_stripes;
6020 int patch_the_first_stripe_for_dev_replace = 0;
6021 u64 physical_to_patch_in_first_stripe = 0;
6022 u64 raid56_full_stripe_start = (u64)-1;
6023 struct btrfs_io_geometry geom;
6024
6025 ASSERT(bbio_ret);
6026 ASSERT(op != BTRFS_MAP_DISCARD);
6027
6028 ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6029 if (ret < 0)
6030 return ret;
6031
6032 em = btrfs_get_chunk_map(fs_info, logical, *length);
6033 ASSERT(!IS_ERR(em));
6034 map = em->map_lookup;
6035
6036 *length = geom.len;
6037 stripe_len = geom.stripe_len;
6038 stripe_nr = geom.stripe_nr;
6039 stripe_offset = geom.stripe_offset;
6040 raid56_full_stripe_start = geom.raid56_stripe_offset;
6041 data_stripes = nr_data_stripes(map);
6042
6043 down_read(&dev_replace->rwsem);
6044 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6045 /*
6046 * Hold the semaphore for read during the whole operation, write is
6047 * requested at commit time but must wait.
6048 */
6049 if (!dev_replace_is_ongoing)
6050 up_read(&dev_replace->rwsem);
6051
6052 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6053 !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6054 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6055 dev_replace->srcdev->devid,
6056 &mirror_num,
6057 &physical_to_patch_in_first_stripe);
6058 if (ret)
6059 goto out;
6060 else
6061 patch_the_first_stripe_for_dev_replace = 1;
6062 } else if (mirror_num > map->num_stripes) {
6063 mirror_num = 0;
6064 }
6065
6066 num_stripes = 1;
6067 stripe_index = 0;
6068 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6069 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6070 &stripe_index);
6071 if (!need_full_stripe(op))
6072 mirror_num = 1;
6073 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6074 if (need_full_stripe(op))
6075 num_stripes = map->num_stripes;
6076 else if (mirror_num)
6077 stripe_index = mirror_num - 1;
6078 else {
6079 stripe_index = find_live_mirror(fs_info, map, 0,
6080 dev_replace_is_ongoing);
6081 mirror_num = stripe_index + 1;
6082 }
6083
6084 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6085 if (need_full_stripe(op)) {
6086 num_stripes = map->num_stripes;
6087 } else if (mirror_num) {
6088 stripe_index = mirror_num - 1;
6089 } else {
6090 mirror_num = 1;
6091 }
6092
6093 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6094 u32 factor = map->num_stripes / map->sub_stripes;
6095
6096 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6097 stripe_index *= map->sub_stripes;
6098
6099 if (need_full_stripe(op))
6100 num_stripes = map->sub_stripes;
6101 else if (mirror_num)
6102 stripe_index += mirror_num - 1;
6103 else {
6104 int old_stripe_index = stripe_index;
6105 stripe_index = find_live_mirror(fs_info, map,
6106 stripe_index,
6107 dev_replace_is_ongoing);
6108 mirror_num = stripe_index - old_stripe_index + 1;
6109 }
6110
6111 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6112 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6113 /* push stripe_nr back to the start of the full stripe */
6114 stripe_nr = div64_u64(raid56_full_stripe_start,
6115 stripe_len * data_stripes);
6116
6117 /* RAID[56] write or recovery. Return all stripes */
6118 num_stripes = map->num_stripes;
6119 max_errors = nr_parity_stripes(map);
6120
6121 *length = map->stripe_len;
6122 stripe_index = 0;
6123 stripe_offset = 0;
6124 } else {
6125 /*
6126 * Mirror #0 or #1 means the original data block.
6127 * Mirror #2 is RAID5 parity block.
6128 * Mirror #3 is RAID6 Q block.
6129 */
6130 stripe_nr = div_u64_rem(stripe_nr,
6131 data_stripes, &stripe_index);
6132 if (mirror_num > 1)
6133 stripe_index = data_stripes + mirror_num - 2;
6134
6135 /* We distribute the parity blocks across stripes */
6136 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6137 &stripe_index);
6138 if (!need_full_stripe(op) && mirror_num <= 1)
6139 mirror_num = 1;
6140 }
6141 } else {
6142 /*
6143 * after this, stripe_nr is the number of stripes on this
6144 * device we have to walk to find the data, and stripe_index is
6145 * the number of our device in the stripe array
6146 */
6147 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6148 &stripe_index);
6149 mirror_num = stripe_index + 1;
6150 }
6151 if (stripe_index >= map->num_stripes) {
6152 btrfs_crit(fs_info,
6153 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6154 stripe_index, map->num_stripes);
6155 ret = -EINVAL;
6156 goto out;
6157 }
6158
6159 num_alloc_stripes = num_stripes;
6160 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6161 if (op == BTRFS_MAP_WRITE)
6162 num_alloc_stripes <<= 1;
6163 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6164 num_alloc_stripes++;
6165 tgtdev_indexes = num_stripes;
6166 }
6167
6168 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6169 if (!bbio) {
6170 ret = -ENOMEM;
6171 goto out;
6172 }
6173
6174 for (i = 0; i < num_stripes; i++) {
6175 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6176 stripe_offset + stripe_nr * map->stripe_len;
6177 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6178 stripe_index++;
6179 }
6180
6181 /* build raid_map */
6182 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6183 (need_full_stripe(op) || mirror_num > 1)) {
6184 u64 tmp;
6185 unsigned rot;
6186
6187 /* Work out the disk rotation on this stripe-set */
6188 div_u64_rem(stripe_nr, num_stripes, &rot);
6189
6190 /* Fill in the logical address of each stripe */
6191 tmp = stripe_nr * data_stripes;
6192 for (i = 0; i < data_stripes; i++)
6193 bbio->raid_map[(i+rot) % num_stripes] =
6194 em->start + (tmp + i) * map->stripe_len;
6195
6196 bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
6197 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6198 bbio->raid_map[(i+rot+1) % num_stripes] =
6199 RAID6_Q_STRIPE;
6200
6201 sort_parity_stripes(bbio, num_stripes);
6202 }
6203
6204 if (need_full_stripe(op))
6205 max_errors = btrfs_chunk_max_errors(map);
6206
6207 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6208 need_full_stripe(op)) {
6209 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6210 &max_errors);
6211 }
6212
6213 *bbio_ret = bbio;
6214 bbio->map_type = map->type;
6215 bbio->num_stripes = num_stripes;
6216 bbio->max_errors = max_errors;
6217 bbio->mirror_num = mirror_num;
6218
6219 /*
6220 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6221 * mirror_num == num_stripes + 1 && dev_replace target drive is
6222 * available as a mirror
6223 */
6224 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6225 WARN_ON(num_stripes > 1);
6226 bbio->stripes[0].dev = dev_replace->tgtdev;
6227 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6228 bbio->mirror_num = map->num_stripes + 1;
6229 }
6230out:
6231 if (dev_replace_is_ongoing) {
6232 lockdep_assert_held(&dev_replace->rwsem);
6233 /* Unlock and let waiting writers proceed */
6234 up_read(&dev_replace->rwsem);
6235 }
6236 free_extent_map(em);
6237 return ret;
6238}
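
/*
 * Worked example for the raid_map rotation above (hypothetical 3-disk
 * RAID5 chunk, so data_stripes = 2 and num_stripes = 3). For
 * stripe_nr = 1 we get rot = 1 and tmp = 2, giving:
 *
 *   raid_map[1] = em->start + 2 * stripe_len    (data stripe 0)
 *   raid_map[2] = em->start + 3 * stripe_len    (data stripe 1)
 *   raid_map[0] = RAID5_P_STRIPE                (parity)
 *
 * so parity lands on a different disk for each consecutive stripe-set,
 * and sort_parity_stripes() reorders the physical stripes to match this
 * logical layout.
 */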
6239
6240int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6241 u64 logical, u64 *length,
6242 struct btrfs_bio **bbio_ret, int mirror_num)
6243{
6244 if (op == BTRFS_MAP_DISCARD)
6245 return __btrfs_map_block_for_discard(fs_info, logical,
6246 length, bbio_ret);
6247
6248 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6249 mirror_num, 0);
6250}
6251
6252/* For Scrub/replace */
6253int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6254 u64 logical, u64 *length,
6255 struct btrfs_bio **bbio_ret)
6256{
6257 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6258}
6259
6260static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6261{
6262 bio->bi_private = bbio->private;
6263 bio->bi_end_io = bbio->end_io;
6264 bio_endio(bio);
6265
6266 btrfs_put_bbio(bbio);
6267}
6268
6269static void btrfs_end_bio(struct bio *bio)
6270{
6271 struct btrfs_bio *bbio = bio->bi_private;
6272 int is_orig_bio = 0;
6273
6274 if (bio->bi_status) {
6275 atomic_inc(&bbio->error);
6276 if (bio->bi_status == BLK_STS_IOERR ||
6277 bio->bi_status == BLK_STS_TARGET) {
6278 struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6279
6280 ASSERT(dev->bdev);
6281 if (bio_op(bio) == REQ_OP_WRITE)
6282 btrfs_dev_stat_inc_and_print(dev,
6283 BTRFS_DEV_STAT_WRITE_ERRS);
6284 else if (!(bio->bi_opf & REQ_RAHEAD))
6285 btrfs_dev_stat_inc_and_print(dev,
6286 BTRFS_DEV_STAT_READ_ERRS);
6287 if (bio->bi_opf & REQ_PREFLUSH)
6288 btrfs_dev_stat_inc_and_print(dev,
6289 BTRFS_DEV_STAT_FLUSH_ERRS);
6290 }
6291 }
6292
6293 if (bio == bbio->orig_bio)
6294 is_orig_bio = 1;
6295
6296 btrfs_bio_counter_dec(bbio->fs_info);
6297
6298 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6299 if (!is_orig_bio) {
6300 bio_put(bio);
6301 bio = bbio->orig_bio;
6302 }
6303
6304 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6305 /* only send an error to the higher layers if it is
6306 * beyond the tolerance of the btrfs bio
6307 */
6308 if (atomic_read(&bbio->error) > bbio->max_errors) {
6309 bio->bi_status = BLK_STS_IOERR;
6310 } else {
6311 /*
6312 * this bio is actually up to date, we didn't
6313 * go over the max number of errors
6314 */
6315 bio->bi_status = BLK_STS_OK;
6316 }
6317
6318 btrfs_end_bbio(bbio, bio);
6319 } else if (!is_orig_bio) {
6320 bio_put(bio);
6321 }
6322}
6323
6324static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6325 u64 physical, struct btrfs_device *dev)
6326{
6327 struct btrfs_fs_info *fs_info = bbio->fs_info;
6328
6329 bio->bi_private = bbio;
6330 btrfs_io_bio(bio)->device = dev;
6331 bio->bi_end_io = btrfs_end_bio;
6332 bio->bi_iter.bi_sector = physical >> 9;
6333 btrfs_debug_in_rcu(fs_info,
6334 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6335 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6336 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6337 dev->devid, bio->bi_iter.bi_size);
6338 bio_set_dev(bio, dev->bdev);
6339
6340 btrfs_bio_counter_inc_noblocked(fs_info);
6341
6342 btrfsic_submit_bio(bio);
6343}
6344
6345static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6346{
6347 atomic_inc(&bbio->error);
6348 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6349 /* Should be the original bio. */
6350 WARN_ON(bio != bbio->orig_bio);
6351
6352 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6353 bio->bi_iter.bi_sector = logical >> 9;
6354 if (atomic_read(&bbio->error) > bbio->max_errors)
6355 bio->bi_status = BLK_STS_IOERR;
6356 else
6357 bio->bi_status = BLK_STS_OK;
6358 btrfs_end_bbio(bbio, bio);
6359 }
6360}
6361
6362blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6363 int mirror_num)
6364{
6365 struct btrfs_device *dev;
6366 struct bio *first_bio = bio;
6367 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6368 u64 length = 0;
6369 u64 map_length;
6370 int ret;
6371 int dev_nr;
6372 int total_devs;
6373 struct btrfs_bio *bbio = NULL;
6374
6375 length = bio->bi_iter.bi_size;
6376 map_length = length;
6377
6378 btrfs_bio_counter_inc_blocked(fs_info);
6379 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6380 &map_length, &bbio, mirror_num, 1);
6381 if (ret) {
6382 btrfs_bio_counter_dec(fs_info);
6383 return errno_to_blk_status(ret);
6384 }
6385
6386 total_devs = bbio->num_stripes;
6387 bbio->orig_bio = first_bio;
6388 bbio->private = first_bio->bi_private;
6389 bbio->end_io = first_bio->bi_end_io;
6390 bbio->fs_info = fs_info;
6391 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6392
6393 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6394 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6395 /* In this case, map_length has been set to the length of
6396 a single stripe; not the whole write */
6397 if (bio_op(bio) == REQ_OP_WRITE) {
6398 ret = raid56_parity_write(fs_info, bio, bbio,
6399 map_length);
6400 } else {
6401 ret = raid56_parity_recover(fs_info, bio, bbio,
6402 map_length, mirror_num, 1);
6403 }
6404
6405 btrfs_bio_counter_dec(fs_info);
6406 return errno_to_blk_status(ret);
6407 }
6408
6409 if (map_length < length) {
6410 btrfs_crit(fs_info,
6411 "mapping failed logical %llu bio len %llu len %llu",
6412 logical, length, map_length);
6413 BUG();
6414 }
6415
6416 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6417 dev = bbio->stripes[dev_nr].dev;
6418 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6419 &dev->dev_state) ||
6420 (bio_op(first_bio) == REQ_OP_WRITE &&
6421 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6422 bbio_error(bbio, first_bio, logical);
6423 continue;
6424 }
6425
6426 if (dev_nr < total_devs - 1)
6427 bio = btrfs_bio_clone(first_bio);
6428 else
6429 bio = first_bio;
6430
6431 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6432 }
6433 btrfs_bio_counter_dec(fs_info);
6434 return BLK_STS_OK;
6435}
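
/*
 * Submission sketch for the loop above (hypothetical two-stripe RAID1
 * write): stripe 0 gets a clone of the original bio, the last stripe
 * reuses the original, and btrfs_end_bio() completes the original bio
 * only once stripes_pending drops to zero and the error count has been
 * compared against max_errors.
 */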
6436
6437/*
6438 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6439 * return NULL.
6440 *
6441 * If devid and uuid are both specified, the match must be exact, otherwise
6442 * only devid is used.
6443 *
6444 * If @seed is true, traverse through the seed devices.
6445 */
6446struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6447 u64 devid, u8 *uuid, u8 *fsid,
6448 bool seed)
6449{
6450 struct btrfs_device *device;
6451 struct btrfs_fs_devices *seed_devs;
6452
6453 if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6454 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6455 if (device->devid == devid &&
6456 (!uuid || memcmp(device->uuid, uuid,
6457 BTRFS_UUID_SIZE) == 0))
6458 return device;
6459 }
6460 }
6461
6462 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6463 if (!fsid ||
6464 !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6465 list_for_each_entry(device, &seed_devs->devices,
6466 dev_list) {
6467 if (device->devid == devid &&
6468 (!uuid || memcmp(device->uuid, uuid,
6469 BTRFS_UUID_SIZE) == 0))
6470 return device;
6471 }
6472 }
6473 }
6474
6475 return NULL;
6476}
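
/*
 * Usage sketch (hypothetical helper, not part of this file): look up
 * devid 1 in the current filesystem, traversing seed devices too, with
 * no constraint on the device or filesystem UUID.
 */
static inline struct btrfs_device *example_find_devid_one(
						struct btrfs_fs_info *fs_info)
{
	return btrfs_find_device(fs_info->fs_devices, 1, NULL, NULL, true);
}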
6477
6478static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6479 u64 devid, u8 *dev_uuid)
6480{
6481 struct btrfs_device *device;
6482 unsigned int nofs_flag;
6483
6484 /*
6485 * We call this under the chunk_mutex, so we want to use NOFS for this
6486 * allocation, however we don't want to change btrfs_alloc_device() to
6487 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6488 * places.
6489 */
6490 nofs_flag = memalloc_nofs_save();
6491 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6492 memalloc_nofs_restore(nofs_flag);
6493 if (IS_ERR(device))
6494 return device;
6495
6496 list_add(&device->dev_list, &fs_devices->devices);
6497 device->fs_devices = fs_devices;
6498 fs_devices->num_devices++;
6499
6500 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6501 fs_devices->missing_devices++;
6502
6503 return device;
6504}
6505
6506/**
6507 * btrfs_alloc_device - allocate struct btrfs_device
6508 * @fs_info: used only for generating a new devid, can be NULL if
6509 * devid is provided (i.e. @devid != NULL).
6510 * @devid: a pointer to devid for this device. If NULL a new devid
6511 * is generated.
6512 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6513 * is generated.
6514 *
6515 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6516 * on error. Returned struct is not linked onto any lists and must be
6517 * destroyed with btrfs_free_device.
6518 */
6519struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6520 const u64 *devid,
6521 const u8 *uuid)
6522{
6523 struct btrfs_device *dev;
6524 u64 tmp;
6525
6526 if (WARN_ON(!devid && !fs_info))
6527 return ERR_PTR(-EINVAL);
6528
6529 dev = __alloc_device(fs_info);
6530 if (IS_ERR(dev))
6531 return dev;
6532
6533 if (devid)
6534 tmp = *devid;
6535 else {
6536 int ret;
6537
6538 ret = find_next_devid(fs_info, &tmp);
6539 if (ret) {
6540 btrfs_free_device(dev);
6541 return ERR_PTR(ret);
6542 }
6543 }
6544 dev->devid = tmp;
6545
6546 if (uuid)
6547 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6548 else
6549 generate_random_uuid(dev->uuid);
6550
6551 return dev;
6552}
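
/*
 * Usage sketch (hypothetical helper): allocate a device with a known
 * devid and uuid. Per the kernel-doc above, @fs_info may be NULL since
 * the devid is provided, and the result is not linked onto any list and
 * must be destroyed with btrfs_free_device().
 */
static inline struct btrfs_device *example_alloc_known_device(u64 devid,
							      const u8 *uuid)
{
	return btrfs_alloc_device(NULL, &devid, uuid);
}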
6553
6554static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6555 u64 devid, u8 *uuid, bool error)
6556{
6557 if (error)
6558 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6559 devid, uuid);
6560 else
6561 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6562 devid, uuid);
6563}
6564
6565static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6566{
6567 int index = btrfs_bg_flags_to_raid_index(type);
6568 int ncopies = btrfs_raid_array[index].ncopies;
6569 const int nparity = btrfs_raid_array[index].nparity;
6570 int data_stripes;
6571
6572 if (nparity)
6573 data_stripes = num_stripes - nparity;
6574 else
6575 data_stripes = num_stripes / ncopies;
6576
6577 return div_u64(chunk_len, data_stripes);
6578}
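
/*
 * Worked examples for calc_stripe_length() (hypothetical chunk sizes):
 *
 *   RAID0, 4 stripes, 4G chunk: data_stripes = 4 / 1 = 4 -> 1G/stripe
 *   RAID1, 2 stripes, 1G chunk: data_stripes = 2 / 2 = 1 -> 1G/stripe
 *   RAID5, 4 stripes, 3G chunk: data_stripes = 4 - 1 = 3 -> 1G/stripe
 *
 * i.e. the dev extent each stripe occupies is the chunk length divided
 * by the number of stripes that actually carry data.
 */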
6579
6580static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6581 struct btrfs_chunk *chunk)
6582{
6583 struct btrfs_fs_info *fs_info = leaf->fs_info;
6584 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6585 struct map_lookup *map;
6586 struct extent_map *em;
6587 u64 logical;
6588 u64 length;
6589 u64 devid;
6590 u8 uuid[BTRFS_UUID_SIZE];
6591 int num_stripes;
6592 int ret;
6593 int i;
6594
6595 logical = key->offset;
6596 length = btrfs_chunk_length(leaf, chunk);
6597 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6598
6599 /*
6600 * Only need to verify chunk item if we're reading from sys chunk array,
6601 * as chunk item in tree block is already verified by tree-checker.
6602 */
6603 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6604 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6605 if (ret)
6606 return ret;
6607 }
6608
6609 read_lock(&map_tree->lock);
6610 em = lookup_extent_mapping(map_tree, logical, 1);
6611 read_unlock(&map_tree->lock);
6612
6613 /* already mapped? */
6614 if (em && em->start <= logical && em->start + em->len > logical) {
6615 free_extent_map(em);
6616 return 0;
6617 } else if (em) {
6618 free_extent_map(em);
6619 }
6620
6621 em = alloc_extent_map();
6622 if (!em)
6623 return -ENOMEM;
6624 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6625 if (!map) {
6626 free_extent_map(em);
6627 return -ENOMEM;
6628 }
6629
6630 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6631 em->map_lookup = map;
6632 em->start = logical;
6633 em->len = length;
6634 em->orig_start = 0;
6635 em->block_start = 0;
6636 em->block_len = em->len;
6637
6638 map->num_stripes = num_stripes;
6639 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6640 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6641 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6642 map->type = btrfs_chunk_type(leaf, chunk);
6643 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6644 map->verified_stripes = 0;
6645 em->orig_block_len = calc_stripe_length(map->type, em->len,
6646 map->num_stripes);
6647 for (i = 0; i < num_stripes; i++) {
6648 map->stripes[i].physical =
6649 btrfs_stripe_offset_nr(leaf, chunk, i);
6650 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6651 read_extent_buffer(leaf, uuid, (unsigned long)
6652 btrfs_stripe_dev_uuid_nr(chunk, i),
6653 BTRFS_UUID_SIZE);
6654 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6655 devid, uuid, NULL, true);
6656 if (!map->stripes[i].dev &&
6657 !btrfs_test_opt(fs_info, DEGRADED)) {
6658 free_extent_map(em);
6659 btrfs_report_missing_device(fs_info, devid, uuid, true);
6660 return -ENOENT;
6661 }
6662 if (!map->stripes[i].dev) {
6663 map->stripes[i].dev =
6664 add_missing_dev(fs_info->fs_devices, devid,
6665 uuid);
6666 if (IS_ERR(map->stripes[i].dev)) {
6667 free_extent_map(em);
6668 btrfs_err(fs_info,
6669 "failed to init missing dev %llu: %ld",
6670 devid, PTR_ERR(map->stripes[i].dev));
6671 return PTR_ERR(map->stripes[i].dev);
6672 }
6673 btrfs_report_missing_device(fs_info, devid, uuid, false);
6674 }
6675 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6676 &(map->stripes[i].dev->dev_state));
6677
6678 }
6679
6680 write_lock(&map_tree->lock);
6681 ret = add_extent_mapping(map_tree, em, 0);
6682 write_unlock(&map_tree->lock);
6683 if (ret < 0) {
6684 btrfs_err(fs_info,
6685 "failed to add chunk map, start=%llu len=%llu: %d",
6686 em->start, em->len, ret);
6687 }
6688 free_extent_map(em);
6689
6690 return ret;
6691}
6692
6693static void fill_device_from_item(struct extent_buffer *leaf,
6694 struct btrfs_dev_item *dev_item,
6695 struct btrfs_device *device)
6696{
6697 unsigned long ptr;
6698
6699 device->devid = btrfs_device_id(leaf, dev_item);
6700 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6701 device->total_bytes = device->disk_total_bytes;
6702 device->commit_total_bytes = device->disk_total_bytes;
6703 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6704 device->commit_bytes_used = device->bytes_used;
6705 device->type = btrfs_device_type(leaf, dev_item);
6706 device->io_align = btrfs_device_io_align(leaf, dev_item);
6707 device->io_width = btrfs_device_io_width(leaf, dev_item);
6708 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6709 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6710 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6711
6712 ptr = btrfs_device_uuid(dev_item);
6713 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6714}
6715
6716static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6717 u8 *fsid)
6718{
6719 struct btrfs_fs_devices *fs_devices;
6720 int ret;
6721
6722 lockdep_assert_held(&uuid_mutex);
6723 ASSERT(fsid);
6724
6725 /* This will match only for multi-device seed fs */
6726 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6727 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6728 return fs_devices;
6729
6730
6731 fs_devices = find_fsid(fsid, NULL);
6732 if (!fs_devices) {
6733 if (!btrfs_test_opt(fs_info, DEGRADED))
6734 return ERR_PTR(-ENOENT);
6735
6736 fs_devices = alloc_fs_devices(fsid, NULL);
6737 if (IS_ERR(fs_devices))
6738 return fs_devices;
6739
6740 fs_devices->seeding = true;
6741 fs_devices->opened = 1;
6742 return fs_devices;
6743 }
6744
6745 /*
6746 * Upon first call for a seed fs fsid, just create a private copy of the
6747 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
6748 */
6749 fs_devices = clone_fs_devices(fs_devices);
6750 if (IS_ERR(fs_devices))
6751 return fs_devices;
6752
6753 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6754 if (ret) {
6755 free_fs_devices(fs_devices);
6756 fs_devices = ERR_PTR(ret);
6757 goto out;
6758 }
6759
6760 if (!fs_devices->seeding) {
6761 close_fs_devices(fs_devices);
6762 free_fs_devices(fs_devices);
6763 fs_devices = ERR_PTR(-EINVAL);
6764 goto out;
6765 }
6766
6767 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
6768out:
6769 return fs_devices;
6770}
6771
6772static int read_one_dev(struct extent_buffer *leaf,
6773 struct btrfs_dev_item *dev_item)
6774{
6775 struct btrfs_fs_info *fs_info = leaf->fs_info;
6776 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6777 struct btrfs_device *device;
6778 u64 devid;
6779 int ret;
6780 u8 fs_uuid[BTRFS_FSID_SIZE];
6781 u8 dev_uuid[BTRFS_UUID_SIZE];
6782
6783 devid = btrfs_device_id(leaf, dev_item);
6784 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6785 BTRFS_UUID_SIZE);
6786 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6787 BTRFS_FSID_SIZE);
6788
6789 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6790 fs_devices = open_seed_devices(fs_info, fs_uuid);
6791 if (IS_ERR(fs_devices))
6792 return PTR_ERR(fs_devices);
6793 }
6794
6795 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6796 fs_uuid, true);
6797 if (!device) {
6798 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6799 btrfs_report_missing_device(fs_info, devid,
6800 dev_uuid, true);
6801 return -ENOENT;
6802 }
6803
6804 device = add_missing_dev(fs_devices, devid, dev_uuid);
6805 if (IS_ERR(device)) {
6806 btrfs_err(fs_info,
6807 "failed to add missing dev %llu: %ld",
6808 devid, PTR_ERR(device));
6809 return PTR_ERR(device);
6810 }
6811 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6812 } else {
6813 if (!device->bdev) {
6814 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6815 btrfs_report_missing_device(fs_info,
6816 devid, dev_uuid, true);
6817 return -ENOENT;
6818 }
6819 btrfs_report_missing_device(fs_info, devid,
6820 dev_uuid, false);
6821 }
6822
6823 if (!device->bdev &&
6824 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6825 /*
6826 * This happens when a device that was properly set up
6827 * in the device info lists suddenly goes bad.
6828 * device->bdev is NULL, and so we have to set the
6829 * BTRFS_DEV_STATE_MISSING bit here
6830 */
6831 device->fs_devices->missing_devices++;
6832 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6833 }
6834
6835 /* Move the device to its own fs_devices */
6836 if (device->fs_devices != fs_devices) {
6837 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6838 &device->dev_state));
6839
6840 list_move(&device->dev_list, &fs_devices->devices);
6841 device->fs_devices->num_devices--;
6842 fs_devices->num_devices++;
6843
6844 device->fs_devices->missing_devices--;
6845 fs_devices->missing_devices++;
6846
6847 device->fs_devices = fs_devices;
6848 }
6849 }
6850
6851 if (device->fs_devices != fs_info->fs_devices) {
6852 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6853 if (device->generation !=
6854 btrfs_device_generation(leaf, dev_item))
6855 return -EINVAL;
6856 }
6857
6858 fill_device_from_item(leaf, dev_item, device);
6859 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6860 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6861 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6862 device->fs_devices->total_rw_bytes += device->total_bytes;
6863 atomic64_add(device->total_bytes - device->bytes_used,
6864 &fs_info->free_chunk_space);
6865 }
6866 ret = 0;
6867 return ret;
6868}
6869
6870int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6871{
6872 struct btrfs_root *root = fs_info->tree_root;
6873 struct btrfs_super_block *super_copy = fs_info->super_copy;
6874 struct extent_buffer *sb;
6875 struct btrfs_disk_key *disk_key;
6876 struct btrfs_chunk *chunk;
6877 u8 *array_ptr;
6878 unsigned long sb_array_offset;
6879 int ret = 0;
6880 u32 num_stripes;
6881 u32 array_size;
6882 u32 len = 0;
6883 u32 cur_offset;
6884 u64 type;
6885 struct btrfs_key key;
6886
6887 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6888 /*
6889 * This will create an extent buffer of nodesize; the superblock size is
6890 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6891 * overallocate, but we can keep it as-is as only the first page is used.
6892 */
6893 sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6894 if (IS_ERR(sb))
6895 return PTR_ERR(sb);
6896 set_extent_buffer_uptodate(sb);
6897 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6898 /*
6899 * The sb extent buffer is artificial and just used to read the system array.
6900 * set_extent_buffer_uptodate() call does not properly mark all its
6901 * pages up-to-date when the page is larger: extent does not cover the
6902 * whole page and consequently check_page_uptodate does not find all
6903 * the page's extents up-to-date (the hole beyond sb),
6904 * write_extent_buffer then triggers a WARN_ON.
6905 *
6906 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6907 * but sb spans only this function. Add an explicit SetPageUptodate call
6908 * to silence the warning eg. on PowerPC 64.
6909 */
6910 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6911 SetPageUptodate(sb->pages[0]);
6912
6913 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6914 array_size = btrfs_super_sys_array_size(super_copy);
6915
6916 array_ptr = super_copy->sys_chunk_array;
6917 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6918 cur_offset = 0;
6919
6920 while (cur_offset < array_size) {
6921 disk_key = (struct btrfs_disk_key *)array_ptr;
6922 len = sizeof(*disk_key);
6923 if (cur_offset + len > array_size)
6924 goto out_short_read;
6925
6926 btrfs_disk_key_to_cpu(&key, disk_key);
6927
6928 array_ptr += len;
6929 sb_array_offset += len;
6930 cur_offset += len;
6931
6932 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
6933 btrfs_err(fs_info,
6934 "unexpected item type %u in sys_array at offset %u",
6935 (u32)key.type, cur_offset);
6936 ret = -EIO;
6937 break;
6938 }
6939
6940 chunk = (struct btrfs_chunk *)sb_array_offset;
6941 /*
6942 * At least one btrfs_chunk with one stripe must be present,
6943 * exact stripe count check comes afterwards
6944 */
6945 len = btrfs_chunk_item_size(1);
6946 if (cur_offset + len > array_size)
6947 goto out_short_read;
6948
6949 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6950 if (!num_stripes) {
6951 btrfs_err(fs_info,
6952 "invalid number of stripes %u in sys_array at offset %u",
6953 num_stripes, cur_offset);
6954 ret = -EIO;
6955 break;
6956 }
6957
6958 type = btrfs_chunk_type(sb, chunk);
6959 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6960 btrfs_err(fs_info,
6961 "invalid chunk type %llu in sys_array at offset %u",
6962 type, cur_offset);
6963 ret = -EIO;
6964 break;
6965 }
6966
6967 len = btrfs_chunk_item_size(num_stripes);
6968 if (cur_offset + len > array_size)
6969 goto out_short_read;
6970
6971 ret = read_one_chunk(&key, sb, chunk);
6972 if (ret)
6973 break;
6974
6975 array_ptr += len;
6976 sb_array_offset += len;
6977 cur_offset += len;
6978 }
6979 clear_extent_buffer_uptodate(sb);
6980 free_extent_buffer_stale(sb);
6981 return ret;
6982
6983out_short_read:
6984 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6985 len, cur_offset);
6986 clear_extent_buffer_uptodate(sb);
6987 free_extent_buffer_stale(sb);
6988 return -EIO;
6989}
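
/*
 * The parsing loop above walks a packed, variable-sized array; an
 * illustrative layout (stripe counts hypothetical):
 *
 *   sys_chunk_array: | disk_key | chunk (3 stripes) | disk_key | chunk (1 stripe) | ...
 *
 * Because each btrfs_chunk is variable-sized, the item length is only
 * known after num_stripes has been read, which is why the bounds are
 * checked in three steps: the key, a minimal one-stripe chunk, then the
 * full chunk.
 */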
6990
6991/*
6992 * Check if all chunks in the fs are OK for read-write degraded mount
6993 *
6994 * If the @failing_dev is specified, it's accounted as missing.
6995 *
6996 * Return true if all chunks meet the minimal RW mount requirements.
6997 * Return false if any chunk doesn't meet the minimal RW mount requirements.
6998 */
6999bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7000 struct btrfs_device *failing_dev)
7001{
7002 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7003 struct extent_map *em;
7004 u64 next_start = 0;
7005 bool ret = true;
7006
7007 read_lock(&map_tree->lock);
7008 em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7009 read_unlock(&map_tree->lock);
7010 /* No chunk at all? Return false anyway */
7011 if (!em) {
7012 ret = false;
7013 goto out;
7014 }
7015 while (em) {
7016 struct map_lookup *map;
7017 int missing = 0;
7018 int max_tolerated;
7019 int i;
7020
7021 map = em->map_lookup;
7022 max_tolerated =
7023 btrfs_get_num_tolerated_disk_barrier_failures(
7024 map->type);
7025 for (i = 0; i < map->num_stripes; i++) {
7026 struct btrfs_device *dev = map->stripes[i].dev;
7027
7028 if (!dev || !dev->bdev ||
7029 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7030 dev->last_flush_error)
7031 missing++;
7032 else if (failing_dev && failing_dev == dev)
7033 missing++;
7034 }
7035 if (missing > max_tolerated) {
7036 if (!failing_dev)
7037 btrfs_warn(fs_info,
7038 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7039 em->start, missing, max_tolerated);
7040 free_extent_map(em);
7041 ret = false;
7042 goto out;
7043 }
7044 next_start = extent_map_end(em);
7045 free_extent_map(em);
7046
7047 read_lock(&map_tree->lock);
7048 em = lookup_extent_mapping(map_tree, next_start,
7049 (u64)(-1) - next_start);
7050 read_unlock(&map_tree->lock);
7051 }
7052out:
7053 return ret;
7054}
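
/*
 * Example (hypothetical): a RAID1 chunk tolerates one failed device, so
 * a chunk with one stripe on a missing device and one on a healthy one
 * still passes the loop above. Two missing stripes exceed max_tolerated,
 * btrfs_check_rw_degradable() returns false, and the read-write degraded
 * mount is refused.
 */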
7055
7056static void readahead_tree_node_children(struct extent_buffer *node)
7057{
7058 int i;
7059 const int nr_items = btrfs_header_nritems(node);
7060
7061 for (i = 0; i < nr_items; i++) {
7062 u64 start;
7063
7064 start = btrfs_node_blockptr(node, i);
7065 readahead_tree_block(node->fs_info, start);
7066 }
7067}
7068
7069int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7070{
7071 struct btrfs_root *root = fs_info->chunk_root;
7072 struct btrfs_path *path;
7073 struct extent_buffer *leaf;
7074 struct btrfs_key key;
7075 struct btrfs_key found_key;
7076 int ret;
7077 int slot;
7078 u64 total_dev = 0;
7079 u64 last_ra_node = 0;
7080
7081 path = btrfs_alloc_path();
7082 if (!path)
7083 return -ENOMEM;
7084
7085 /*
7086 * uuid_mutex is needed only if we are mounting a sprout FS,
7087 * otherwise we don't need it.
7088 */
7089 mutex_lock(&uuid_mutex);
7090
7091 /*
7092 * It is possible for mount and umount to race in such a way that
7093 * we execute this code path, but open_fs_devices failed to clear
7094 * total_rw_bytes. We certainly want it cleared before reading the
7095 * device items, so clear it here.
7096 */
7097 fs_info->fs_devices->total_rw_bytes = 0;
7098
7099 /*
7100 * Read all device items, and then all the chunk items. All
7101 * device items are found before any chunk item (their object id
7102 * is smaller than the lowest possible object id for a chunk
7103 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7104 */
7105 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7106 key.offset = 0;
7107 key.type = 0;
7108 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7109 if (ret < 0)
7110 goto error;
7111 while (1) {
7112 struct extent_buffer *node;
7113
7114 leaf = path->nodes[0];
7115 slot = path->slots[0];
7116 if (slot >= btrfs_header_nritems(leaf)) {
7117 ret = btrfs_next_leaf(root, path);
7118 if (ret == 0)
7119 continue;
7120 if (ret < 0)
7121 goto error;
7122 break;
7123 }
7124 /*
7125 * The nodes on level 1 are not locked, but we don't need locking
7126 * during mount time as nothing else can access the tree
7127 */
7128 node = path->nodes[1];
7129 if (node) {
7130 if (last_ra_node != node->start) {
7131 readahead_tree_node_children(node);
7132 last_ra_node = node->start;
7133 }
7134 }
7135 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7136 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7137 struct btrfs_dev_item *dev_item;
7138 dev_item = btrfs_item_ptr(leaf, slot,
7139 struct btrfs_dev_item);
7140 ret = read_one_dev(leaf, dev_item);
7141 if (ret)
7142 goto error;
7143 total_dev++;
7144 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7145 struct btrfs_chunk *chunk;
7146 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7147 mutex_lock(&fs_info->chunk_mutex);
7148 ret = read_one_chunk(&found_key, leaf, chunk);
7149 mutex_unlock(&fs_info->chunk_mutex);
7150 if (ret)
7151 goto error;
7152 }
7153 path->slots[0]++;
7154 }
7155
7156 /*
7157 * After loading chunk tree, we've got all device information,
7158 * do another round of validation checks.
7159 */
7160 if (total_dev != fs_info->fs_devices->total_devices) {
7161 btrfs_err(fs_info,
7162 "super_num_devices %llu mismatch with num_devices %llu found here",
7163 btrfs_super_num_devices(fs_info->super_copy),
7164 total_dev);
7165 ret = -EINVAL;
7166 goto error;
7167 }
7168 if (btrfs_super_total_bytes(fs_info->super_copy) <
7169 fs_info->fs_devices->total_rw_bytes) {
7170 btrfs_err(fs_info,
7171 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7172 btrfs_super_total_bytes(fs_info->super_copy),
7173 fs_info->fs_devices->total_rw_bytes);
7174 ret = -EINVAL;
7175 goto error;
7176 }
7177 ret = 0;
7178error:
7179 mutex_unlock(&uuid_mutex);
7180
7181 btrfs_free_path(path);
7182 return ret;
7183}
7184
7185void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7186{
7187 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7188 struct btrfs_device *device;
7189
7190 fs_devices->fs_info = fs_info;
7191
7192 mutex_lock(&fs_devices->device_list_mutex);
7193 list_for_each_entry(device, &fs_devices->devices, dev_list)
7194 device->fs_info = fs_info;
7195
7196 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7197 list_for_each_entry(device, &seed_devs->devices, dev_list)
7198 device->fs_info = fs_info;
7199
7200 seed_devs->fs_info = fs_info;
7201 }
7202 mutex_unlock(&fs_devices->device_list_mutex);
7203}
7204
7205static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7206 const struct btrfs_dev_stats_item *ptr,
7207 int index)
7208{
7209 u64 val;
7210
7211 read_extent_buffer(eb, &val,
7212 offsetof(struct btrfs_dev_stats_item, values) +
7213 ((unsigned long)ptr) + (index * sizeof(u64)),
7214 sizeof(val));
7215 return val;
7216}
7217
7218static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7219 struct btrfs_dev_stats_item *ptr,
7220 int index, u64 val)
7221{
7222 write_extent_buffer(eb, &val,
7223 offsetof(struct btrfs_dev_stats_item, values) +
7224 ((unsigned long)ptr) + (index * sizeof(u64)),
7225 sizeof(val));
7226}
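
/*
 * Layout sketch for the two accessors above: a btrfs_dev_stats_item is
 * an array of u64 counters, so value @index lives at
 *
 *   (unsigned long)ptr + offsetof(struct btrfs_dev_stats_item, values)
 *                      + index * sizeof(u64)
 *
 * within the extent buffer, which is exactly the offset both helpers
 * compute.
 */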
7227
7228int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7229{
7230 struct btrfs_key key;
7231 struct btrfs_root *dev_root = fs_info->dev_root;
7232 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7233 struct extent_buffer *eb;
7234 int slot;
7235 int ret = 0;
7236 struct btrfs_device *device;
7237 struct btrfs_path *path = NULL;
7238 int i;
7239
7240 path = btrfs_alloc_path();
7241 if (!path)
7242 return -ENOMEM;
7243
7244 mutex_lock(&fs_devices->device_list_mutex);
7245 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7246 int item_size;
7247 struct btrfs_dev_stats_item *ptr;
7248
7249 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7250 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7251 key.offset = device->devid;
7252 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7253 if (ret) {
7254 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7255 btrfs_dev_stat_set(device, i, 0);
7256 device->dev_stats_valid = 1;
7257 btrfs_release_path(path);
7258 continue;
7259 }
7260 slot = path->slots[0];
7261 eb = path->nodes[0];
7262 item_size = btrfs_item_size_nr(eb, slot);
7263
7264 ptr = btrfs_item_ptr(eb, slot,
7265 struct btrfs_dev_stats_item);
7266
7267 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7268 if (item_size >= (1 + i) * sizeof(__le64))
7269 btrfs_dev_stat_set(device, i,
7270 btrfs_dev_stats_value(eb, ptr, i));
7271 else
7272 btrfs_dev_stat_set(device, i, 0);
7273 }
7274
7275 device->dev_stats_valid = 1;
7276 btrfs_dev_stat_print_on_load(device);
7277 btrfs_release_path(path);
7278 }
7279 mutex_unlock(&fs_devices->device_list_mutex);
7280
7281 btrfs_free_path(path);
7282 return ret < 0 ? ret : 0;
7283}
7284
7285static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7286 struct btrfs_device *device)
7287{
7288 struct btrfs_fs_info *fs_info = trans->fs_info;
7289 struct btrfs_root *dev_root = fs_info->dev_root;
7290 struct btrfs_path *path;
7291 struct btrfs_key key;
7292 struct extent_buffer *eb;
7293 struct btrfs_dev_stats_item *ptr;
7294 int ret;
7295 int i;
7296
7297 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7298 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7299 key.offset = device->devid;
7300
7301 path = btrfs_alloc_path();
7302 if (!path)
7303 return -ENOMEM;
7304 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7305 if (ret < 0) {
7306 btrfs_warn_in_rcu(fs_info,
7307 "error %d while searching for dev_stats item for device %s",
7308 ret, rcu_str_deref(device->name));
7309 goto out;
7310 }
7311
7312 if (ret == 0 &&
7313 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7314 /* need to delete old one and insert a new one */
7315 ret = btrfs_del_item(trans, dev_root, path);
7316 if (ret != 0) {
7317 btrfs_warn_in_rcu(fs_info,
7318 "delete too small dev_stats item for device %s failed %d",
7319 rcu_str_deref(device->name), ret);
7320 goto out;
7321 }
7322 ret = 1;
7323 }
7324
7325 if (ret == 1) {
7326 /* need to insert a new item */
7327 btrfs_release_path(path);
7328 ret = btrfs_insert_empty_item(trans, dev_root, path,
7329 &key, sizeof(*ptr));
7330 if (ret < 0) {
7331 btrfs_warn_in_rcu(fs_info,
7332 "insert dev_stats item for device %s failed %d",
7333 rcu_str_deref(device->name), ret);
7334 goto out;
7335 }
7336 }
7337
7338 eb = path->nodes[0];
7339 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7340 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7341 btrfs_set_dev_stats_value(eb, ptr, i,
7342 btrfs_dev_stat_read(device, i));
7343 btrfs_mark_buffer_dirty(eb);
7344
7345out:
7346 btrfs_free_path(path);
7347 return ret;
7348}
7349
7350/*
7351 * called from commit_transaction. Writes all changed device stats to disk.
7352 */
7353int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7354{
7355 struct btrfs_fs_info *fs_info = trans->fs_info;
7356 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7357 struct btrfs_device *device;
7358 int stats_cnt;
7359 int ret = 0;
7360
7361 mutex_lock(&fs_devices->device_list_mutex);
7362 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7363 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7364 if (!device->dev_stats_valid || stats_cnt == 0)
7365 continue;
7366
7367
7368 /*
7369 * There is a LOAD-LOAD control dependency between the value of
7370 * dev_stats_ccnt and updating the on-disk values which requires
7371 * reading the in-memory counters. Such control dependencies
7372 * require explicit read memory barriers.
7373 *
7374 * This memory barrier pairs with smp_mb__before_atomic in
7375 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7376 * barrier implied by atomic_xchg in
7377 * btrfs_dev_stats_read_and_reset
7378 */
7379 smp_rmb();
7380
7381 ret = update_dev_stat_item(trans, device);
7382 if (!ret)
7383 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7384 }
7385 mutex_unlock(&fs_devices->device_list_mutex);
7386
7387 return ret;
7388}
7389
7390void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7391{
7392 btrfs_dev_stat_inc(dev, index);
7393 btrfs_dev_stat_print_on_error(dev);
7394}
7395
7396static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7397{
7398 if (!dev->dev_stats_valid)
7399 return;
7400 btrfs_err_rl_in_rcu(dev->fs_info,
7401 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7402 rcu_str_deref(dev->name),
7403 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7404 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7405 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7406 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7407 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7408}
7409
7410static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7411{
7412 int i;
7413
7414 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7415 if (btrfs_dev_stat_read(dev, i) != 0)
7416 break;
7417 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7418 return; /* all values == 0, suppress message */
7419
7420 btrfs_info_in_rcu(dev->fs_info,
7421 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7422 rcu_str_deref(dev->name),
7423 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7424 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7425 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7426 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7427 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7428}
7429
7430int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7431 struct btrfs_ioctl_get_dev_stats *stats)
7432{
7433 struct btrfs_device *dev;
7434 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7435 int i;
7436
7437 mutex_lock(&fs_devices->device_list_mutex);
7438 dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7439 true);
7440 mutex_unlock(&fs_devices->device_list_mutex);
7441
7442 if (!dev) {
7443 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7444 return -ENODEV;
7445 } else if (!dev->dev_stats_valid) {
7446 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7447 return -ENODEV;
7448 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7449 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7450 if (stats->nr_items > i)
7451 stats->values[i] =
7452 btrfs_dev_stat_read_and_reset(dev, i);
7453 else
7454 btrfs_dev_stat_set(dev, i, 0);
7455 }
7456 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7457 current->comm, task_pid_nr(current));
7458 } else {
7459 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7460 if (stats->nr_items > i)
7461 stats->values[i] = btrfs_dev_stat_read(dev, i);
7462 }
7463 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7464 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7465 return 0;
7466}
7467
7468/*
7469 * Update the size and bytes used for each device where it changed. This is
7470 * delayed since we would otherwise get errors while writing out the
7471 * superblocks.
7472 *
7473 * Must be invoked during transaction commit.
7474 */
7475void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7476{
7477 struct btrfs_device *curr, *next;
7478
7479 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7480
7481 if (list_empty(&trans->dev_update_list))
7482 return;
7483
7484 /*
7485 * We don't need the device_list_mutex here. This list is owned by the
7486 * transaction and the transaction must complete before the device is
7487 * released.
7488 */
7489 mutex_lock(&trans->fs_info->chunk_mutex);
7490 list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7491 post_commit_list) {
7492 list_del_init(&curr->post_commit_list);
7493 curr->commit_total_bytes = curr->disk_total_bytes;
7494 curr->commit_bytes_used = curr->bytes_used;
7495 }
7496 mutex_unlock(&trans->fs_info->chunk_mutex);
7497}
7498
7499/*
7500 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7501 */
7502int btrfs_bg_type_to_factor(u64 flags)
7503{
7504 const int index = btrfs_bg_flags_to_raid_index(flags);
7505
7506 return btrfs_raid_array[index].ncopies;
7507}

static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					  "too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
			"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for a seed device */
	if (dev->disk_total_bytes == 0) {
		struct btrfs_fs_devices *devs;

		devs = list_first_entry(&fs_info->fs_devices->seed_list,
					struct btrfs_fs_devices, seed_list);
		dev = btrfs_find_device(devs, devid, NULL, NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}
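
/*
 * Worked example for the stripe_len check above (numbers are illustrative):
 * a 2GiB RAID0 chunk striped over 2 devices is backed by two 1GiB dev
 * extents, while a 2GiB RAID1 chunk is backed by two full-size 2GiB dev
 * extents, one per mirror.  Any dev extent whose length disagrees with what
 * calc_stripe_length() derives from the chunk is rejected as -EUCLEAN.
 */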

static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to the correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be
 * about the same size as the chunk tree.  This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	/* Start from the smallest possible devid; devids begin at 1 */
	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
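
/*
 * Illustrative sequence for the overlap check above (made-up numbers): dev
 * extent keys sort by (devid, physical offset), so walking them in key order
 * means a single "does this extent start before the previous one ended?"
 * comparison suffices.  E.g. on devid 1, an extent at [0, 1G) followed by
 * one starting at 512M trips the check and the mount fails with -EUCLEAN.
 */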

/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}
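
/*
 * Usage sketch (hypothetical caller; the error value mirrors what the
 * relocation code reports for swapfile-pinned block groups):
 *
 *	if (btrfs_pinned_by_swapfile(fs_info, block_group))
 *		return -ETXTBSY;	// cannot touch it while a swapfile is active
 */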