btrfs: add support for multiple global roots
[linux-block.git] fs/btrfs/zoned.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>

#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES   4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2

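/*
 * Worked example (illustrative only): with a 256MiB zone size,
 * zone_size_shift == 28, so the superblock log pairs start at
 *
 *	mirror 0: zone 0                        (offset 0)
 *	mirror 1: zone 1ULL << (39 - 28) == 2048  (512GiB / 256MiB)
 *	mirror 2: zone 1ULL << (42 - 28) == 16384 (4TiB / 256MiB)
 *
 * which is exactly what sb_zone_number() below computes.
 */
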
/*
 * Minimum number of active zones we need:
 *
 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
 * - 1 zone for tree-log dedicated block group
 * - 1 zone for relocation
 */
#define BTRFS_MIN_ACTIVE_ZONES		(BTRFS_SUPER_MIRROR_MAX + 5)

/*
 * Maximum supported zone size. Currently, SMR disks have a zone size of
 * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
 * expect the zone size to become larger than 8GiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G

#define SUPER_INFO_SECTORS	((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)

static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
	return (zone->cond == BLK_ZONE_COND_FULL) ||
		(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
}

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}

static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;
	int i;

	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          0        1
	 * In use[1]        x          x        1
	 * Full[1]          0          0        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (super[0]->generation > super[1]->generation)
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}
	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}

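/*
 * Worked example (illustrative only) of the table above: if zones[0] is
 * FULL and zones[1] is partially written (IN USE), the "1" cell applies,
 * so the next superblock lands at zones[1].wp and the latest valid copy
 * sits at zones[1].wp - SUPER_INFO_SECTORS. Only when both zones are FULL
 * do we read both trailing superblocks and compare generations.
 */
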
/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}

static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}

static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}

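/*
 * Worked example (illustrative only): a regular 1GiB device with an
 * emulated 256MiB zone size is reported as four conventional zones
 * starting at sectors 0, 524288, 1048576 and 1572864, each with
 * wp == start + len, since conventional zones have no write pointer.
 */
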
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zno;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So, it is OK to
		 * cap *nr_zones at the zone end.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache)
		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);

	return 0;
}

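/*
 * Usage sketch (hypothetical caller, for illustration only): report the
 * first two zones of a device; the request is served from
 * zinfo->zone_cache when every requested entry is already cached.
 *
 *	struct blk_zone zones[2];
 *	unsigned int nr_zones = 2;
 *	int ret = btrfs_get_dev_zones(device, 0, zones, &nr_zones);
 *
 * On return, nr_zones holds the number of zones actually reported.
 */
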
/* The emulated zone size is determined from the size of the first device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}

int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	struct request_queue *queue = bdev_get_queue(bdev);
	unsigned int max_active_zones;
	unsigned int nactive;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * yet be set.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	/* Check if it's power of 2 (see is_power_of_2) */
	ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;

	/* We reject devices with a zone size larger than 8GB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	max_active_zones = queue_max_active_zones(queue);
	if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
		btrfs_err_in_rcu(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
				 rcu_str_deref(device->name), max_active_zones,
				 BTRFS_MIN_ACTIVE_ZONES);
		ret = -EINVAL;
		goto out;
	}
	zone_info->max_active_zones = max_active_zones;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->active_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
						zone_info->nr_zones);
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Get zones type */
	nactive = 0;
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			switch (zones[i].cond) {
			case BLK_ZONE_COND_EMPTY:
				__set_bit(nreported, zone_info->empty_zones);
				break;
			case BLK_ZONE_COND_IMP_OPEN:
			case BLK_ZONE_COND_EXP_OPEN:
			case BLK_ZONE_COND_CLOSED:
				__set_bit(nreported, zone_info->active_zones);
				nactive++;
				break;
			}
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	if (max_active_zones) {
		if (nactive > max_active_zones) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
					 nactive, rcu_str_deref(device->name),
					 max_active_zones);
			ret = -EIO;
			goto out;
		}
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kfree(zones);

	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kfree(zones);
out_free_zone_info:
	btrfs_destroy_dev_zone_info(device);

	return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}

int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}

int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 zoned_devices = 0;
	u64 nr_devices = 0;
	u64 zone_size = 0;
	const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
	int ret = 0;

	/* Count zoned devices */
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		enum blk_zoned_model model;

		if (!device->bdev)
			continue;

		model = bdev_zoned_model(device->bdev);
		/*
		 * A host-managed zoned device must be used as a zoned device.
		 * A host-aware zoned device and a non-zoned device can be
		 * treated as a zoned device, if the ZONED flag is enabled in
		 * the superblock.
		 */
		if (model == BLK_ZONED_HM ||
		    (model == BLK_ZONED_HA && incompat_zoned) ||
		    (model == BLK_ZONED_NONE && incompat_zoned)) {
			struct btrfs_zoned_device_info *zone_info =
				device->zone_info;

			zoned_devices++;
			if (!zone_size) {
				zone_size = zone_info->zone_size;
			} else if (zone_info->zone_size != zone_size) {
				btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
					  device->zone_info->zone_size,
					  zone_size);
				ret = -EINVAL;
				goto out;
			}
		}
		nr_devices++;
	}

	if (!zoned_devices && !incompat_zoned)
		goto out;

	if (!zoned_devices && incompat_zoned) {
		/* No zoned block device found on ZONED filesystem */
		btrfs_err(fs_info,
			  "zoned: no zoned devices found on a zoned filesystem");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices && !incompat_zoned) {
		btrfs_err(fs_info,
			  "zoned: mode not enabled but zoned device found");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices != nr_devices) {
		btrfs_err(fs_info,
			  "zoned: cannot mix zoned and regular devices");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		ret = -EINVAL;
		goto out;
	}

	fs_info->zone_size = zone_size;
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info);
	if (ret)
		goto out;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
	return ret;
}

int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_test_opt(info, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_test_opt(info, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	return 0;
}

static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(sb_zone_is_full(reset));

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

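/*
 * Usage sketch (hypothetical caller, for illustration only): look up where
 * to read the primary superblock on a zoned block device, before any
 * btrfs_device structure exists for it.
 *
 *	u64 bytenr;
 *	int ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
 *	if (!ret)
 *		read BTRFS_SUPER_INFO_SIZE bytes at bytenr;
 *
 * -ENOENT here means the mirror's zone pair lies beyond the device end.
 */
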
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. Doing so, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
				  int mirror)
{
	u32 zone_num;

	if (!zinfo)
		return false;

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return false;

	if (!test_bit(zone_num, zinfo->seq_zones))
		return false;

	return true;
}

int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				int ret;

				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len, GFP_NOFS);
				if (ret)
					return ret;
			}

			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}
		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}

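/*
 * Worked example (illustrative only): with a 256MiB zone and 4KiB
 * superblocks, each log zone holds up to 65536 superblock copies.
 * Writes fill zones[0], then zones[1]; once both are FULL, the WRITE
 * path in sb_log_location() resets the zone holding the older
 * generation, so the pair acts as a ring and the copy with the highest
 * generation is always recoverable via sb_write_pointer().
 */
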
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
	sector_t zone_sectors;
	sector_t nr_sectors;
	u8 zone_sectors_shift;
	u32 sb_zone;
	u32 nr_zones;

	zone_sectors = bdev_zone_sectors(bdev);
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				zone_start_sector(sb_zone, bdev),
				zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}

/**
 * btrfs_find_allocatable_zones - find allocatable zones within a given region
 *
 * @device:	the device to allocate a region on
 * @hole_start: the position of the hole to allocate the region
 * @hole_end:	the end of the hole
 * @num_bytes:	size of wanted region
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}

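/*
 * Worked example (illustrative only): with 256MiB zones, a request for a
 * 1GiB (4 zone) region starting at zone 2046 overlaps the mirror-1 log
 * pair in zones 2048-2049, so the search restarts at zone 2050. The
 * returned position is the first spot clear of both the log zone pairs
 * and the regular btrfs_sb_offset() locations.
 */
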
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}

static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}

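/*
 * Concurrency note with a worked example (illustrative only): two tasks
 * racing to activate the same zone can both pass atomic_dec_if_positive(),
 * but test_and_set_bit() succeeds for exactly one of them; the loser gives
 * its budget back with atomic_inc(). Net effect: active_zones_left is
 * decremented once per distinct active zone, matching the device's
 * max_active_zones limit.
 */
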
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}

int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long end = (start + size) >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (end > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
		return 0;

	/* All the zones are sequential and empty */
	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest-addressed extent in the block group as the
 * allocation offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

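/*
 * Worked example (illustrative only): for a conventional-zone block group
 * at start == 1GiB, length == 256MiB, whose highest extent item is
 * (objectid == 1GiB + 200MiB, EXTENT_ITEM, offset == 16MiB), the
 * allocation pointer becomes 200MiB + 16MiB == 216MiB into the block
 * group, i.e. allocation resumes right after that extent.
 */
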
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	u64 physical = 0;
	int ret;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 *caps = NULL;
	unsigned long *active = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
	if (!cache->physical_map) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		ret = -ENOMEM;
		goto out;
	}

	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
	if (!caps) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical, &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
			btrfs_err_in_rcu(fs_info,
	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				zone.start << SECTOR_SHIFT,
				rcu_str_deref(device->name), device->devid);
			ret = -EIO;
			goto out;
		}

		caps[i] = (zone.capacity << SECTOR_SHIFT);

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = caps[i];
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			__set_bit(i, active);
			break;
		}

		/*
		 * Consider a zone as active if we can allow any number of
		 * active zones.
		 */
		if (!device->zone_info->max_active_zones)
			__set_bit(i, active);
	}

	if (num_sequential > 0)
		cache->seq_zone = true;

	if (num_conventional > 0) {
		/*
		 * Avoid calling calculate_alloc_pointer() for a new BG. It
		 * is no use for a new BG, since its allocation offset must
		 * always be 0.
		 *
		 * Also, we have a lock chain of extent buffer lock ->
		 * chunk mutex. For a new BG, this function is called from
		 * btrfs_make_block_group() which is already taking the
		 * chunk mutex. Thus, we cannot call
		 * calculate_alloc_pointer() which takes extent buffer
		 * locks to avoid deadlock.
		 */

		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		if (new) {
			cache->alloc_offset = 0;
			goto out;
		}
		ret = calculate_alloc_pointer(cache, &last_alloc);
		if (ret || map->num_stripes == num_conventional) {
			if (!ret)
				cache->alloc_offset = last_alloc;
			else
				btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
					  cache->start);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical);
			ret = -EIO;
			goto out;
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = caps[0];
		cache->zone_is_active = test_bit(0, active);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

	if (cache->zone_is_active) {
		btrfs_get_block_group(cache);
		spin_lock(&fs_info->zone_active_bgs_lock);
		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
		spin_unlock(&fs_info->zone_active_bgs_lock);
	}

out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
			  "zoned: invalid write pointer %llu in block group %llu",
			  cache->alloc_offset, cache->start);
		ret = -EIO;
	}

	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret)
		cache->meta_write_pointer = cache->alloc_offset + cache->start;

	if (ret) {
		kfree(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(caps);
	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}

void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = (cache->alloc_offset - cache->used) +
		   (cache->length - cache->zone_capacity);
	free = cache->zone_capacity - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}
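
/*
 * Worked example (illustrative only): a block group with length 256MiB,
 * zone_capacity 224MiB, alloc_offset 100MiB and used 80MiB yields
 *
 *	unusable = (100M - 80M) + (256M - 224M) == 52MiB
 *	free     = 224M - 100M                 == 124MiB
 *
 * The 20MiB of dead space before the write pointer can only be reclaimed
 * by resetting the zone after the remaining live data is relocated.
 */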

void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	if (!btrfs_is_zoned(fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
	    !list_empty(&eb->release_list))
		return;

	set_extent_buffer_dirty(eb);
	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
			       eb->start + eb->len - 1, EXTENT_DIRTY);
	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

	spin_lock(&trans->releasing_ebs_lock);
	list_add_tail(&eb->release_list, &trans->releasing_ebs);
	spin_unlock(&trans->releasing_ebs_lock);
	atomic_inc(&eb->refs);
}

void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
	spin_lock(&trans->releasing_ebs_lock);
	while (!list_empty(&trans->releasing_ebs)) {
		struct extent_buffer *eb;

		eb = list_first_entry(&trans->releasing_ebs,
				      struct extent_buffer, release_list);
		list_del_init(&eb->release_list);
		free_extent_buffer(eb);
	}
	spin_unlock(&trans->releasing_ebs_lock);
}

bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!is_data_inode(&inode->vfs_inode))
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore we have set aside our own block group from which only the
	 * relocation "process" can allocate and make sure only one process at a
	 * time can add pages to an extent that gets relocated, so it's safe to
	 * use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = cache->seq_zone;
	btrfs_put_block_group(cache);

	return ret;
}

void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio)
{
	struct btrfs_ordered_extent *ordered;
	const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

	if (bio_op(bio) != REQ_OP_ZONE_APPEND)
		return;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
	if (WARN_ON(!ordered))
		return;

	ordered->physical = physical;
	ordered->bdev = bio->bi_bdev;

	btrfs_put_ordered_extent(ordered);
}

void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_ordered_sum *sum;
	u64 orig_logical = ordered->disk_bytenr;
	u64 *logical = NULL;
	int nr, stripe_len;

	/* Zoned devices should not have partitions. So, we can assume it is 0 */
	ASSERT(!bdev_is_partition(ordered->bdev));
	if (WARN_ON(!ordered->bdev))
		return;

	if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
				     ordered->physical, &logical, &nr,
				     &stripe_len)))
		goto out;

	WARN_ON(nr != 1);

	if (orig_logical == *logical)
		goto out;

	ordered->disk_bytenr = *logical;

	em_tree = &inode->extent_tree;
	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = *logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);

	list_for_each_entry(sum, &ordered->list, list) {
		if (*logical < orig_logical)
			sum->bytenr -= orig_logical - *logical;
		else
			sum->bytenr += *logical - orig_logical;
	}

out:
	kfree(logical);
}

bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = btrfs_lookup_block_group(fs_info, eb->start);
	if (!cache)
		return true;

	if (cache->meta_write_pointer != eb->start) {
		btrfs_put_block_group(cache);
		cache = NULL;
		ret = false;
	} else {
		cache->meta_write_pointer = eb->start + eb->len;
	}

	*cache_ret = cache;

	return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) || !cache)
		return;

	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
	cache->meta_write_pointer = eb->start;
}

int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}

static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_io_context *bioc = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &mapped_length, &bioc);
	if (ret || !bioc || mapped_length < PAGE_SIZE) {
		btrfs_put_bioc(bioc);
		return -EIO;
	}

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		btrfs_put_bioc(bioc);
		return -EINVAL;
	}

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bioc->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bioc->stripes[i].physical;
		struct btrfs_device *dev = bioc->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);
	btrfs_put_bioc(bioc);

	return ret;
}

1715
1716/*
1717 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
1718 * filling zeros between @physical_pos to a write pointer of dev-replace
1719 * source device.
1720 */
1721int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
1722 u64 physical_start, u64 physical_pos)
1723{
1724 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
1725 struct blk_zone zone;
1726 u64 length;
1727 u64 wp;
1728 int ret;
1729
1730 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
1731 return 0;
1732
1733 ret = read_zone_info(fs_info, logical, &zone);
1734 if (ret)
1735 return ret;
1736
1737 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
1738
1739 if (physical_pos == wp)
1740 return 0;
1741
1742 if (physical_pos > wp)
1743 return -EUCLEAN;
1744
1745 length = wp - physical_pos;
1746 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
1747}
e7ff9e6b
JT
1748
struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_device *device;
	struct extent_map *em;
	struct map_lookup *map;

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return ERR_CAST(em);

	map = em->map_lookup;
	/* We only support single profile for now */
	ASSERT(map->num_stripes == 1);
	device = map->stripes[0].dev;

	free_extent_map(em);

	return device;
}

/**
 * Activate block group and underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	bool ret;

	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;
	/* Currently support SINGLE profile only */
	ASSERT(map->num_stripes == 1);
	device = map->stripes[0].dev;
	physical = map->stripes[0].physical;

	if (device->zone_info->max_active_zones == 0)
		return true;

	spin_lock(&block_group->lock);

	if (block_group->zone_is_active) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (block_group->alloc_offset == block_group->zone_capacity) {
		ret = false;
		goto out_unlock;
	}

	if (!btrfs_dev_set_active_zone(device, physical)) {
		/* Cannot activate the zone */
		ret = false;
		goto out_unlock;
	}

	/* Successfully activated all the zones */
	block_group->zone_is_active = 1;

	spin_unlock(&block_group->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(list_empty(&block_group->active_bg_list));
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	return ret;
}

int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	map = block_group->physical_map;
	/* Currently support SINGLE profile only */
	ASSERT(map->num_stripes == 1);

	device = map->stripes[0].dev;
	physical = map->stripes[0].physical;

	if (device->zone_info->max_active_zones == 0)
		return 0;

	spin_lock(&block_group->lock);
	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if ((block_group->flags &
	     (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
	    block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}
	spin_unlock(&block_group->lock);

	ret = btrfs_inc_block_group_ro(block_group, false);
	if (ret)
		return ret;

	/* Ensure all writes in this block group finish */
	btrfs_wait_block_group_reservations(block_group);
	/* No need to wait for NOCOW writers. Zoned mode does not allow that. */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
				 block_group->length);

	spin_lock(&block_group->lock);

	/*
	 * Bail out if someone already deactivated the block group, or
	 * allocated space is left in the block group.
	 */
	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		btrfs_dec_block_group_ro(block_group);
		return 0;
	}

	if (block_group->reserved) {
		spin_unlock(&block_group->lock);
		btrfs_dec_block_group_ro(block_group);
		return -EAGAIN;
	}

	block_group->zone_is_active = 0;
	block_group->alloc_offset = block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
			       physical >> SECTOR_SHIFT,
			       device->zone_info->zone_size >> SECTOR_SHIFT,
			       GFP_NOFS);
	btrfs_dec_block_group_ro(block_group);

	if (!ret) {
		btrfs_dev_clear_active_zone(device, physical);

		spin_lock(&fs_info->zone_active_bgs_lock);
		ASSERT(!list_empty(&block_group->active_bg_list));
		list_del_init(&block_group->active_bg_list);
		spin_unlock(&fs_info->zone_active_bgs_lock);

		/* For active_bg_list */
		btrfs_put_block_group(block_group);
	}

	return ret;
}

bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{
	struct btrfs_device *device;
	bool ret = false;

	if (!btrfs_is_zoned(fs_devices->fs_info))
		return true;

	/* Non-single profiles are not supported yet */
	ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);

	/* Check if there is a device with active zones left */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_zoned_device_info *zinfo = device->zone_info;

		if (!device->bdev)
			continue;

		if (!zinfo->max_active_zones ||
		    atomic_read(&zinfo->active_zones_left)) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *block_group;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(block_group);

	if (logical + length < block_group->start + block_group->zone_capacity)
		goto out;

	spin_lock(&block_group->lock);

	if (!block_group->zone_is_active) {
		spin_unlock(&block_group->lock);
		goto out;
	}

	block_group->zone_is_active = 0;
	/* We should have consumed all the free space */
	ASSERT(block_group->alloc_offset == block_group->zone_capacity);
	ASSERT(block_group->free_space_ctl->free_space == 0);
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	map = block_group->physical_map;
	device = map->stripes[0].dev;
	physical = map->stripes[0].physical;

	if (!device->zone_info->max_active_zones)
		goto out;

	btrfs_dev_clear_active_zone(device, physical);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	btrfs_put_block_group(block_group);

out:
	btrfs_put_block_group(block_group);
}

void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}

void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}