btrfs: zoned: calculate allocation offset for conventional zones
[linux-block.git] / fs / btrfs / zoned.c
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/blkdev.h>
6#include <linux/sched/mm.h>
7#include "ctree.h"
8#include "volumes.h"
9#include "zoned.h"
10#include "rcu-string.h"
11#include "disk-io.h"
12#include "block-group.h"
13
14/* Maximum number of zones to report per blkdev_report_zones() call */
15#define BTRFS_REPORT_NR_ZONES 4096
16/* Invalid allocation pointer value for missing devices */
17#define WP_MISSING_DEV ((u64)-1)
18/* Pseudo write pointer value for conventional zone */
19#define WP_CONVENTIONAL ((u64)-2)
20
21/* Number of superblock log zones */
22#define BTRFS_NR_SB_LOG_ZONES 2
23
24static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
25{
26 struct blk_zone *zones = data;
27
28 memcpy(&zones[idx], zone, sizeof(*zone));
29
30 return 0;
31}
32
33static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
34 u64 *wp_ret)
35{
36 bool empty[BTRFS_NR_SB_LOG_ZONES];
37 bool full[BTRFS_NR_SB_LOG_ZONES];
38 sector_t sector;
39
40 ASSERT(zones[0].type != BLK_ZONE_TYPE_CONVENTIONAL &&
41 zones[1].type != BLK_ZONE_TYPE_CONVENTIONAL);
42
43 empty[0] = (zones[0].cond == BLK_ZONE_COND_EMPTY);
44 empty[1] = (zones[1].cond == BLK_ZONE_COND_EMPTY);
45 full[0] = (zones[0].cond == BLK_ZONE_COND_FULL);
46 full[1] = (zones[1].cond == BLK_ZONE_COND_FULL);
47
48 /*
49 * Possible states of log buffer zones
50 *
51 * Empty[0] In use[0] Full[0]
52 * Empty[1] * x 0
53 * In use[1] 0 x 0
54 * Full[1] 1 1 C
55 *
56 * Log position:
57 * *: Special case, no superblock is written
58 * 0: Use write pointer of zones[0]
59 * 1: Use write pointer of zones[1]
60 * C: Compare super blocks from zones[0] and zones[1], use the latest
61 * one determined by generation
62 * x: Invalid state
63 */
64
65 if (empty[0] && empty[1]) {
66 /* Special case to distinguish no superblock to read */
67 *wp_ret = zones[0].start << SECTOR_SHIFT;
68 return -ENOENT;
69 } else if (full[0] && full[1]) {
70 /* Compare two super blocks */
71 struct address_space *mapping = bdev->bd_inode->i_mapping;
72 struct page *page[BTRFS_NR_SB_LOG_ZONES];
73 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
74 int i;
75
76 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
77 u64 bytenr;
78
79 bytenr = ((zones[i].start + zones[i].len)
80 << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
81
82 page[i] = read_cache_page_gfp(mapping,
83 bytenr >> PAGE_SHIFT, GFP_NOFS);
84 if (IS_ERR(page[i])) {
85 if (i == 1)
86 btrfs_release_disk_super(super[0]);
87 return PTR_ERR(page[i]);
88 }
89 super[i] = page_address(page[i]);
90 }
91
92 if (super[0]->generation > super[1]->generation)
93 sector = zones[1].start;
94 else
95 sector = zones[0].start;
96
97 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
98 btrfs_release_disk_super(super[i]);
99 } else if (!full[0] && (empty[1] || full[1])) {
100 sector = zones[0].wp;
101 } else if (full[0]) {
102 sector = zones[1].wp;
103 } else {
104 return -EUCLEAN;
105 }
106 *wp_ret = sector << SECTOR_SHIFT;
107 return 0;
108}
109
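When both log zones are full, the superblock with the higher generation is the live copy and the other zone is the next write target. A minimal userspace sketch of that tie-break, using a hypothetical simplified superblock struct rather than the kernel types, looks like this:

#include <stdint.h>

/* Hypothetical, simplified stand-in for the on-disk superblock. */
struct sb { uint64_t generation; };

/*
 * Both log zones are full: the zone holding the older superblock is the
 * one that will be reset and written next, so the returned "write pointer"
 * is its start sector (mirrors the generation comparison above).
 */
static uint64_t next_sb_write_sector(const struct sb *sb0, uint64_t zone0_start,
				     const struct sb *sb1, uint64_t zone1_start)
{
	return sb0->generation > sb1->generation ? zone1_start : zone0_start;
}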
110/*
111 * The following zones are reserved as the circular buffer on ZONED btrfs.
112 * - The primary superblock: zones 0 and 1
113 * - The first copy: zones 16 and 17
114 * - The second copy: zone 1024, or the zone at 256GB (whichever is
115 * smaller), and the following one
116 */
117static inline u32 sb_zone_number(int shift, int mirror)
118{
119 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
120
121 switch (mirror) {
122 case 0: return 0;
123 case 1: return 16;
124 case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
125 }
126
127 return 0;
128}
129
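For mirror 2 the mapping above reduces to min(btrfs_sb_offset(2) >> shift, 1024). A small userspace rendering, with the 256GB offset of the third superblock copy hard-coded as an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

/* Userspace rendering of the mapping; the 256 GiB offset of superblock
 * mirror 2 is hard-coded here purely for illustration. */
static uint32_t sb_zone_number_example(int shift, int mirror)
{
	const uint64_t sb_offset2 = 256ULL << 30;	/* btrfs_sb_offset(2) */
	uint64_t zone = sb_offset2 >> shift;

	switch (mirror) {
	case 0: return 0;
	case 1: return 16;
	case 2: return zone < 1024 ? zone : 1024;
	}
	return 0;
}

int main(void)
{
	/* 256 MiB zones (shift 28) -> zone 1024; 1 GiB zones (shift 30) -> zone 256 */
	printf("%u %u\n", sb_zone_number_example(28, 2), sb_zone_number_example(30, 2));
	return 0;
}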
130/*
131 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
132 * device into fixed-size chunks and fakes a conventional zone on each of
133 * them.
134 */
135static int emulate_report_zones(struct btrfs_device *device, u64 pos,
136 struct blk_zone *zones, unsigned int nr_zones)
137{
138 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
139 sector_t bdev_size = bdev_nr_sectors(device->bdev);
140 unsigned int i;
141
142 pos >>= SECTOR_SHIFT;
143 for (i = 0; i < nr_zones; i++) {
144 zones[i].start = i * zone_sectors + pos;
145 zones[i].len = zone_sectors;
146 zones[i].capacity = zone_sectors;
147 zones[i].wp = zones[i].start + zone_sectors;
148 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
149 zones[i].cond = BLK_ZONE_COND_NOT_WP;
150
151 if (zones[i].wp >= bdev_size) {
152 i++;
153 break;
154 }
155 }
156
157 return i;
158}
159
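The emulation simply slices the device into equally sized conventional zones, rounding the zone count up so a trailing partial zone is still represented. A one-line sketch of that count, with illustrative parameter names:

#include <stdint.h>

/* Number of emulated zones covering a device of dev_sectors sectors when it
 * is sliced into zone_sectors-sized conventional zones; round up so a
 * trailing partial zone is still counted (sketch only). */
static uint64_t emulated_nr_zones(uint64_t dev_sectors, uint64_t zone_sectors)
{
	return (dev_sectors + zone_sectors - 1) / zone_sectors;
}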
160static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
161 struct blk_zone *zones, unsigned int *nr_zones)
162{
163 int ret;
164
165 if (!*nr_zones)
166 return 0;
167
168 if (!bdev_is_zoned(device->bdev)) {
169 ret = emulate_report_zones(device, pos, zones, *nr_zones);
170 *nr_zones = ret;
171 return 0;
172 }
173
174 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
175 copy_zone_info_cb, zones);
176 if (ret < 0) {
177 btrfs_err_in_rcu(device->fs_info,
178 "zoned: failed to read zone %llu on %s (devid %llu)",
179 pos, rcu_str_deref(device->name),
180 device->devid);
181 return ret;
182 }
183 *nr_zones = ret;
184 if (!ret)
185 return -EIO;
186
187 return 0;
188}
189
190/* The emulated zone size is determined from the size of the first device extent */
191static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
192{
193 struct btrfs_path *path;
194 struct btrfs_root *root = fs_info->dev_root;
195 struct btrfs_key key;
196 struct extent_buffer *leaf;
197 struct btrfs_dev_extent *dext;
198 int ret = 0;
199
200 key.objectid = 1;
201 key.type = BTRFS_DEV_EXTENT_KEY;
202 key.offset = 0;
203
204 path = btrfs_alloc_path();
205 if (!path)
206 return -ENOMEM;
207
208 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
209 if (ret < 0)
210 goto out;
211
212 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
213 ret = btrfs_next_item(root, path);
214 if (ret < 0)
215 goto out;
216 /* No dev extents at all? Not good */
217 if (ret > 0) {
218 ret = -EUCLEAN;
219 goto out;
220 }
221 }
222
223 leaf = path->nodes[0];
224 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
225 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
226 ret = 0;
227
228out:
229 btrfs_free_path(path);
230
231 return ret;
232}
233
234int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
235{
236 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
237 struct btrfs_device *device;
238 int ret = 0;
239
240 /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
241 if (!btrfs_fs_incompat(fs_info, ZONED))
242 return 0;
243
244 mutex_lock(&fs_devices->device_list_mutex);
245 list_for_each_entry(device, &fs_devices->devices, dev_list) {
246 /* We can skip reading of zone info for missing devices */
247 if (!device->bdev)
248 continue;
249
250 ret = btrfs_get_dev_zone_info(device);
251 if (ret)
252 break;
253 }
254 mutex_unlock(&fs_devices->device_list_mutex);
255
256 return ret;
257}
258
259int btrfs_get_dev_zone_info(struct btrfs_device *device)
260{
261 struct btrfs_fs_info *fs_info = device->fs_info;
262 struct btrfs_zoned_device_info *zone_info = NULL;
263 struct block_device *bdev = device->bdev;
264 struct request_queue *queue = bdev_get_queue(bdev);
265 sector_t nr_sectors;
266 sector_t sector = 0;
267 struct blk_zone *zones = NULL;
268 unsigned int i, nreported = 0, nr_zones;
269 unsigned int zone_sectors;
270 char *model, *emulated;
271 int ret;
272
273 /*
274 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
275 * yet be set.
276 */
277 if (!btrfs_fs_incompat(fs_info, ZONED))
278 return 0;
279
280 if (device->zone_info)
281 return 0;
282
283 zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
284 if (!zone_info)
285 return -ENOMEM;
286
287 if (!bdev_is_zoned(bdev)) {
288 if (!fs_info->zone_size) {
289 ret = calculate_emulated_zone_size(fs_info);
290 if (ret)
291 goto out;
292 }
293
294 ASSERT(fs_info->zone_size);
295 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
296 } else {
297 zone_sectors = bdev_zone_sectors(bdev);
298 }
299
300 nr_sectors = bdev_nr_sectors(bdev);
301 /* Check if it's a power of 2 (see is_power_of_2()) */
302 ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
303 zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
304 zone_info->zone_size_shift = ilog2(zone_info->zone_size);
305 zone_info->max_zone_append_size =
306 (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
307 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
308 if (!IS_ALIGNED(nr_sectors, zone_sectors))
309 zone_info->nr_zones++;
310
311 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
312 if (!zone_info->seq_zones) {
313 ret = -ENOMEM;
314 goto out;
315 }
316
317 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
318 if (!zone_info->empty_zones) {
319 ret = -ENOMEM;
320 goto out;
321 }
322
323 zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
324 if (!zones) {
325 ret = -ENOMEM;
326 goto out;
327 }
328
329 /* Get zone types */
330 while (sector < nr_sectors) {
331 nr_zones = BTRFS_REPORT_NR_ZONES;
332 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
333 &nr_zones);
334 if (ret)
335 goto out;
336
337 for (i = 0; i < nr_zones; i++) {
338 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
339 __set_bit(nreported, zone_info->seq_zones);
340 if (zones[i].cond == BLK_ZONE_COND_EMPTY)
341 __set_bit(nreported, zone_info->empty_zones);
342 nreported++;
343 }
344 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
345 }
346
347 if (nreported != zone_info->nr_zones) {
348 btrfs_err_in_rcu(device->fs_info,
349 "inconsistent number of zones on %s (%u/%u)",
350 rcu_str_deref(device->name), nreported,
351 zone_info->nr_zones);
352 ret = -EIO;
353 goto out;
354 }
355
356 /* Validate superblock log */
357 nr_zones = BTRFS_NR_SB_LOG_ZONES;
358 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
359 u32 sb_zone;
360 u64 sb_wp;
361 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
362
363 sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
364 if (sb_zone + 1 >= zone_info->nr_zones)
365 continue;
366
367 sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
368 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
369 &zone_info->sb_zones[sb_pos],
370 &nr_zones);
371 if (ret)
372 goto out;
373
374 if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
375 btrfs_err_in_rcu(device->fs_info,
376 "zoned: failed to read super block log zone info at devid %llu zone %u",
377 device->devid, sb_zone);
378 ret = -EUCLEAN;
379 goto out;
380 }
381
382 /*
383 * If zones[0] is conventional, always use the beginning of the
384 * zone to record superblock. No need to validate in that case.
385 */
386 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
387 BLK_ZONE_TYPE_CONVENTIONAL)
388 continue;
389
390 ret = sb_write_pointer(device->bdev,
391 &zone_info->sb_zones[sb_pos], &sb_wp);
392 if (ret != -ENOENT && ret) {
393 btrfs_err_in_rcu(device->fs_info,
394 "zoned: super block log zone corrupted devid %llu zone %u",
395 device->devid, sb_zone);
396 ret = -EUCLEAN;
397 goto out;
398 }
399 }
400
401
402 kfree(zones);
403
404 device->zone_info = zone_info;
405
406 switch (bdev_zoned_model(bdev)) {
407 case BLK_ZONED_HM:
408 model = "host-managed zoned";
409 emulated = "";
410 break;
411 case BLK_ZONED_HA:
412 model = "host-aware zoned";
413 emulated = "";
414 break;
415 case BLK_ZONED_NONE:
416 model = "regular";
417 emulated = "emulated ";
418 break;
419 default:
420 /* Just in case */
421 btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
422 bdev_zoned_model(bdev),
423 rcu_str_deref(device->name));
424 ret = -EOPNOTSUPP;
425 goto out_free_zone_info;
426 }
427
428 btrfs_info_in_rcu(fs_info,
429 "%s block device %s, %u %szones of %llu bytes",
430 model, rcu_str_deref(device->name), zone_info->nr_zones,
431 emulated, zone_info->zone_size);
432
433 return 0;
434
435out:
436 kfree(zones);
437out_free_zone_info:
438 bitmap_free(zone_info->empty_zones);
439 bitmap_free(zone_info->seq_zones);
440 kfree(zone_info);
441 device->zone_info = NULL;
442
443 return ret;
444}
445
446void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
447{
448 struct btrfs_zoned_device_info *zone_info = device->zone_info;
449
450 if (!zone_info)
451 return;
452
453 bitmap_free(zone_info->seq_zones);
454 bitmap_free(zone_info->empty_zones);
455 kfree(zone_info);
456 device->zone_info = NULL;
457}
458
459int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
460 struct blk_zone *zone)
461{
462 unsigned int nr_zones = 1;
463 int ret;
464
465 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
466 if (ret != 0 || !nr_zones)
467 return ret ? ret : -EIO;
468
469 return 0;
470}
471
472int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
473{
474 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
475 struct btrfs_device *device;
476 u64 zoned_devices = 0;
477 u64 nr_devices = 0;
478 u64 zone_size = 0;
479 u64 max_zone_append_size = 0;
480 const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
481 int ret = 0;
482
483 /* Count zoned devices */
484 list_for_each_entry(device, &fs_devices->devices, dev_list) {
485 enum blk_zoned_model model;
486
487 if (!device->bdev)
488 continue;
489
490 model = bdev_zoned_model(device->bdev);
491 /*
492 * A Host-Managed zoned device must be used as a zoned device.
493 * A Host-Aware zoned device and a non-zoned device can be
494 * treated as a zoned device, if ZONED flag is enabled in the
495 * superblock.
496 */
497 if (model == BLK_ZONED_HM ||
498 (model == BLK_ZONED_HA && incompat_zoned) ||
499 (model == BLK_ZONED_NONE && incompat_zoned)) {
500 struct btrfs_zoned_device_info *zone_info =
501 device->zone_info;
502
503 zone_info = device->zone_info;
504 zoned_devices++;
505 if (!zone_size) {
506 zone_size = zone_info->zone_size;
507 } else if (zone_info->zone_size != zone_size) {
508 btrfs_err(fs_info,
509 "zoned: unequal block device zone sizes: have %llu found %llu",
510 device->zone_info->zone_size,
511 zone_size);
512 ret = -EINVAL;
513 goto out;
514 }
515 if (!max_zone_append_size ||
516 (zone_info->max_zone_append_size &&
517 zone_info->max_zone_append_size < max_zone_append_size))
518 max_zone_append_size =
519 zone_info->max_zone_append_size;
520 }
521 nr_devices++;
522 }
523
524 if (!zoned_devices && !incompat_zoned)
525 goto out;
526
527 if (!zoned_devices && incompat_zoned) {
528 /* No zoned block device found on ZONED filesystem */
529 btrfs_err(fs_info,
530 "zoned: no zoned devices found on a zoned filesystem");
531 ret = -EINVAL;
532 goto out;
533 }
534
535 if (zoned_devices && !incompat_zoned) {
536 btrfs_err(fs_info,
537 "zoned: mode not enabled but zoned device found");
538 ret = -EINVAL;
539 goto out;
540 }
541
542 if (zoned_devices != nr_devices) {
543 btrfs_err(fs_info,
544 "zoned: cannot mix zoned and regular devices");
545 ret = -EINVAL;
546 goto out;
547 }
548
549 /*
550 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
551 * __btrfs_alloc_chunk(). Since we want stripe_len == zone_size,
552 * check the alignment here.
553 */
554 if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
555 btrfs_err(fs_info,
556 "zoned: zone size %llu not aligned to stripe %u",
557 zone_size, BTRFS_STRIPE_LEN);
558 ret = -EINVAL;
559 goto out;
560 }
561
562 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
563 btrfs_err(fs_info, "zoned: mixed block groups not supported");
564 ret = -EINVAL;
565 goto out;
566 }
567
568 fs_info->zone_size = zone_size;
569 fs_info->max_zone_append_size = max_zone_append_size;
570 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
571
572 /*
573 * Check mount options here, because we might have just changed
574 * fs_info->zoned based on fs_info->zone_size.
575 */
576 ret = btrfs_check_mountopts_zoned(fs_info);
577 if (ret)
578 goto out;
579
580 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
581out:
582 return ret;
583}
584
585int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
586{
587 if (!btrfs_is_zoned(info))
588 return 0;
589
590 /*
591 * Space cache writing is not COWed. Disable that to avoid write errors
592 * in sequential zones.
593 */
594 if (btrfs_test_opt(info, SPACE_CACHE)) {
595 btrfs_err(info, "zoned: space cache v1 is not supported");
596 return -EINVAL;
597 }
598
599 if (btrfs_test_opt(info, NODATACOW)) {
600 btrfs_err(info, "zoned: NODATACOW not supported");
601 return -EINVAL;
602 }
603
604 return 0;
605}
606
607static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
608 int rw, u64 *bytenr_ret)
609{
610 u64 wp;
611 int ret;
612
613 if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
614 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
615 return 0;
616 }
617
618 ret = sb_write_pointer(bdev, zones, &wp);
619 if (ret != -ENOENT && ret < 0)
620 return ret;
621
622 if (rw == WRITE) {
623 struct blk_zone *reset = NULL;
624
625 if (wp == zones[0].start << SECTOR_SHIFT)
626 reset = &zones[0];
627 else if (wp == zones[1].start << SECTOR_SHIFT)
628 reset = &zones[1];
629
630 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
631 ASSERT(reset->cond == BLK_ZONE_COND_FULL);
632
633 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
634 reset->start, reset->len,
635 GFP_NOFS);
636 if (ret)
637 return ret;
638
639 reset->cond = BLK_ZONE_COND_EMPTY;
640 reset->wp = reset->start;
641 }
642 } else if (ret != -ENOENT) {
643 /* For READ, we want the previous one */
644 if (wp == zones[0].start << SECTOR_SHIFT)
645 wp = (zones[1].start + zones[1].len) << SECTOR_SHIFT;
646 wp -= BTRFS_SUPER_INFO_SIZE;
647 }
648
649 *bytenr_ret = wp;
650 return 0;
651
652}
653
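On the READ side the newest superblock sits immediately before the returned write position, wrapping to the tail of the second zone when the ring points back at the start of the first. Expressed as byte arithmetic (the kernel works in sectors; BTRFS_SUPER_INFO_SIZE is 4096), a sketch of that adjustment:

#include <stdint.h>

#define SB_SIZE 4096ULL		/* BTRFS_SUPER_INFO_SIZE */

/* Byte address of the newest superblock given the next-write position wp;
 * all values are byte addresses here, purely as a sketch of the READ branch
 * in sb_log_location() above. */
static uint64_t latest_sb_bytenr(uint64_t wp, uint64_t zone0_start,
				 uint64_t zone1_start, uint64_t zone1_len)
{
	if (wp == zone0_start)			/* ring wrapped to zone 0 */
		wp = zone1_start + zone1_len;	/* ...so last SB is at zone 1's end */
	return wp - SB_SIZE;
}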
654int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
655 u64 *bytenr_ret)
656{
657 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
658 unsigned int zone_sectors;
659 u32 sb_zone;
660 int ret;
661 u8 zone_sectors_shift;
662 sector_t nr_sectors;
663 u32 nr_zones;
664
665 if (!bdev_is_zoned(bdev)) {
666 *bytenr_ret = btrfs_sb_offset(mirror);
667 return 0;
668 }
669
670 ASSERT(rw == READ || rw == WRITE);
671
672 zone_sectors = bdev_zone_sectors(bdev);
673 if (!is_power_of_2(zone_sectors))
674 return -EINVAL;
675 zone_sectors_shift = ilog2(zone_sectors);
676 nr_sectors = bdev_nr_sectors(bdev);
677 nr_zones = nr_sectors >> zone_sectors_shift;
678
679 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
680 if (sb_zone + 1 >= nr_zones)
681 return -ENOENT;
682
683 ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
684 BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
685 zones);
686 if (ret < 0)
687 return ret;
688 if (ret != BTRFS_NR_SB_LOG_ZONES)
689 return -EIO;
690
691 return sb_log_location(bdev, zones, rw, bytenr_ret);
692}
693
694int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
695 u64 *bytenr_ret)
696{
697 struct btrfs_zoned_device_info *zinfo = device->zone_info;
698 u32 zone_num;
699
700 /*
701 * For a zoned filesystem on a non-zoned block device, use the same
702 * super block locations as a regular filesystem. That way, the super
703 * block can always be retrieved and the zoned flag of the volume
704 * detected from the super block information.
705 */
706 if (!bdev_is_zoned(device->bdev)) {
707 *bytenr_ret = btrfs_sb_offset(mirror);
708 return 0;
709 }
710
711 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
712 if (zone_num + 1 >= zinfo->nr_zones)
713 return -ENOENT;
714
715 return sb_log_location(device->bdev,
716 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
717 rw, bytenr_ret);
718}
719
720static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
721 int mirror)
722{
723 u32 zone_num;
724
725 if (!zinfo)
726 return false;
727
728 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
729 if (zone_num + 1 >= zinfo->nr_zones)
730 return false;
731
732 if (!test_bit(zone_num, zinfo->seq_zones))
733 return false;
734
735 return true;
736}
737
738void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
739{
740 struct btrfs_zoned_device_info *zinfo = device->zone_info;
741 struct blk_zone *zone;
742
743 if (!is_sb_log_zone(zinfo, mirror))
744 return;
745
746 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
747 if (zone->cond != BLK_ZONE_COND_FULL) {
748 if (zone->cond == BLK_ZONE_COND_EMPTY)
749 zone->cond = BLK_ZONE_COND_IMP_OPEN;
750
751 zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);
752
753 if (zone->wp == zone->start + zone->len)
754 zone->cond = BLK_ZONE_COND_FULL;
755
756 return;
757 }
758
759 zone++;
760 ASSERT(zone->cond != BLK_ZONE_COND_FULL);
761 if (zone->cond == BLK_ZONE_COND_EMPTY)
762 zone->cond = BLK_ZONE_COND_IMP_OPEN;
763
764 zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);
765
766 if (zone->wp == zone->start + zone->len)
767 zone->cond = BLK_ZONE_COND_FULL;
768}
769
770int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
771{
772 sector_t zone_sectors;
773 sector_t nr_sectors;
774 u8 zone_sectors_shift;
775 u32 sb_zone;
776 u32 nr_zones;
777
778 zone_sectors = bdev_zone_sectors(bdev);
779 zone_sectors_shift = ilog2(zone_sectors);
780 nr_sectors = bdev_nr_sectors(bdev);
781 nr_zones = nr_sectors >> zone_sectors_shift;
782
783 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
784 if (sb_zone + 1 >= nr_zones)
785 return -ENOENT;
786
787 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
788 sb_zone << zone_sectors_shift,
789 zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
790}
791
792/**
793 * btrfs_find_allocatable_zones - find allocatable zones within a given region
794 *
795 * @device: the device to allocate a region on
796 * @hole_start: the position of the hole to allocate the region
797 * @num_bytes: size of wanted region
798 * @hole_end: the end of the hole
799 * @return: position of allocatable zones
800 *
801 * Allocatable region should not contain any superblock locations.
802 */
803u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
804 u64 hole_end, u64 num_bytes)
805{
806 struct btrfs_zoned_device_info *zinfo = device->zone_info;
807 const u8 shift = zinfo->zone_size_shift;
808 u64 nzones = num_bytes >> shift;
809 u64 pos = hole_start;
810 u64 begin, end;
811 bool have_sb;
812 int i;
813
814 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
815 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
816
817 while (pos < hole_end) {
818 begin = pos >> shift;
819 end = begin + nzones;
820
821 if (end > zinfo->nr_zones)
822 return hole_end;
823
824 /* Check if zones in the region are all empty */
825 if (btrfs_dev_is_sequential(device, pos) &&
826 find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
827 pos += zinfo->zone_size;
828 continue;
829 }
830
831 have_sb = false;
832 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
833 u32 sb_zone;
834 u64 sb_pos;
835
836 sb_zone = sb_zone_number(shift, i);
837 if (!(end <= sb_zone ||
838 sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
839 have_sb = true;
840 pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
841 break;
842 }
843
844 /* We also need to exclude regular superblock positions */
845 sb_pos = btrfs_sb_offset(i);
846 if (!(pos + num_bytes <= sb_pos ||
847 sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
848 have_sb = true;
849 pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
850 zinfo->zone_size);
851 break;
852 }
853 }
854 if (!have_sb)
855 break;
856 }
857
858 return pos;
859}
860
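The superblock exclusion above is an ordinary half-open range overlap test. Pulled out on its own, with hypothetical parameter names, it reads:

#include <stdbool.h>
#include <stdint.h>

/* Two half-open byte ranges overlap unless one ends at or before the start
 * of the other; this is the test btrfs_find_allocatable_zones() uses to keep
 * candidate regions away from superblock locations (sketch only). */
static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	return !(a_start + a_len <= b_start || b_start + b_len <= a_start);
}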
861int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
862 u64 length, u64 *bytes)
863{
864 int ret;
865
866 *bytes = 0;
867 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
868 physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
869 GFP_NOFS);
870 if (ret)
871 return ret;
872
873 *bytes = length;
874 while (length) {
875 btrfs_dev_set_zone_empty(device, physical);
876 physical += device->zone_info->zone_size;
877 length -= device->zone_info->zone_size;
878 }
879
880 return 0;
881}
882
883int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
884{
885 struct btrfs_zoned_device_info *zinfo = device->zone_info;
886 const u8 shift = zinfo->zone_size_shift;
887 unsigned long begin = start >> shift;
888 unsigned long end = (start + size) >> shift;
889 u64 pos;
890 int ret;
891
892 ASSERT(IS_ALIGNED(start, zinfo->zone_size));
893 ASSERT(IS_ALIGNED(size, zinfo->zone_size));
894
895 if (end > zinfo->nr_zones)
896 return -ERANGE;
897
898 /* All the zones are conventional */
899 if (find_next_bit(zinfo->seq_zones, end, begin) == end)
900 return 0;
901
902 /* All the zones are sequential and empty */
903 if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
904 find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
905 return 0;
906
907 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
908 u64 reset_bytes;
909
910 if (!btrfs_dev_is_sequential(device, pos) ||
911 btrfs_dev_is_empty_zone(device, pos))
912 continue;
913
914 /* Free regions should be empty */
915 btrfs_warn_in_rcu(
916 device->fs_info,
917 "zoned: resetting device %s (devid %llu) zone %llu for allocation",
918 rcu_str_deref(device->name), device->devid, pos >> shift);
919 WARN_ON_ONCE(1);
920
921 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
922 &reset_bytes);
923 if (ret)
924 return ret;
925 }
926
927 return 0;
928}
929
930/*
931 * Calculate an allocation pointer from the extent allocation information
932 * for a block group consisting of conventional zones. It is pointed
933 * at the end of the highest addressed extent in the block group as
934 * an allocation offset.
935 */
936static int calculate_alloc_pointer(struct btrfs_block_group *cache,
937 u64 *offset_ret)
938{
939 struct btrfs_fs_info *fs_info = cache->fs_info;
940 struct btrfs_root *root = fs_info->extent_root;
941 struct btrfs_path *path;
942 struct btrfs_key key;
943 struct btrfs_key found_key;
944 int ret;
945 u64 length;
946
947 path = btrfs_alloc_path();
948 if (!path)
949 return -ENOMEM;
950
951 key.objectid = cache->start + cache->length;
952 key.type = 0;
953 key.offset = 0;
954
955 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
956 /* We should not find the exact match */
957 if (!ret)
958 ret = -EUCLEAN;
959 if (ret < 0)
960 goto out;
961
962 ret = btrfs_previous_extent_item(root, path, cache->start);
963 if (ret) {
964 if (ret == 1) {
965 ret = 0;
966 *offset_ret = 0;
967 }
968 goto out;
969 }
970
971 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
972
973 if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
974 length = found_key.offset;
975 else
976 length = fs_info->nodesize;
977
978 if (!(found_key.objectid >= cache->start &&
979 found_key.objectid + length <= cache->start + cache->length)) {
980 ret = -EUCLEAN;
981 goto out;
982 }
983 *offset_ret = found_key.objectid + length - cache->start;
984 ret = 0;
985
986out:
987 btrfs_free_path(path);
988 return ret;
989}
990
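Numerically, the offset returned above is just the end of the highest addressed extent relative to the block group start. A sketch with illustrative values only:

#include <stdint.h>

/* Allocation offset of a conventional block group: end of its highest
 * addressed extent, relative to the block group start (illustrative values,
 * mirroring the found_key arithmetic above). */
static uint64_t alloc_offset_example(void)
{
	uint64_t bg_start = 1024ULL << 20;		/* block group at 1 GiB */
	uint64_t extent_start = bg_start + (768 << 10);	/* last extent at +768 KiB */
	uint64_t extent_len = 256 << 10;		/* 256 KiB long */

	return extent_start + extent_len - bg_start;	/* == 1 MiB */
}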
991int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
992{
993 struct btrfs_fs_info *fs_info = cache->fs_info;
994 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
995 struct extent_map *em;
996 struct map_lookup *map;
997 struct btrfs_device *device;
998 u64 logical = cache->start;
999 u64 length = cache->length;
1000 u64 physical = 0;
1001 int ret;
1002 int i;
1003 unsigned int nofs_flag;
1004 u64 *alloc_offsets = NULL;
1005 u64 last_alloc = 0;
1006 u32 num_sequential = 0, num_conventional = 0;
1007
1008 if (!btrfs_is_zoned(fs_info))
1009 return 0;
1010
1011 /* Sanity check */
1012 if (!IS_ALIGNED(length, fs_info->zone_size)) {
1013 btrfs_err(fs_info,
1014 "zoned: block group %llu len %llu unaligned to zone size %llu",
1015 logical, length, fs_info->zone_size);
1016 return -EIO;
1017 }
1018
1019 /* Get the chunk mapping */
1020 read_lock(&em_tree->lock);
1021 em = lookup_extent_mapping(em_tree, logical, length);
1022 read_unlock(&em_tree->lock);
1023
1024 if (!em)
1025 return -EINVAL;
1026
1027 map = em->map_lookup;
1028
1029 alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
1030 if (!alloc_offsets) {
1031 free_extent_map(em);
1032 return -ENOMEM;
1033 }
1034
1035 for (i = 0; i < map->num_stripes; i++) {
1036 bool is_sequential;
1037 struct blk_zone zone;
1038
1039 device = map->stripes[i].dev;
1040 physical = map->stripes[i].physical;
1041
1042 if (device->bdev == NULL) {
1043 alloc_offsets[i] = WP_MISSING_DEV;
1044 continue;
1045 }
1046
1047 is_sequential = btrfs_dev_is_sequential(device, physical);
1048 if (is_sequential)
1049 num_sequential++;
1050 else
1051 num_conventional++;
1052
1053 if (!is_sequential) {
1054 alloc_offsets[i] = WP_CONVENTIONAL;
1055 continue;
1056 }
1057
1058 /*
1059 * This zone will be used for allocation, so mark this zone
1060 * non-empty.
1061 */
1062 btrfs_dev_clear_zone_empty(device, physical);
1063
1064 /*
1065 * The group is mapped to a sequential zone. Get the zone write
1066 * pointer to determine the allocation offset within the zone.
1067 */
1068 WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
1069 nofs_flag = memalloc_nofs_save();
1070 ret = btrfs_get_dev_zone(device, physical, &zone);
1071 memalloc_nofs_restore(nofs_flag);
1072 if (ret == -EIO || ret == -EOPNOTSUPP) {
1073 ret = 0;
1074 alloc_offsets[i] = WP_MISSING_DEV;
1075 continue;
1076 } else if (ret) {
1077 goto out;
1078 }
1079
1080 switch (zone.cond) {
1081 case BLK_ZONE_COND_OFFLINE:
1082 case BLK_ZONE_COND_READONLY:
1083 btrfs_err(fs_info,
1084 "zoned: offline/readonly zone %llu on device %s (devid %llu)",
1085 physical >> device->zone_info->zone_size_shift,
1086 rcu_str_deref(device->name), device->devid);
1087 alloc_offsets[i] = WP_MISSING_DEV;
1088 break;
1089 case BLK_ZONE_COND_EMPTY:
1090 alloc_offsets[i] = 0;
1091 break;
1092 case BLK_ZONE_COND_FULL:
1093 alloc_offsets[i] = fs_info->zone_size;
1094 break;
1095 default:
1096 /* Partially used zone */
1097 alloc_offsets[i] =
1098 ((zone.wp - zone.start) << SECTOR_SHIFT);
1099 break;
1100 }
1101 }
1102
1103 if (num_conventional > 0) {
1104 /*
1105 * Avoid calling calculate_alloc_pointer() for a new block group. It
1106 * is of no use for a new block group, whose offset must always be 0.
1107 *
1108 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
1109 * For a new block group, this function is called from
1110 * btrfs_make_block_group() which is already taking the
1111 * chunk mutex. Thus, we cannot call
1112 * calculate_alloc_pointer() which takes extent buffer
1113 * locks to avoid deadlock.
1114 */
1115 if (new) {
1116 cache->alloc_offset = 0;
1117 goto out;
1118 }
1119 ret = calculate_alloc_pointer(cache, &last_alloc);
1120 if (ret || map->num_stripes == num_conventional) {
1121 if (!ret)
1122 cache->alloc_offset = last_alloc;
1123 else
1124 btrfs_err(fs_info,
1125 "zoned: failed to determine allocation offset of bg %llu",
1126 cache->start);
1127 goto out;
1128 }
1129 }
1130
1131 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
1132 case 0: /* single */
1133 cache->alloc_offset = alloc_offsets[0];
1134 break;
1135 case BTRFS_BLOCK_GROUP_DUP:
1136 case BTRFS_BLOCK_GROUP_RAID1:
1137 case BTRFS_BLOCK_GROUP_RAID0:
1138 case BTRFS_BLOCK_GROUP_RAID10:
1139 case BTRFS_BLOCK_GROUP_RAID5:
1140 case BTRFS_BLOCK_GROUP_RAID6:
1141 /* non-single profiles are not supported yet */
1142 default:
1143 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1144 btrfs_bg_type_to_raid_name(map->type));
1145 ret = -EINVAL;
1146 goto out;
1147 }
1148
1149out:
1150 /* An extent is allocated after the write pointer */
1151 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1152 btrfs_err(fs_info,
1153 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1154 logical, last_alloc, cache->alloc_offset);
1155 ret = -EIO;
1156 }
1157
1158 kfree(alloc_offsets);
1159 free_extent_map(em);
1160
1161 return ret;
1162}
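For a sequential stripe the allocation offset instead comes straight from the zone condition and write pointer, as in the switch statement above. Reduced to byte arithmetic with a simplified condition enum (the kernel values are sectors, hence the SECTOR_SHIFT in the original), a sketch:

#include <stdint.h>

enum zone_cond { COND_EMPTY, COND_FULL, COND_OTHER };	/* simplified stand-in */

/* Allocation offset contributed by one sequential zone of a block group,
 * in bytes; a sketch of the switch in btrfs_load_block_group_zone_info(). */
static uint64_t seq_zone_alloc_offset(enum zone_cond cond, uint64_t wp_bytes,
				      uint64_t start_bytes, uint64_t zone_size)
{
	switch (cond) {
	case COND_EMPTY:
		return 0;			/* nothing written yet */
	case COND_FULL:
		return zone_size;		/* zone completely used */
	default:
		return wp_bytes - start_bytes;	/* partially filled zone */
	}
}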