btrfs: zoned: mark block groups to copy for device-replace
[linux-block.git] / fs / btrfs / zoned.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"

/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES   4096

/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES   2

static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
        struct blk_zone *zones = data;

        memcpy(&zones[idx], zone, sizeof(*zone));

        return 0;
}

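/*
 * Pick the superblock log position from a pair of zones that form a circular
 * log: superblocks are appended alternately through zones[0] and zones[1],
 * so the most recently written copy sits just before the active write
 * pointer.
 */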
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
                            u64 *wp_ret)
{
        bool empty[BTRFS_NR_SB_LOG_ZONES];
        bool full[BTRFS_NR_SB_LOG_ZONES];
        sector_t sector;

        ASSERT(zones[0].type != BLK_ZONE_TYPE_CONVENTIONAL &&
               zones[1].type != BLK_ZONE_TYPE_CONVENTIONAL);

        empty[0] = (zones[0].cond == BLK_ZONE_COND_EMPTY);
        empty[1] = (zones[1].cond == BLK_ZONE_COND_EMPTY);
        full[0] = (zones[0].cond == BLK_ZONE_COND_FULL);
        full[1] = (zones[1].cond == BLK_ZONE_COND_FULL);

        /*
         * Possible states of log buffer zones
         *
         *           Empty[0]  In use[0]  Full[0]
         * Empty[1]         *          x        0
         * In use[1]        0          x        0
         * Full[1]          1          1        C
         *
         * Log position:
         *   *: Special case, no superblock is written
         *   0: Use write pointer of zones[0]
         *   1: Use write pointer of zones[1]
         *   C: Compare super blocks from zones[0] and zones[1], use the latest
         *      one determined by generation
         *   x: Invalid state
         */

        if (empty[0] && empty[1]) {
                /* Special case to distinguish no superblock to read */
                *wp_ret = zones[0].start << SECTOR_SHIFT;
                return -ENOENT;
        } else if (full[0] && full[1]) {
                /* Compare two super blocks */
                struct address_space *mapping = bdev->bd_inode->i_mapping;
                struct page *page[BTRFS_NR_SB_LOG_ZONES];
                struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
                int i;

                for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
                        u64 bytenr;

                        bytenr = ((zones[i].start + zones[i].len)
                                   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

                        page[i] = read_cache_page_gfp(mapping,
                                        bytenr >> PAGE_SHIFT, GFP_NOFS);
                        if (IS_ERR(page[i])) {
                                if (i == 1)
                                        btrfs_release_disk_super(super[0]);
                                return PTR_ERR(page[i]);
                        }
                        super[i] = page_address(page[i]);
                }

                if (super[0]->generation > super[1]->generation)
                        sector = zones[1].start;
                else
                        sector = zones[0].start;

                for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
                        btrfs_release_disk_super(super[i]);
        } else if (!full[0] && (empty[1] || full[1])) {
                sector = zones[0].wp;
        } else if (full[0]) {
                sector = zones[1].wp;
        } else {
                return -EUCLEAN;
        }
        *wp_ret = sector << SECTOR_SHIFT;
        return 0;
}

/*
 * The following zones are reserved as the circular buffer on ZONED btrfs.
 *  - The primary superblock: zones 0 and 1
 *  - The first copy: zones 16 and 17
 *  - The second copy: zone 1024 or the zone containing the 256GB offset,
 *    whichever comes first, and the following zone
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
        ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);

        switch (mirror) {
        case 0: return 0;
        case 1: return 16;
        case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
        }

        return 0;
}

/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices the block
 * device into fixed-size chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
                                struct blk_zone *zones, unsigned int nr_zones)
{
        const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
        sector_t bdev_size = bdev_nr_sectors(device->bdev);
        unsigned int i;

        pos >>= SECTOR_SHIFT;
        for (i = 0; i < nr_zones; i++) {
                zones[i].start = i * zone_sectors + pos;
                zones[i].len = zone_sectors;
                zones[i].capacity = zone_sectors;
                /* Conventional zones carry no write pointer; point it at the zone end */
                zones[i].wp = zones[i].start + zone_sectors;
                zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
                zones[i].cond = BLK_ZONE_COND_NOT_WP;

                if (zones[i].wp >= bdev_size) {
                        i++;
                        break;
                }
        }

        return i;
}

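/*
 * Fetch zone info starting at @pos. On a non-zoned device, conventional
 * zones of fs_info->zone_size are emulated. On success, *nr_zones is
 * updated to the number of zones actually reported.
 */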
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
                               struct blk_zone *zones, unsigned int *nr_zones)
{
        int ret;

        if (!*nr_zones)
                return 0;

        if (!bdev_is_zoned(device->bdev)) {
                ret = emulate_report_zones(device, pos, zones, *nr_zones);
                *nr_zones = ret;
                return 0;
        }

        ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
                                  copy_zone_info_cb, zones);
        if (ret < 0) {
                btrfs_err_in_rcu(device->fs_info,
                                 "zoned: failed to read zone %llu on %s (devid %llu)",
                                 pos, rcu_str_deref(device->name),
                                 device->devid);
                return ret;
        }
        *nr_zones = ret;
        if (!ret)
                return -EIO;

        return 0;
}

/* The emulated zone size is determined from the size of the first device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
        struct btrfs_path *path;
        struct btrfs_root *root = fs_info->dev_root;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_dev_extent *dext;
        int ret = 0;

        /* devid 1 is the lowest possible, so this finds the first dev extent */
        key.objectid = 1;
        key.type = BTRFS_DEV_EXTENT_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                ret = btrfs_next_item(root, path);
                if (ret < 0)
                        goto out;
                /* No dev extents at all? Not good */
                if (ret > 0) {
                        ret = -EUCLEAN;
                        goto out;
                }
        }

        leaf = path->nodes[0];
        dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
        fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
        ret = 0;

out:
        btrfs_free_path(path);

        return ret;
}

int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;
        int ret = 0;

        /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
        if (!btrfs_fs_incompat(fs_info, ZONED))
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                /* We can skip reading of zone info for missing devices */
                if (!device->bdev)
                        continue;

                ret = btrfs_get_dev_zone_info(device);
                if (ret)
                        break;
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        return ret;
}

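/*
 * Build device->zone_info for a device on a ZONED filesystem: the zone size
 * and count, bitmaps of sequential and empty zones, and the cached
 * superblock log zones, which are validated via sb_write_pointer() on the
 * way.
 */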
int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct btrfs_zoned_device_info *zone_info = NULL;
        struct block_device *bdev = device->bdev;
        struct request_queue *queue = bdev_get_queue(bdev);
        sector_t nr_sectors;
        sector_t sector = 0;
        struct blk_zone *zones = NULL;
        unsigned int i, nreported = 0, nr_zones;
        unsigned int zone_sectors;
        char *model, *emulated;
        int ret;

        /*
         * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
         * yet be set.
         */
        if (!btrfs_fs_incompat(fs_info, ZONED))
                return 0;

        if (device->zone_info)
                return 0;

        zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
        if (!zone_info)
                return -ENOMEM;

        if (!bdev_is_zoned(bdev)) {
                if (!fs_info->zone_size) {
                        ret = calculate_emulated_zone_size(fs_info);
                        if (ret)
                                goto out;
                }

                ASSERT(fs_info->zone_size);
                zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
        } else {
                zone_sectors = bdev_zone_sectors(bdev);
        }

        nr_sectors = bdev_nr_sectors(bdev);
        /* Check if it's power of 2 (see is_power_of_2) */
        ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
        zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
        zone_info->zone_size_shift = ilog2(zone_info->zone_size);
        zone_info->max_zone_append_size =
                (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
        zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
        if (!IS_ALIGNED(nr_sectors, zone_sectors))
                zone_info->nr_zones++;

        zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
        if (!zone_info->seq_zones) {
                ret = -ENOMEM;
                goto out;
        }

        zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
        if (!zone_info->empty_zones) {
                ret = -ENOMEM;
                goto out;
        }

        zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
        if (!zones) {
                ret = -ENOMEM;
                goto out;
        }

        /* Record the zone type and emptiness of each zone */
        while (sector < nr_sectors) {
                nr_zones = BTRFS_REPORT_NR_ZONES;
                ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
                                          &nr_zones);
                if (ret)
                        goto out;

                for (i = 0; i < nr_zones; i++) {
                        if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
                                __set_bit(nreported, zone_info->seq_zones);
                        if (zones[i].cond == BLK_ZONE_COND_EMPTY)
                                __set_bit(nreported, zone_info->empty_zones);
                        nreported++;
                }
                sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
        }

        if (nreported != zone_info->nr_zones) {
                btrfs_err_in_rcu(device->fs_info,
                                 "inconsistent number of zones on %s (%u/%u)",
                                 rcu_str_deref(device->name), nreported,
                                 zone_info->nr_zones);
                ret = -EIO;
                goto out;
        }

        /* Validate superblock log */
        nr_zones = BTRFS_NR_SB_LOG_ZONES;
        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                u32 sb_zone;
                u64 sb_wp;
                int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

                sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
                if (sb_zone + 1 >= zone_info->nr_zones)
                        continue;

                sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
                ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
                                          &zone_info->sb_zones[sb_pos],
                                          &nr_zones);
                if (ret)
                        goto out;

                if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
                        btrfs_err_in_rcu(device->fs_info,
        "zoned: failed to read super block log zone info at devid %llu zone %u",
                                         device->devid, sb_zone);
                        ret = -EUCLEAN;
                        goto out;
                }

                /*
                 * If zones[0] is conventional, always use the beginning of the
                 * zone to record superblock. No need to validate in that case.
                 */
                if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
                    BLK_ZONE_TYPE_CONVENTIONAL)
                        continue;

                ret = sb_write_pointer(device->bdev,
                                       &zone_info->sb_zones[sb_pos], &sb_wp);
                if (ret != -ENOENT && ret) {
                        btrfs_err_in_rcu(device->fs_info,
                        "zoned: super block log zone corrupted devid %llu zone %u",
                                         device->devid, sb_zone);
                        ret = -EUCLEAN;
                        goto out;
                }
        }

        kfree(zones);

        device->zone_info = zone_info;

        switch (bdev_zoned_model(bdev)) {
        case BLK_ZONED_HM:
                model = "host-managed zoned";
                emulated = "";
                break;
        case BLK_ZONED_HA:
                model = "host-aware zoned";
                emulated = "";
                break;
        case BLK_ZONED_NONE:
                model = "regular";
                emulated = "emulated ";
                break;
        default:
                /* Just in case */
                btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
                                 bdev_zoned_model(bdev),
                                 rcu_str_deref(device->name));
                ret = -EOPNOTSUPP;
                goto out_free_zone_info;
        }

        btrfs_info_in_rcu(fs_info,
                "%s block device %s, %u %szones of %llu bytes",
                model, rcu_str_deref(device->name), zone_info->nr_zones,
                emulated, zone_info->zone_size);

        return 0;

out:
        kfree(zones);
out_free_zone_info:
        bitmap_free(zone_info->empty_zones);
        bitmap_free(zone_info->seq_zones);
        kfree(zone_info);
        device->zone_info = NULL;

        return ret;
}

void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
        struct btrfs_zoned_device_info *zone_info = device->zone_info;

        if (!zone_info)
                return;

        bitmap_free(zone_info->seq_zones);
        bitmap_free(zone_info->empty_zones);
        kfree(zone_info);
        device->zone_info = NULL;
}

int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                       struct blk_zone *zone)
{
        unsigned int nr_zones = 1;
        int ret;

        ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
        if (ret != 0 || !nr_zones)
                return ret ? ret : -EIO;

        return 0;
}
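
/*
 * Decide at mount time whether the filesystem runs in zoned mode: every
 * device must be zoned (or emulated as zoned) with one common zone size,
 * from which fs_info->zone_size and the filesystem-wide minimum
 * max_zone_append_size are derived.
 */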
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;
        u64 zoned_devices = 0;
        u64 nr_devices = 0;
        u64 zone_size = 0;
        u64 max_zone_append_size = 0;
        const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
        int ret = 0;

        /* Count zoned devices */
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                enum blk_zoned_model model;

                if (!device->bdev)
                        continue;

                model = bdev_zoned_model(device->bdev);
                /*
                 * A Host-Managed zoned device must be used as a zoned device.
                 * A Host-Aware zoned device and a non-zoned device can be
                 * treated as a zoned device, if the ZONED flag is enabled in
                 * the superblock.
                 */
                if (model == BLK_ZONED_HM ||
                    (model == BLK_ZONED_HA && incompat_zoned) ||
                    (model == BLK_ZONED_NONE && incompat_zoned)) {
                        struct btrfs_zoned_device_info *zone_info =
                                device->zone_info;

                        zoned_devices++;
                        if (!zone_size) {
                                zone_size = zone_info->zone_size;
                        } else if (zone_info->zone_size != zone_size) {
                                btrfs_err(fs_info,
                "zoned: unequal block device zone sizes: have %llu found %llu",
                                          device->zone_info->zone_size,
                                          zone_size);
                                ret = -EINVAL;
                                goto out;
                        }
                        if (!max_zone_append_size ||
                            (zone_info->max_zone_append_size &&
                             zone_info->max_zone_append_size < max_zone_append_size))
                                max_zone_append_size =
                                        zone_info->max_zone_append_size;
                }
                nr_devices++;
        }

        if (!zoned_devices && !incompat_zoned)
                goto out;

        if (!zoned_devices && incompat_zoned) {
                /* No zoned block device found on ZONED filesystem */
                btrfs_err(fs_info,
                          "zoned: no zoned devices found on a zoned filesystem");
                ret = -EINVAL;
                goto out;
        }

        if (zoned_devices && !incompat_zoned) {
                btrfs_err(fs_info,
                          "zoned: mode not enabled but zoned device found");
                ret = -EINVAL;
                goto out;
        }

        if (zoned_devices != nr_devices) {
                btrfs_err(fs_info,
                          "zoned: cannot mix zoned and regular devices");
                ret = -EINVAL;
                goto out;
        }

        /*
         * stripe_size is always aligned to BTRFS_STRIPE_LEN in
         * __btrfs_alloc_chunk(). Since we want stripe_len == zone_size,
         * check the alignment here.
         */
        if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
                btrfs_err(fs_info,
                          "zoned: zone size %llu not aligned to stripe %u",
                          zone_size, BTRFS_STRIPE_LEN);
                ret = -EINVAL;
                goto out;
        }

        if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
                btrfs_err(fs_info, "zoned: mixed block groups not supported");
                ret = -EINVAL;
                goto out;
        }

        fs_info->zone_size = zone_size;
        fs_info->max_zone_append_size = max_zone_append_size;
        fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

        /*
         * Check mount options here, because the zoned mode status
         * (fs_info->zoned) is derived from fs_info->zone_size, which was
         * just set above.
         */
        ret = btrfs_check_mountopts_zoned(fs_info);
        if (ret)
                goto out;

        btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
        return ret;
}

int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
        if (!btrfs_is_zoned(info))
                return 0;

        /*
         * Space cache writing is not COWed. Disable that to avoid write errors
         * in sequential zones.
         */
        if (btrfs_test_opt(info, SPACE_CACHE)) {
                btrfs_err(info, "zoned: space cache v1 is not supported");
                return -EINVAL;
        }

        if (btrfs_test_opt(info, NODATACOW)) {
                btrfs_err(info, "zoned: NODATACOW not supported");
                return -EINVAL;
        }

        return 0;
}
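
/*
 * Resolve the byte offset for reading or writing a superblock in its log
 * zone pair. For WRITE, when the log has wrapped (both zones full), the
 * zone holding the older superblock is reset before its start is returned;
 * for READ, the location of the most recently written superblock is
 * returned.
 */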
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
                           int rw, u64 *bytenr_ret)
{
        u64 wp;
        int ret;

        if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
                *bytenr_ret = zones[0].start << SECTOR_SHIFT;
                return 0;
        }

        ret = sb_write_pointer(bdev, zones, &wp);
        if (ret != -ENOENT && ret < 0)
                return ret;

        if (rw == WRITE) {
                struct blk_zone *reset = NULL;

                if (wp == zones[0].start << SECTOR_SHIFT)
                        reset = &zones[0];
                else if (wp == zones[1].start << SECTOR_SHIFT)
                        reset = &zones[1];

                if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
                        ASSERT(reset->cond == BLK_ZONE_COND_FULL);

                        ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
                                               reset->start, reset->len,
                                               GFP_NOFS);
                        if (ret)
                                return ret;

                        reset->cond = BLK_ZONE_COND_EMPTY;
                        reset->wp = reset->start;
                }
        } else if (ret != -ENOENT) {
                /*
                 * For READ, we want the previously written superblock: the
                 * write pointer points just past it, so step back by one
                 * superblock, wrapping to the end of zones[1] when the write
                 * pointer sits at the start of zones[0].
                 */
                if (wp == zones[0].start << SECTOR_SHIFT)
                        wp = (zones[1].start + zones[1].len) << SECTOR_SHIFT;
                wp -= BTRFS_SUPER_INFO_SIZE;
        }

        *bytenr_ret = wp;
        return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
                               u64 *bytenr_ret)
{
        struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
        unsigned int zone_sectors;
        u32 sb_zone;
        int ret;
        u8 zone_sectors_shift;
        sector_t nr_sectors;
        u32 nr_zones;

        if (!bdev_is_zoned(bdev)) {
                *bytenr_ret = btrfs_sb_offset(mirror);
                return 0;
        }

        ASSERT(rw == READ || rw == WRITE);

        zone_sectors = bdev_zone_sectors(bdev);
        if (!is_power_of_2(zone_sectors))
                return -EINVAL;
        zone_sectors_shift = ilog2(zone_sectors);
        nr_sectors = bdev_nr_sectors(bdev);
        nr_zones = nr_sectors >> zone_sectors_shift;

        sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
        if (sb_zone + 1 >= nr_zones)
                return -ENOENT;

        ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
                                  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
                                  zones);
        if (ret < 0)
                return ret;
        if (ret != BTRFS_NR_SB_LOG_ZONES)
                return -EIO;

        return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
                          u64 *bytenr_ret)
{
        struct btrfs_zoned_device_info *zinfo = device->zone_info;
        u32 zone_num;

        /*
         * For a zoned filesystem on a non-zoned block device, use the same
         * superblock locations as a regular filesystem. That way, the
         * superblock can always be retrieved and the zoned flag of the
         * volume detected from the superblock information.
         */
        if (!bdev_is_zoned(device->bdev)) {
                *bytenr_ret = btrfs_sb_offset(mirror);
                return 0;
        }

        zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
        if (zone_num + 1 >= zinfo->nr_zones)
                return -ENOENT;

        return sb_log_location(device->bdev,
                               &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
                               rw, bytenr_ret);
}

static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
                                  int mirror)
{
        u32 zone_num;

        if (!zinfo)
                return false;

        zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
        if (zone_num + 1 >= zinfo->nr_zones)
                return false;

        if (!test_bit(zone_num, zinfo->seq_zones))
                return false;

        return true;
}

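/*
 * Advance the cached, in-memory write pointer of a superblock log zone after
 * a superblock has been written out, moving on to the second zone of the
 * pair once the first one is full.
 */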
void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
        struct btrfs_zoned_device_info *zinfo = device->zone_info;
        struct blk_zone *zone;

        if (!is_sb_log_zone(zinfo, mirror))
                return;

        zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
        if (zone->cond != BLK_ZONE_COND_FULL) {
                if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;

                zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);

                if (zone->wp == zone->start + zone->len)
                        zone->cond = BLK_ZONE_COND_FULL;

                return;
        }

        zone++;
        ASSERT(zone->cond != BLK_ZONE_COND_FULL);
        if (zone->cond == BLK_ZONE_COND_EMPTY)
                zone->cond = BLK_ZONE_COND_IMP_OPEN;

        zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);

        if (zone->wp == zone->start + zone->len)
                zone->cond = BLK_ZONE_COND_FULL;
}

int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
{
        sector_t zone_sectors;
        sector_t nr_sectors;
        u8 zone_sectors_shift;
        u32 sb_zone;
        u32 nr_zones;

        zone_sectors = bdev_zone_sectors(bdev);
        zone_sectors_shift = ilog2(zone_sectors);
        nr_sectors = bdev_nr_sectors(bdev);
        nr_zones = nr_sectors >> zone_sectors_shift;

        sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
        if (sb_zone + 1 >= nr_zones)
                return -ENOENT;

        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
                                sb_zone << zone_sectors_shift,
                                zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}

/**
 * btrfs_find_allocatable_zones - find allocatable zones within a given region
 *
 * @device:	the device to allocate a region on
 * @hole_start:	the position of the hole to allocate the region
 * @num_bytes:	size of wanted region
 * @hole_end:	the end of the hole
 * @return:	position of allocatable zones
 *
 * Allocatable region should not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
                                 u64 hole_end, u64 num_bytes)
{
        struct btrfs_zoned_device_info *zinfo = device->zone_info;
        const u8 shift = zinfo->zone_size_shift;
        u64 nzones = num_bytes >> shift;
        u64 pos = hole_start;
        u64 begin, end;
        bool have_sb;
        int i;

        ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
        ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

        while (pos < hole_end) {
                begin = pos >> shift;
                end = begin + nzones;

                if (end > zinfo->nr_zones)
                        return hole_end;

                /* Check if zones in the region are all empty */
                if (btrfs_dev_is_sequential(device, pos) &&
                    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
                        pos += zinfo->zone_size;
                        continue;
                }

                have_sb = false;
                for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                        u32 sb_zone;
                        u64 sb_pos;

                        sb_zone = sb_zone_number(shift, i);
                        if (!(end <= sb_zone ||
                              sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
                                have_sb = true;
                                pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
                                break;
                        }

                        /* We also need to exclude regular superblock positions */
                        sb_pos = btrfs_sb_offset(i);
                        if (!(pos + num_bytes <= sb_pos ||
                              sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
                                have_sb = true;
                                pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
                                            zinfo->zone_size);
                                break;
                        }
                }
                if (!have_sb)
                        break;
        }

        return pos;
}

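/*
 * Reset every zone in [physical, physical + length) and mark those zones
 * empty in the device bitmap; *bytes reports how many bytes were reset.
 */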
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
                            u64 length, u64 *bytes)
{
        int ret;

        *bytes = 0;
        ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
                               physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
                               GFP_NOFS);
        if (ret)
                return ret;

        *bytes = length;
        while (length) {
                btrfs_dev_set_zone_empty(device, physical);
                physical += device->zone_info->zone_size;
                length -= device->zone_info->zone_size;
        }

        return 0;
}

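/*
 * Verify that a region about to be allocated contains only conventional
 * zones or empty sequential zones. A non-empty sequential zone is
 * unexpected here (free regions should be empty), so it is reset with a
 * warning.
 */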
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
        struct btrfs_zoned_device_info *zinfo = device->zone_info;
        const u8 shift = zinfo->zone_size_shift;
        unsigned long begin = start >> shift;
        unsigned long end = (start + size) >> shift;
        u64 pos;
        int ret;

        ASSERT(IS_ALIGNED(start, zinfo->zone_size));
        ASSERT(IS_ALIGNED(size, zinfo->zone_size));

        if (end > zinfo->nr_zones)
                return -ERANGE;

        /* All the zones are conventional */
        if (find_next_bit(zinfo->seq_zones, end, begin) == end)
                return 0;

        /* All the zones are sequential and empty */
        if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
            find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
                return 0;

        for (pos = start; pos < start + size; pos += zinfo->zone_size) {
                u64 reset_bytes;

                if (!btrfs_dev_is_sequential(device, pos) ||
                    btrfs_dev_is_empty_zone(device, pos))
                        continue;

                /* Free regions should be empty */
                btrfs_warn_in_rcu(
                        device->fs_info,
                "zoned: resetting device %s (devid %llu) zone %llu for allocation",
                        rcu_str_deref(device->name), device->devid, pos >> shift);
                WARN_ON_ONCE(1);

                ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
                                              &reset_bytes);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as the allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
                                   u64 *offset_ret)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key found_key;
        int ret;
        u64 length;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* Search from just past the end of the block group */
        key.objectid = cache->start + cache->length;
        key.type = 0;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        /* We should not find the exact match */
        if (!ret)
                ret = -EUCLEAN;
        if (ret < 0)
                goto out;

        ret = btrfs_previous_extent_item(root, path, cache->start);
        if (ret) {
                if (ret == 1) {
                        ret = 0;
                        *offset_ret = 0;
                }
                goto out;
        }

        btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

        if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
                length = found_key.offset;
        else
                length = fs_info->nodesize;

        if (!(found_key.objectid >= cache->start &&
              found_key.objectid + length <= cache->start + cache->length)) {
                ret = -EUCLEAN;
                goto out;
        }
        *offset_ret = found_key.objectid + length - cache->start;
        ret = 0;

out:
        btrfs_free_path(path);
        return ret;
}

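/*
 * Derive cache->alloc_offset for a newly loaded block group from the write
 * pointer of each stripe's zone, using WP_MISSING_DEV and WP_CONVENTIONAL
 * as sentinels, and cross-check conventional zones against the extent tree
 * via calculate_alloc_pointer().
 */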
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct extent_map_tree *em_tree = &fs_info->mapping_tree;
        struct extent_map *em;
        struct map_lookup *map;
        struct btrfs_device *device;
        u64 logical = cache->start;
        u64 length = cache->length;
        u64 physical = 0;
        int ret;
        int i;
        unsigned int nofs_flag;
        u64 *alloc_offsets = NULL;
        u64 last_alloc = 0;
        u32 num_sequential = 0, num_conventional = 0;

        if (!btrfs_is_zoned(fs_info))
                return 0;

        /* Sanity check */
        if (!IS_ALIGNED(length, fs_info->zone_size)) {
                btrfs_err(fs_info,
                "zoned: block group %llu len %llu unaligned to zone size %llu",
                          logical, length, fs_info->zone_size);
                return -EIO;
        }

        /* Get the chunk mapping */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, length);
        read_unlock(&em_tree->lock);

        if (!em)
                return -EINVAL;

        map = em->map_lookup;

        alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
        if (!alloc_offsets) {
                free_extent_map(em);
                return -ENOMEM;
        }

        for (i = 0; i < map->num_stripes; i++) {
                bool is_sequential;
                struct blk_zone zone;

                device = map->stripes[i].dev;
                physical = map->stripes[i].physical;

                if (device->bdev == NULL) {
                        alloc_offsets[i] = WP_MISSING_DEV;
                        continue;
                }

                is_sequential = btrfs_dev_is_sequential(device, physical);
                if (is_sequential)
                        num_sequential++;
                else
                        num_conventional++;

                if (!is_sequential) {
                        alloc_offsets[i] = WP_CONVENTIONAL;
                        continue;
                }

                /*
                 * This zone will be used for allocation, so mark this zone
                 * non-empty.
                 */
                btrfs_dev_clear_zone_empty(device, physical);

                /*
                 * The group is mapped to a sequential zone. Get the zone write
                 * pointer to determine the allocation offset within the zone.
                 */
                WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
                nofs_flag = memalloc_nofs_save();
                ret = btrfs_get_dev_zone(device, physical, &zone);
                memalloc_nofs_restore(nofs_flag);
                if (ret == -EIO || ret == -EOPNOTSUPP) {
                        ret = 0;
                        alloc_offsets[i] = WP_MISSING_DEV;
                        continue;
                } else if (ret) {
                        goto out;
                }

                switch (zone.cond) {
                case BLK_ZONE_COND_OFFLINE:
                case BLK_ZONE_COND_READONLY:
                        btrfs_err(fs_info,
                "zoned: offline/readonly zone %llu on device %s (devid %llu)",
                                  physical >> device->zone_info->zone_size_shift,
                                  rcu_str_deref(device->name), device->devid);
                        alloc_offsets[i] = WP_MISSING_DEV;
                        break;
                case BLK_ZONE_COND_EMPTY:
                        alloc_offsets[i] = 0;
                        break;
                case BLK_ZONE_COND_FULL:
                        alloc_offsets[i] = fs_info->zone_size;
                        break;
                default:
                        /* Partially used zone */
                        alloc_offsets[i] =
                                ((zone.wp - zone.start) << SECTOR_SHIFT);
                        break;
                }
        }

        if (num_sequential > 0)
                cache->seq_zone = true;

        if (num_conventional > 0) {
                /*
                 * Avoid calling calculate_alloc_pointer() for a new block
                 * group: it is of no use there, as the allocation offset of
                 * a new block group is always 0.
                 *
                 * Also, we have a lock chain of extent buffer lock ->
                 * chunk mutex. For a new block group, this function is
                 * called from btrfs_make_block_group() which is already
                 * taking the chunk mutex. Thus, we cannot call
                 * calculate_alloc_pointer() which takes extent buffer
                 * locks to avoid deadlock.
                 */
                if (new) {
                        cache->alloc_offset = 0;
                        goto out;
                }
                ret = calculate_alloc_pointer(cache, &last_alloc);
                if (ret || map->num_stripes == num_conventional) {
                        if (!ret)
                                cache->alloc_offset = last_alloc;
                        else
                                btrfs_err(fs_info,
                        "zoned: failed to determine allocation offset of bg %llu",
                                          cache->start);
                        goto out;
                }
        }

        switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
        case 0: /* single */
                cache->alloc_offset = alloc_offsets[0];
                break;
        case BTRFS_BLOCK_GROUP_DUP:
        case BTRFS_BLOCK_GROUP_RAID1:
        case BTRFS_BLOCK_GROUP_RAID0:
        case BTRFS_BLOCK_GROUP_RAID10:
        case BTRFS_BLOCK_GROUP_RAID5:
        case BTRFS_BLOCK_GROUP_RAID6:
                /* non-single profiles are not supported yet */
        default:
                btrfs_err(fs_info, "zoned: profile %s not yet supported",
                          btrfs_bg_type_to_raid_name(map->type));
                ret = -EINVAL;
                goto out;
        }

out:
        /* An extent is allocated after the write pointer */
        if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
                btrfs_err(fs_info,
                          "zoned: got wrong write pointer in BG %llu: %llu > %llu",
                          logical, last_alloc, cache->alloc_offset);
                ret = -EIO;
        }

        if (!ret)
                cache->meta_write_pointer = cache->alloc_offset + cache->start;

        kfree(alloc_offsets);
        free_extent_map(em);

        return ret;
}
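
/*
 * In zoned mode, space freed behind the allocation offset cannot be reused
 * until the zone is reset, so account it as zone_unusable instead of free
 * space: unusable = alloc_offset - used, free = length - alloc_offset.
 */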
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
        u64 unusable, free;

        if (!btrfs_is_zoned(cache->fs_info))
                return;

        WARN_ON(cache->bytes_super != 0);
        unusable = cache->alloc_offset - cache->used;
        free = cache->length - cache->alloc_offset;

        /* We only need ->free_space in ALLOC_SEQ block groups */
        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
        cache->free_space_ctl->free_space = free;
        cache->zone_unusable = unusable;

        /* Should not have any excluded extents. Just in case, though */
        btrfs_free_excluded_extents(cache);
}
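
/*
 * On zoned filesystems a metadata block freed in the same transaction it was
 * allocated cannot simply be skipped in the sequential write stream: zero
 * it, keep it dirty so it is still written out, and park it on the
 * transaction's releasing list until the transaction completes.
 */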
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
                            struct extent_buffer *eb)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;

        if (!btrfs_is_zoned(fs_info) ||
            btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
            !list_empty(&eb->release_list))
                return;

        set_extent_buffer_dirty(eb);
        set_extent_bits_nowait(&trans->dirty_pages, eb->start,
                               eb->start + eb->len - 1, EXTENT_DIRTY);
        memzero_extent_buffer(eb, 0, eb->len);
        set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

        spin_lock(&trans->releasing_ebs_lock);
        list_add_tail(&eb->release_list, &trans->releasing_ebs);
        spin_unlock(&trans->releasing_ebs_lock);
        atomic_inc(&eb->refs);
}

void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
        spin_lock(&trans->releasing_ebs_lock);
        while (!list_empty(&trans->releasing_ebs)) {
                struct extent_buffer *eb;

                eb = list_first_entry(&trans->releasing_ebs,
                                      struct extent_buffer, release_list);
                list_del_init(&eb->release_list);
                free_extent_buffer(eb);
        }
        spin_unlock(&trans->releasing_ebs_lock);
}
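
/*
 * Data writes into a sequential zone must be issued as REQ_OP_ZONE_APPEND,
 * so report whether the target extent's block group sits in a sequential
 * zone (and the device advertises a zone-append limit).
 */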
bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_block_group *cache;
        bool ret = false;

        if (!btrfs_is_zoned(fs_info))
                return false;

        if (!fs_info->max_zone_append_size)
                return false;

        if (!is_data_inode(&inode->vfs_inode))
                return false;

        cache = btrfs_lookup_block_group(fs_info, em->block_start);
        ASSERT(cache);
        if (!cache)
                return false;

        ret = cache->seq_zone;
        btrfs_put_block_group(cache);

        return ret;
}
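
/*
 * With zone append the device, not the filesystem, decides the final write
 * location: record the bio's completed physical position on the ordered
 * extent here, and later rewrite the logical addresses (extent map and
 * checksum entries) to match in btrfs_rewrite_logical_zoned().
 */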
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
                                 struct bio *bio)
{
        struct btrfs_ordered_extent *ordered;
        const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

        if (bio_op(bio) != REQ_OP_ZONE_APPEND)
                return;

        ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
        if (WARN_ON(!ordered))
                return;

        ordered->physical = physical;
        ordered->disk = bio->bi_disk;
        ordered->partno = bio->bi_partno;

        btrfs_put_ordered_extent(ordered);
}

void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
        struct btrfs_inode *inode = BTRFS_I(ordered->inode);
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct btrfs_ordered_sum *sum;
        struct block_device *bdev;
        u64 orig_logical = ordered->disk_bytenr;
        u64 *logical = NULL;
        int nr, stripe_len;

        /* Zoned devices should not have partitions. So, we can assume it is 0 */
        ASSERT(ordered->partno == 0);
        bdev = bdgrab(ordered->disk->part0);
        if (WARN_ON(!bdev))
                return;

        if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
                                     ordered->physical, &logical, &nr,
                                     &stripe_len)))
                goto out;

        WARN_ON(nr != 1);

        if (orig_logical == *logical)
                goto out;

        ordered->disk_bytenr = *logical;

        em_tree = &inode->extent_tree;
        write_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, ordered->file_offset,
                                   ordered->num_bytes);
        em->block_start = *logical;
        free_extent_map(em);
        write_unlock(&em_tree->lock);

        list_for_each_entry(sum, &ordered->list, list) {
                if (*logical < orig_logical)
                        sum->bytenr -= orig_logical - *logical;
                else
                        sum->bytenr += *logical - orig_logical;
        }

out:
        kfree(logical);
        bdput(bdev);
}
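
/*
 * Metadata in a zoned block group must be written strictly in order: an
 * extent buffer may only be submitted when its start equals the block
 * group's meta_write_pointer, which is advanced here and can be rolled back
 * with btrfs_revert_meta_write_pointer() if the submission fails.
 */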
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
                                    struct extent_buffer *eb,
                                    struct btrfs_block_group **cache_ret)
{
        struct btrfs_block_group *cache;
        bool ret = true;

        if (!btrfs_is_zoned(fs_info))
                return true;

        cache = *cache_ret;

        if (cache && (eb->start < cache->start ||
                      cache->start + cache->length <= eb->start)) {
                btrfs_put_block_group(cache);
                cache = NULL;
                *cache_ret = NULL;
        }

        if (!cache)
                cache = btrfs_lookup_block_group(fs_info, eb->start);

        if (cache) {
                if (cache->meta_write_pointer != eb->start) {
                        btrfs_put_block_group(cache);
                        cache = NULL;
                        ret = false;
                } else {
                        cache->meta_write_pointer = eb->start + eb->len;
                }

                *cache_ret = cache;
        }

        return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
                                     struct extent_buffer *eb)
{
        if (!btrfs_is_zoned(eb->fs_info) || !cache)
                return;

        ASSERT(cache->meta_write_pointer == eb->start + eb->len);
        cache->meta_write_pointer = eb->start;
}