Linux 6.12-rc1
[linux-2.6-block.git] / fs / xfs / scrub / bitmap.c
CommitLineData
739a2fe0 1// SPDX-License-Identifier: GPL-2.0-or-later
bc270b53 2/*
ecc73f8a 3 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
739a2fe0 4 * Author: Darrick J. Wong <djwong@kernel.org>
bc270b53
DW
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
3a3108ea 9#include "xfs_bit.h"
bc270b53
DW
10#include "xfs_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
0e93d3f4 13#include "xfs_btree.h"
306195f3 14#include "scrub/scrub.h"
bc270b53
DW
15#include "scrub/bitmap.h"
16
6772fcc8
DW
17#include <linux/interval_tree_generic.h>
18
6ece924b
DW
/* u64 bitmap */

/*
 * One node of a sparse 64-bit bitmap.  Each node records a single
 * contiguous run of set bits, keyed by its inclusive endpoints, and lives
 * in an interval tree (see INTERVAL_TREE_DEFINE below).
 */
struct xbitmap64_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t		bn_start;

	/* Last set bit of this interval. */
	uint64_t		bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint64_t		__bn_subtree_last;
};
33
/* Define our own interval tree type with uint64_t parameters. */

/* Interval tree key accessors: inclusive first/last set bit of a node. */
#define START(node) ((node)->bn_start)
#define LAST(node) ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
		uint64_t last);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
		uint64_t last);

/* Instantiate the xbitmap64_tree_* helpers declared above. */
INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap64_tree)
6772fcc8
DW
60
/* Iterate each interval of a bitmap.  Do not change the bitmap. */
/*
 * In-order rb-tree walk over every interval node.  The tree must not be
 * modified during iteration because the successor is read from the
 * current node.
 */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
			struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
			struct xbitmap64_node, bn_rbnode))
6772fcc8
DW
68
/*
 * Clear a range of this bitmap.
 *
 * Removes all set bits in [start, start + len - 1].  Intervals that
 * straddle a boundary of the clearing range are trimmed; an interval
 * that fully contains the range is split in two, which requires
 * allocating a new node.
 *
 * Returns 0 on success or -ENOMEM if node allocation fails; on allocation
 * failure the range may have been only partially cleared.
 */
int
xbitmap64_clear(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*bn;
	struct xbitmap64_node	*new_bn;
	uint64_t		last = start + len - 1;

	/* Pop overlapping intervals one at a time until none remain. */
	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint64_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			/*
			 * Remove/reinsert because the endpoints are the
			 * interval tree's search keys; truncate this node
			 * to the portion left of the clearing range.
			 */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);

			/* add an extent for the portion right of the range */
			new_bn = kmalloc(sizeof(struct xbitmap64_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap64_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
			/* No later interval can overlap [start, last]. */
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
117
/*
 * Set a range of this bitmap.
 *
 * Marks all bits in [start, start + len - 1] as set, merging with any
 * bordering intervals so that the tree stores coalesced runs.
 *
 * Returns 0 on success or -ENOMEM if node allocation fails.
 */
int
xbitmap64_set(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*left;
	struct xbitmap64_node	*right;
	uint64_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/*
	 * Clear out everything in the range we want to set.  This removes
	 * partial overlaps so that only whole-interval adjacency needs to
	 * be handled below.
	 */
	error = xbitmap64_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap64_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		/* Extend @left over the whole span, then free @right. */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap64_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap64_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}
177
86d969b4 178/* Free everything related to this bitmap. */
bc270b53 179void
6ece924b
DW
180xbitmap64_destroy(
181 struct xbitmap64 *bitmap)
bc270b53 182{
6ece924b 183 struct xbitmap64_node *bn;
bc270b53 184
6ece924b
DW
185 while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
186 xbitmap64_tree_remove(bn, &bitmap->xb_root);
6772fcc8 187 kfree(bn);
bc270b53
DW
188 }
189}
190
86d969b4
DW
/* Initialize this bitmap as an empty interval tree. */
void
xbitmap64_init(
	struct xbitmap64	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}
198
199/*
86d969b4 200 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
bc270b53
DW
201 *
202 * The intent is that callers will iterate the rmapbt for all of its records
86d969b4 203 * for a given owner to generate @bitmap; and iterate all the blocks of the
bc270b53 204 * metadata structures that are not being rebuilt and have the same rmapbt
86d969b4
DW
205 * owner to generate @sub. This routine subtracts all the extents
206 * mentioned in sub from all the extents linked in @bitmap, which leaves
207 * @bitmap as the list of blocks that are not accounted for, which we assume
bc270b53 208 * are the dead blocks of the old metadata structure. The blocks mentioned in
86d969b4
DW
209 * @bitmap can be reaped.
210 *
211 * This is the logical equivalent of bitmap &= ~sub.
bc270b53 212 */
bc270b53 213int
6ece924b
DW
214xbitmap64_disunion(
215 struct xbitmap64 *bitmap,
216 struct xbitmap64 *sub)
bc270b53 217{
6ece924b 218 struct xbitmap64_node *bn;
6772fcc8 219 int error;
bc270b53 220
6ece924b 221 if (xbitmap64_empty(bitmap) || xbitmap64_empty(sub))
bc270b53 222 return 0;
bc270b53 223
6ece924b
DW
224 for_each_xbitmap64_extent(bn, sub) {
225 error = xbitmap64_clear(bitmap, bn->bn_start,
6772fcc8
DW
226 bn->bn_last - bn->bn_start + 1);
227 if (error)
228 return error;
bc270b53
DW
229 }
230
6772fcc8 231 return 0;
bc270b53 232}
0e93d3f4 233
6ece924b
DW
234/* How many bits are set in this bitmap? */
235uint64_t
236xbitmap64_hweight(
237 struct xbitmap64 *bitmap)
238{
239 struct xbitmap64_node *bn;
240 uint64_t ret = 0;
241
242 for_each_xbitmap64_extent(bn, bitmap)
243 ret += bn->bn_last - bn->bn_start + 1;
244
245 return ret;
246}
247
248/* Call a function for every run of set bits in this bitmap. */
249int
250xbitmap64_walk(
251 struct xbitmap64 *bitmap,
252 xbitmap64_walk_fn fn,
253 void *priv)
254{
255 struct xbitmap64_node *bn;
256 int error = 0;
257
258 for_each_xbitmap64_extent(bn, bitmap) {
259 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
260 if (error)
261 break;
262 }
263
264 return error;
265}
266
267/* Does this bitmap have no bits set at all? */
268bool
269xbitmap64_empty(
270 struct xbitmap64 *bitmap)
271{
272 return bitmap->xb_root.rb_root.rb_node == NULL;
273}
274
275/* Is the start of the range set or clear? And for how long? */
276bool
277xbitmap64_test(
278 struct xbitmap64 *bitmap,
279 uint64_t start,
280 uint64_t *len)
281{
282 struct xbitmap64_node *bn;
283 uint64_t last = start + *len - 1;
284
285 bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
286 if (!bn)
287 return false;
288 if (bn->bn_start <= start) {
289 if (bn->bn_last < last)
290 *len = bn->bn_last - start + 1;
291 return true;
292 }
293 *len = bn->bn_start - start;
294 return false;
295}
296
/* u32 bitmap */

/*
 * One node of a sparse 32-bit bitmap: a single contiguous run of set
 * bits, keyed by its inclusive endpoints, stored in an interval tree.
 * Mirrors struct xbitmap64_node with narrower indices.
 */
struct xbitmap32_node {
	struct rb_node		bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint32_t		bn_start;

	/* Last set bit of this interval. */
	uint32_t		bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint32_t		__bn_subtree_last;
};
311
/* Define our own interval tree type with uint32_t parameters. */

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
		uint32_t last);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
		uint32_t last);

/*
 * Instantiate the xbitmap32_tree_* helpers; reuses the START/LAST key
 * accessors defined for the u64 flavor above.
 */
INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap32_tree)
6ece924b
DW
335
/* Iterate each interval of a bitmap.  Do not change the bitmap. */
/*
 * In-order rb-tree walk over every interval node.  The tree must not be
 * modified during iteration because the successor is read from the
 * current node.
 */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
			struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
			struct xbitmap32_node, bn_rbnode))
343
/*
 * Clear a range of this bitmap.
 *
 * Removes all set bits in [start, start + len - 1].  Intervals straddling
 * a boundary are trimmed; an interval fully containing the range is split
 * in two, which requires allocating a new node.
 *
 * Returns 0 on success or -ENOMEM if node allocation fails; on allocation
 * failure the range may have been only partially cleared.
 */
int
xbitmap32_clear(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*bn;
	struct xbitmap32_node	*new_bn;
	uint32_t		last = start + len - 1;

	/* Pop overlapping intervals one at a time until none remain. */
	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint32_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			/*
			 * Remove/reinsert because the endpoints are the
			 * interval tree's search keys; truncate this node
			 * to the portion left of the clearing range.
			 */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);

			/* add an extent for the portion right of the range */
			new_bn = kmalloc(sizeof(struct xbitmap32_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap32_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
			/* No later interval can overlap [start, last]. */
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}
392
/*
 * Set a range of this bitmap.
 *
 * Marks all bits in [start, start + len - 1] as set, merging with any
 * bordering intervals so that the tree stores coalesced runs.
 *
 * Returns 0 on success or -ENOMEM if node allocation fails.
 */
int
xbitmap32_set(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*left;
	struct xbitmap32_node	*right;
	uint32_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/*
	 * Clear out everything in the range we want to set.  This removes
	 * partial overlaps so that only whole-interval adjacency needs to
	 * be handled below.
	 */
	error = xbitmap32_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap32_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		/* Extend @left over the whole span, then free @right. */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap32_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap32_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}
452
453/* Free everything related to this bitmap. */
454void
455xbitmap32_destroy(
456 struct xbitmap32 *bitmap)
457{
458 struct xbitmap32_node *bn;
459
460 while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, 0, -1U))) {
461 xbitmap32_tree_remove(bn, &bitmap->xb_root);
462 kfree(bn);
463 }
464}
465
/* Initialize this bitmap as an empty interval tree. */
void
xbitmap32_init(
	struct xbitmap32	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}
473
474/*
475 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
476 *
477 * The intent is that callers will iterate the rmapbt for all of its records
478 * for a given owner to generate @bitmap; and iterate all the blocks of the
479 * metadata structures that are not being rebuilt and have the same rmapbt
480 * owner to generate @sub. This routine subtracts all the extents
481 * mentioned in sub from all the extents linked in @bitmap, which leaves
482 * @bitmap as the list of blocks that are not accounted for, which we assume
483 * are the dead blocks of the old metadata structure. The blocks mentioned in
484 * @bitmap can be reaped.
485 *
486 * This is the logical equivalent of bitmap &= ~sub.
487 */
488int
489xbitmap32_disunion(
490 struct xbitmap32 *bitmap,
491 struct xbitmap32 *sub)
492{
493 struct xbitmap32_node *bn;
494 int error;
495
496 if (xbitmap32_empty(bitmap) || xbitmap32_empty(sub))
497 return 0;
498
499 for_each_xbitmap32_extent(bn, sub) {
500 error = xbitmap32_clear(bitmap, bn->bn_start,
501 bn->bn_last - bn->bn_start + 1);
502 if (error)
503 return error;
504 }
505
506 return 0;
507}
508
509/* How many bits are set in this bitmap? */
510uint32_t
511xbitmap32_hweight(
512 struct xbitmap32 *bitmap)
513{
514 struct xbitmap32_node *bn;
515 uint32_t ret = 0;
516
517 for_each_xbitmap32_extent(bn, bitmap)
518 ret += bn->bn_last - bn->bn_start + 1;
519
520 return ret;
521}
522
523/* Call a function for every run of set bits in this bitmap. */
524int
525xbitmap32_walk(
526 struct xbitmap32 *bitmap,
527 xbitmap32_walk_fn fn,
528 void *priv)
529{
530 struct xbitmap32_node *bn;
531 int error = 0;
532
533 for_each_xbitmap32_extent(bn, bitmap) {
534 error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
535 if (error)
536 break;
537 }
538
539 return error;
540}
541
542/* Does this bitmap have no bits set at all? */
543bool
544xbitmap32_empty(
545 struct xbitmap32 *bitmap)
546{
547 return bitmap->xb_root.rb_root.rb_node == NULL;
548}
549
550/* Is the start of the range set or clear? And for how long? */
551bool
552xbitmap32_test(
553 struct xbitmap32 *bitmap,
554 uint32_t start,
555 uint32_t *len)
556{
557 struct xbitmap32_node *bn;
558 uint32_t last = start + *len - 1;
559
560 bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
561 if (!bn)
562 return false;
563 if (bn->bn_start <= start) {
564 if (bn->bn_last < last)
565 *len = bn->bn_last - start + 1;
566 return true;
567 }
568 *len = bn->bn_start - start;
569 return false;
570}
e4fd1def
DW
571
572/* Count the number of set regions in this bitmap. */
573uint32_t
574xbitmap32_count_set_regions(
575 struct xbitmap32 *bitmap)
576{
577 struct xbitmap32_node *bn;
578 uint32_t nr = 0;
579
580 for_each_xbitmap32_extent(bn, bitmap)
581 nr++;
582
583 return nr;
584}