fs/btrfs/scrub.c
c1d7c514 1// SPDX-License-Identifier: GPL-2.0
a2de733c 2/*
b6bfebc1 3 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 */
5
a2de733c 6#include <linux/blkdev.h>
558540c1 7#include <linux/ratelimit.h>
de2491fd 8#include <linux/sched/mm.h>
d5178578 9#include <crypto/hash.h>
a2de733c 10#include "ctree.h"
6e80d4f8 11#include "discard.h"
12#include "volumes.h"
13#include "disk-io.h"
14#include "ordered-data.h"
0ef8e451 15#include "transaction.h"
558540c1 16#include "backref.h"
5da6fcbc 17#include "extent_io.h"
ff023aac 18#include "dev-replace.h"
21adbd5c 19#include "check-integrity.h"
53b381b3 20#include "raid56.h"
aac0023c 21#include "block-group.h"
12659251 22#include "zoned.h"
c7f13d42 23#include "fs.h"
07e81dc9 24#include "accessors.h"
7c8ede16 25#include "file-item.h"
2fc6822c 26#include "scrub.h"
27
28/*
29 * This is only the first step towards a full-featured scrub. It reads all
30 * extents and super blocks and verifies the checksums. In case a bad checksum
31 * is found or the extent cannot be read, good data will be written back if
32 * any can be found.
33 *
34 * Future enhancements:
35 * - In case an unrepairable extent is encountered, track which files are
36 * affected and report them
a2de733c 37 * - track and record media errors, throw out bad devices
a2de733c 38 * - add a mode to also read unallocated space
39 */
40
b5d67f64 41struct scrub_block;
d9d181c1 42struct scrub_ctx;
a2de733c 43
ff023aac 44/*
45 * The following two values only influence the performance.
46 *
ff023aac 47 * The last one configures the number of parallel and outstanding I/O
c9d328c0 48 * operations. The first one configures an upper limit for the number
49 * of (dynamically allocated) pages that are added to a bio.
50 */
51#define SCRUB_SECTORS_PER_BIO 32 /* 128KiB per bio for 4KiB pages */
52#define SCRUB_BIOS_PER_SCTX 64 /* 8MiB per device in flight for 4KiB pages */
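/*
 * For illustration, assuming 4KiB sectors: 32 sectors per bio gives
 * 32 * 4KiB = 128KiB per bio, and 64 such bios in flight give
 * 64 * 128KiB = 8MiB of outstanding I/O per device.
 */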
53
54/*
0bb3acdc 55 * The following value times PAGE_SIZE needs to be large enough to match the
7a9e9987 56 * largest node/leaf/sector size that shall be supported.
7a9e9987 57 */
7e737cbc 58#define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
a2de733c 59
60#define SCRUB_MAX_PAGES (DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE))
61
62/*
63 * Maximum number of mirrors that can be available for all profiles counting
64 * the target device of dev-replace as one. During an active device replace
65 * procedure, the target device of the copy operation is a mirror for the
66 * filesystem data as well that can be used to read data in order to repair
67 * read errors on other disks.
68 *
69 * Current value is derived from RAID1C4 with 4 copies.
70 */
71#define BTRFS_MAX_MIRRORS (4 + 1)
72
af8e2d1d 73struct scrub_recover {
6f615018 74 refcount_t refs;
4c664611 75 struct btrfs_io_context *bioc;
76 u64 map_length;
77};
78
46343501 79struct scrub_sector {
b5d67f64 80 struct scrub_block *sblock;
5a6ac9ea 81 struct list_head list;
82 u64 flags; /* extent flags */
83 u64 generation;
84 /* Offset in bytes to @sblock. */
85 u32 offset;
57019345 86 atomic_t refs;
87 unsigned int have_csum:1;
88 unsigned int io_error:1;
a2de733c 89 u8 csum[BTRFS_CSUM_SIZE];
90
91 struct scrub_recover *recover;
92};
93
94struct scrub_bio {
95 int index;
d9d181c1 96 struct scrub_ctx *sctx;
a36cf8b8 97 struct btrfs_device *dev;
a2de733c 98 struct bio *bio;
4e4cbee9 99 blk_status_t status;
100 u64 logical;
101 u64 physical;
102 struct scrub_sector *sectors[SCRUB_SECTORS_PER_BIO];
103 int sector_count;
a2de733c 104 int next_free;
be539518 105 struct work_struct work;
106};
107
b5d67f64 108struct scrub_block {
109 /*
110 * Each page will have its page::private used to record the logical
111 * bytenr.
112 */
113 struct page *pages[SCRUB_MAX_PAGES];
46343501 114 struct scrub_sector *sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
8686c40e 115 struct btrfs_device *dev;
116 /* Logical bytenr of the sblock */
117 u64 logical;
118 u64 physical;
119 u64 physical_for_dev_replace;
120 /* Length of sblock in bytes */
121 u32 len;
7e737cbc 122 int sector_count;
8686c40e 123 int mirror_num;
f3e01e0e 124
46343501 125 atomic_t outstanding_sectors;
186debd6 126 refcount_t refs; /* free mem on transition to zero */
d9d181c1 127 struct scrub_ctx *sctx;
5a6ac9ea 128 struct scrub_parity *sparity;
129 struct {
130 unsigned int header_error:1;
131 unsigned int checksum_error:1;
132 unsigned int no_io_error_seen:1;
442a4f63 133 unsigned int generation_error:1; /* also sets header_error */
134
135 /* The following is for the data used to check parity */
136 /* It is for the data with checksum */
137 unsigned int data_corrected:1;
b5d67f64 138 };
be539518 139 struct work_struct work;
140};
141
142/* Used for the chunks with parity stripe such as RAID5/6 */
143struct scrub_parity {
144 struct scrub_ctx *sctx;
145
146 struct btrfs_device *scrub_dev;
147
148 u64 logic_start;
149
150 u64 logic_end;
151
152 int nsectors;
153
fa485d21 154 u32 stripe_len;
5a6ac9ea 155
78a76450 156 refcount_t refs;
5a6ac9ea 157
46343501 158 struct list_head sectors_list;
159
160 /* Work of parity check and repair */
be539518 161 struct work_struct work;
162
163 /* Mark the parity blocks which have data */
381b9b4c 164 unsigned long dbitmap;
165
166 /*
167 * Mark the parity blocks which have data, but where errors happened when
168 * reading or checking that data
169 */
381b9b4c 170 unsigned long ebitmap;
171};
172
d9d181c1 173struct scrub_ctx {
ff023aac 174 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
fb456252 175 struct btrfs_fs_info *fs_info;
176 int first_free;
177 int curr;
178 atomic_t bios_in_flight;
179 atomic_t workers_pending;
180 spinlock_t list_lock;
181 wait_queue_head_t list_wait;
182 struct list_head csum_list;
183 atomic_t cancel_req;
8628764e 184 int readonly;
e360d2f5 185 int sectors_per_bio;
63a212ab 186
187 /* State of IO submission throttling affecting the associated device */
188 ktime_t throttle_deadline;
189 u64 throttle_sent;
190
63a212ab 191 int is_dev_replace;
de17addc 192 u64 write_pointer;
193
194 struct scrub_bio *wr_curr_bio;
195 struct mutex wr_lock;
3fb99303 196 struct btrfs_device *wr_tgtdev;
2073c4c2 197 bool flush_all_writes;
63a212ab 198
199 /*
200 * statistics
201 */
202 struct btrfs_scrub_progress stat;
203 spinlock_t stat_lock;
204
205 /*
206 * Use a ref counter to avoid use-after-free issues. Scrub workers
207 * decrement bios_in_flight and workers_pending and then do a wakeup
208 * on the list_wait wait queue. We must ensure the main scrub task
209 * doesn't free the scrub context before or while the workers are
210 * doing the wakeup() call.
211 */
99f4cdb1 212 refcount_t refs;
213};
214
215struct scrub_warning {
216 struct btrfs_path *path;
217 u64 extent_item_size;
558540c1 218 const char *errstr;
6aa21263 219 u64 physical;
220 u64 logical;
221 struct btrfs_device *dev;
222};
223
224struct full_stripe_lock {
225 struct rb_node node;
226 u64 logical;
227 u64 refs;
228 struct mutex mutex;
229};
230
f3e01e0e 231#ifndef CONFIG_64BIT
67da05b3 232/* This structure is for architectures whose (void *) is smaller than u64 */
233struct scrub_page_private {
234 u64 logical;
235};
236#endif
237
238static int attach_scrub_page_private(struct page *page, u64 logical)
239{
240#ifdef CONFIG_64BIT
241 attach_page_private(page, (void *)logical);
242 return 0;
243#else
244 struct scrub_page_private *spp;
245
246 spp = kmalloc(sizeof(*spp), GFP_KERNEL);
247 if (!spp)
248 return -ENOMEM;
249 spp->logical = logical;
250 attach_page_private(page, (void *)spp);
251 return 0;
252#endif
253}
254
255static void detach_scrub_page_private(struct page *page)
256{
257#ifdef CONFIG_64BIT
258 detach_page_private(page);
259 return;
260#else
261 struct scrub_page_private *spp;
262
263 spp = detach_page_private(page);
264 kfree(spp);
265 return;
266#endif
267}
268
269static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
270 struct btrfs_device *dev,
271 u64 logical, u64 physical,
272 u64 physical_for_dev_replace,
273 int mirror_num)
274{
275 struct scrub_block *sblock;
276
277 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
278 if (!sblock)
279 return NULL;
280 refcount_set(&sblock->refs, 1);
281 sblock->sctx = sctx;
f3e01e0e 282 sblock->logical = logical;
283 sblock->physical = physical;
284 sblock->physical_for_dev_replace = physical_for_dev_replace;
285 sblock->dev = dev;
286 sblock->mirror_num = mirror_num;
15b88f6d 287 sblock->no_io_error_seen = 1;
288 /*
289 * Scrub_block::pages will be allocated at alloc_scrub_sector() when
290 * the corresponding page is not allocated.
291 */
292 return sblock;
293}
294
295/*
296 * Allocate a new scrub sector and attach it to @sblock.
297 *
298 * Will also allocate new pages for @sblock if needed.
299 */
300static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
02bc3927 301 u64 logical)
5dd3d8e4 302{
f3e01e0e 303 const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT;
304 struct scrub_sector *ssector;
305
306 /* We must never have scrub_block exceed U32_MAX in size. */
307 ASSERT(logical - sblock->logical < U32_MAX);
308
02bc3927 309 ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
310 if (!ssector)
311 return NULL;
312
313 /* Allocate a new page if the slot is not allocated */
314 if (!sblock->pages[page_index]) {
315 int ret;
316
02bc3927 317 sblock->pages[page_index] = alloc_page(GFP_KERNEL);
318 if (!sblock->pages[page_index]) {
319 kfree(ssector);
320 return NULL;
321 }
322 ret = attach_scrub_page_private(sblock->pages[page_index],
323 sblock->logical + (page_index << PAGE_SHIFT));
324 if (ret < 0) {
325 kfree(ssector);
326 __free_page(sblock->pages[page_index]);
327 sblock->pages[page_index] = NULL;
328 return NULL;
329 }
5dd3d8e4 330 }
f3e01e0e 331
332 atomic_set(&ssector->refs, 1);
333 ssector->sblock = sblock;
334 /* The sector to be added should not be used */
335 ASSERT(sblock->sectors[sblock->sector_count] == NULL);
8686c40e 336 ssector->offset = logical - sblock->logical;
f3e01e0e 337
338 /* The sector count must be smaller than the limit */
339 ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
340
341 sblock->sectors[sblock->sector_count] = ssector;
342 sblock->sector_count++;
eb2fad30 343 sblock->len += sblock->sctx->fs_info->sectorsize;
344
345 return ssector;
346}
347
348static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
349{
350 struct scrub_block *sblock = ssector->sblock;
8686c40e 351 pgoff_t index;
352 /*
353 * When calling this function, ssector must already be attached to the
354 * parent sblock.
355 */
356 ASSERT(sblock);
357
358 /* The range should be inside the sblock range */
8686c40e 359 ASSERT(ssector->offset < sblock->len);
eb2fad30 360
8686c40e 361 index = ssector->offset >> PAGE_SHIFT;
362 ASSERT(index < SCRUB_MAX_PAGES);
363 ASSERT(sblock->pages[index]);
364 ASSERT(PagePrivate(sblock->pages[index]));
365 return sblock->pages[index];
366}
367
368static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
369{
370 struct scrub_block *sblock = ssector->sblock;
371
372 /*
373 * When calling this function, ssector must be already attached to the
374 * parent sblock.
375 */
376 ASSERT(sblock);
377
378 /* The range should be inside the sblock range */
8686c40e 379 ASSERT(ssector->offset < sblock->len);
eb2fad30 380
8686c40e 381 return offset_in_page(ssector->offset);
382}
383
384static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
385{
386 return page_address(scrub_sector_get_page(ssector)) +
387 scrub_sector_get_page_offset(ssector);
388}
389
390static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
391 unsigned int len)
392{
393 return bio_add_page(bio, scrub_sector_get_page(ssector), len,
394 scrub_sector_get_page_offset(ssector));
395}
396
be50a8dd 397static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1dfa5005 398 struct scrub_block *sblocks_for_recheck[]);
34f5c8e9 399static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
400 struct scrub_block *sblock,
401 int retry_failed_mirror);
ba7cf988 402static void scrub_recheck_block_checksum(struct scrub_block *sblock);
b5d67f64 403static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
114ab50d 404 struct scrub_block *sblock_good);
46343501 405static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
b5d67f64 406 struct scrub_block *sblock_good,
46343501 407 int sector_num, int force_write);
ff023aac 408static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
409static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
410 int sector_num);
411static int scrub_checksum_data(struct scrub_block *sblock);
412static int scrub_checksum_tree_block(struct scrub_block *sblock);
413static int scrub_checksum_super(struct scrub_block *sblock);
b5d67f64 414static void scrub_block_put(struct scrub_block *sblock);
415static void scrub_sector_get(struct scrub_sector *sector);
416static void scrub_sector_put(struct scrub_sector *sector);
417static void scrub_parity_get(struct scrub_parity *sparity);
418static void scrub_parity_put(struct scrub_parity *sparity);
419static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
420 u64 physical, struct btrfs_device *dev, u64 flags,
421 u64 gen, int mirror_num, u8 *csum,
422 u64 physical_for_dev_replace);
4246a0b6 423static void scrub_bio_end_io(struct bio *bio);
be539518 424static void scrub_bio_end_io_worker(struct work_struct *work);
b5d67f64 425static void scrub_block_complete(struct scrub_block *sblock);
426static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
427 u64 extent_logical, u32 extent_len,
428 u64 *extent_physical,
429 struct btrfs_device **extent_dev,
430 int *extent_mirror_num);
431static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
432 struct scrub_sector *sector);
ff023aac 433static void scrub_wr_submit(struct scrub_ctx *sctx);
4246a0b6 434static void scrub_wr_bio_end_io(struct bio *bio);
be539518 435static void scrub_wr_bio_end_io_worker(struct work_struct *work);
f55985f4 436static void scrub_put_ctx(struct scrub_ctx *sctx);
1623edeb 437
46343501 438static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
762221f0 439{
440 return sector->recover &&
441 (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
762221f0 442}
1623edeb 443
444static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
445{
99f4cdb1 446 refcount_inc(&sctx->refs);
447 atomic_inc(&sctx->bios_in_flight);
448}
449
450static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
451{
452 atomic_dec(&sctx->bios_in_flight);
453 wake_up(&sctx->list_wait);
f55985f4 454 scrub_put_ctx(sctx);
455}
456
cb7ab021 457static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
458{
459 while (atomic_read(&fs_info->scrub_pause_req)) {
460 mutex_unlock(&fs_info->scrub_lock);
461 wait_event(fs_info->scrub_pause_wait,
462 atomic_read(&fs_info->scrub_pause_req) == 0);
463 mutex_lock(&fs_info->scrub_lock);
464 }
465}
466
0e22be89 467static void scrub_pause_on(struct btrfs_fs_info *fs_info)
468{
469 atomic_inc(&fs_info->scrubs_paused);
470 wake_up(&fs_info->scrub_pause_wait);
0e22be89 471}
cb7ab021 472
473static void scrub_pause_off(struct btrfs_fs_info *fs_info)
474{
475 mutex_lock(&fs_info->scrub_lock);
476 __scrub_blocked_if_needed(fs_info);
477 atomic_dec(&fs_info->scrubs_paused);
478 mutex_unlock(&fs_info->scrub_lock);
479
480 wake_up(&fs_info->scrub_pause_wait);
481}
482
483static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
484{
485 scrub_pause_on(fs_info);
486 scrub_pause_off(fs_info);
487}
488
489/*
490 * Insert new full stripe lock into full stripe locks tree
491 *
492 * Return pointer to existing or newly inserted full_stripe_lock structure if
493 * everything works well.
494 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
495 *
496 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
497 * function
498 */
499static struct full_stripe_lock *insert_full_stripe_lock(
500 struct btrfs_full_stripe_locks_tree *locks_root,
501 u64 fstripe_logical)
502{
503 struct rb_node **p;
504 struct rb_node *parent = NULL;
505 struct full_stripe_lock *entry;
506 struct full_stripe_lock *ret;
507
a32bf9a3 508 lockdep_assert_held(&locks_root->lock);
509
510 p = &locks_root->root.rb_node;
511 while (*p) {
512 parent = *p;
513 entry = rb_entry(parent, struct full_stripe_lock, node);
514 if (fstripe_logical < entry->logical) {
515 p = &(*p)->rb_left;
516 } else if (fstripe_logical > entry->logical) {
517 p = &(*p)->rb_right;
518 } else {
519 entry->refs++;
520 return entry;
521 }
522 }
523
524 /*
525 * Insert new lock.
a5fb1142 526 */
527 ret = kmalloc(sizeof(*ret), GFP_KERNEL);
528 if (!ret)
529 return ERR_PTR(-ENOMEM);
530 ret->logical = fstripe_logical;
531 ret->refs = 1;
532 mutex_init(&ret->mutex);
533
534 rb_link_node(&ret->node, parent, p);
535 rb_insert_color(&ret->node, &locks_root->root);
536 return ret;
537}
538
539/*
540 * Search for a full stripe lock of a block group
541 *
542 * Return pointer to existing full stripe lock if found
543 * Return NULL if not found
544 */
545static struct full_stripe_lock *search_full_stripe_lock(
546 struct btrfs_full_stripe_locks_tree *locks_root,
547 u64 fstripe_logical)
548{
549 struct rb_node *node;
550 struct full_stripe_lock *entry;
551
a32bf9a3 552 lockdep_assert_held(&locks_root->lock);
553
554 node = locks_root->root.rb_node;
555 while (node) {
556 entry = rb_entry(node, struct full_stripe_lock, node);
557 if (fstripe_logical < entry->logical)
558 node = node->rb_left;
559 else if (fstripe_logical > entry->logical)
560 node = node->rb_right;
561 else
562 return entry;
563 }
564 return NULL;
565}
566
567/*
568 * Helper to get full stripe logical from a normal bytenr.
569 *
570 * Caller must ensure @cache is a RAID56 block group.
571 */
32da5386 572static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
573{
574 u64 ret;
575
576 /*
577 * Due to chunk item size limit, full stripe length should not be
578 * larger than U32_MAX. Just a sanity check here.
579 */
580 WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
581
582 /*
583 * round_down() can only handle power of 2, while RAID56 full
584 * stripe length can be 64KiB * n, so we need to manually round down.
585 */
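 /*
  * For illustration (made-up numbers): with cache->start == 1M and
  * full_stripe_len == 192K (64K * 3), a bytenr of 1M + 400K gives
  * div64_u64(400K, 192K) == 2, so the full stripe starts at 1M + 384K.
  */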
586 ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
587 cache->full_stripe_len + cache->start;
588 return ret;
589}
590
591/*
592 * Lock a full stripe to avoid concurrency of recovery and read
593 *
594 * It's only used for profiles with parities (RAID5/6), for other profiles it
595 * does nothing.
596 *
597 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
598 * So caller must call unlock_full_stripe() at the same context.
599 *
600 * Return <0 if encounters error.
601 */
602static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
603 bool *locked_ret)
604{
32da5386 605 struct btrfs_block_group *bg_cache;
606 struct btrfs_full_stripe_locks_tree *locks_root;
607 struct full_stripe_lock *existing;
608 u64 fstripe_start;
609 int ret = 0;
610
611 *locked_ret = false;
612 bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
613 if (!bg_cache) {
614 ASSERT(0);
615 return -ENOENT;
616 }
617
618 /* Profiles not based on parity don't need full stripe lock */
619 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
620 goto out;
621 locks_root = &bg_cache->full_stripe_locks_root;
622
623 fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
624
625 /* Now insert the full stripe lock */
626 mutex_lock(&locks_root->lock);
627 existing = insert_full_stripe_lock(locks_root, fstripe_start);
628 mutex_unlock(&locks_root->lock);
629 if (IS_ERR(existing)) {
630 ret = PTR_ERR(existing);
631 goto out;
632 }
633 mutex_lock(&existing->mutex);
634 *locked_ret = true;
635out:
636 btrfs_put_block_group(bg_cache);
637 return ret;
638}
639
640/*
641 * Unlock a full stripe.
642 *
643 * NOTE: Caller must ensure it's the same context calling corresponding
644 * lock_full_stripe().
645 *
646 * Return 0 if we unlock full stripe without problem.
647 * Return <0 for error
648 */
649static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
650 bool locked)
651{
32da5386 652 struct btrfs_block_group *bg_cache;
653 struct btrfs_full_stripe_locks_tree *locks_root;
654 struct full_stripe_lock *fstripe_lock;
655 u64 fstripe_start;
656 bool freeit = false;
657 int ret = 0;
658
659 /* If we didn't acquire full stripe lock, no need to continue */
660 if (!locked)
661 return 0;
662
663 bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
664 if (!bg_cache) {
665 ASSERT(0);
666 return -ENOENT;
667 }
668 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
669 goto out;
670
671 locks_root = &bg_cache->full_stripe_locks_root;
672 fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
673
674 mutex_lock(&locks_root->lock);
675 fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
676 /* Unpaired unlock_full_stripe() detected */
677 if (!fstripe_lock) {
678 WARN_ON(1);
679 ret = -ENOENT;
680 mutex_unlock(&locks_root->lock);
681 goto out;
682 }
683
684 if (fstripe_lock->refs == 0) {
685 WARN_ON(1);
686 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
687 fstripe_lock->logical);
688 } else {
689 fstripe_lock->refs--;
690 }
691
692 if (fstripe_lock->refs == 0) {
693 rb_erase(&fstripe_lock->node, &locks_root->root);
694 freeit = true;
695 }
696 mutex_unlock(&locks_root->lock);
697
698 mutex_unlock(&fstripe_lock->mutex);
699 if (freeit)
700 kfree(fstripe_lock);
701out:
702 btrfs_put_block_group(bg_cache);
703 return ret;
704}
705
d9d181c1 706static void scrub_free_csums(struct scrub_ctx *sctx)
a2de733c 707{
d9d181c1 708 while (!list_empty(&sctx->csum_list)) {
a2de733c 709 struct btrfs_ordered_sum *sum;
d9d181c1 710 sum = list_first_entry(&sctx->csum_list,
a2de733c
AJ
711 struct btrfs_ordered_sum, list);
712 list_del(&sum->list);
713 kfree(sum);
714 }
715}
716
d9d181c1 717static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
718{
719 int i;
a2de733c 720
d9d181c1 721 if (!sctx)
722 return;
723
b5d67f64 724 /* this can happen when scrub is cancelled */
725 if (sctx->curr != -1) {
726 struct scrub_bio *sbio = sctx->bios[sctx->curr];
b5d67f64 727
eb2fad30 728 for (i = 0; i < sbio->sector_count; i++)
e360d2f5 729 scrub_block_put(sbio->sectors[i]->sblock);
730 bio_put(sbio->bio);
731 }
732
ff023aac 733 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
d9d181c1 734 struct scrub_bio *sbio = sctx->bios[i];
735
736 if (!sbio)
737 break;
738 kfree(sbio);
739 }
740
3fb99303 741 kfree(sctx->wr_curr_bio);
742 scrub_free_csums(sctx);
743 kfree(sctx);
744}
745
746static void scrub_put_ctx(struct scrub_ctx *sctx)
747{
99f4cdb1 748 if (refcount_dec_and_test(&sctx->refs))
749 scrub_free_ctx(sctx);
750}
751
752static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
753 struct btrfs_fs_info *fs_info, int is_dev_replace)
a2de733c 754{
d9d181c1 755 struct scrub_ctx *sctx;
a2de733c 756 int i;
a2de733c 757
58c4e173 758 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
d9d181c1 759 if (!sctx)
a2de733c 760 goto nomem;
99f4cdb1 761 refcount_set(&sctx->refs, 1);
63a212ab 762 sctx->is_dev_replace = is_dev_replace;
e360d2f5 763 sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
d9d181c1 764 sctx->curr = -1;
92f7ba43 765 sctx->fs_info = fs_info;
e49be14b 766 INIT_LIST_HEAD(&sctx->csum_list);
ff023aac 767 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
768 struct scrub_bio *sbio;
769
58c4e173 770 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
771 if (!sbio)
772 goto nomem;
d9d181c1 773 sctx->bios[i] = sbio;
a2de733c 774
a2de733c 775 sbio->index = i;
d9d181c1 776 sbio->sctx = sctx;
e360d2f5 777 sbio->sector_count = 0;
be539518 778 INIT_WORK(&sbio->work, scrub_bio_end_io_worker);
a2de733c 779
ff023aac 780 if (i != SCRUB_BIOS_PER_SCTX - 1)
d9d181c1 781 sctx->bios[i]->next_free = i + 1;
0ef8e451 782 else
783 sctx->bios[i]->next_free = -1;
784 }
785 sctx->first_free = 0;
786 atomic_set(&sctx->bios_in_flight, 0);
787 atomic_set(&sctx->workers_pending, 0);
d9d181c1 788 atomic_set(&sctx->cancel_req, 0);
789
790 spin_lock_init(&sctx->list_lock);
791 spin_lock_init(&sctx->stat_lock);
792 init_waitqueue_head(&sctx->list_wait);
eb3b5053 793 sctx->throttle_deadline = 0;
ff023aac 794
795 WARN_ON(sctx->wr_curr_bio != NULL);
796 mutex_init(&sctx->wr_lock);
797 sctx->wr_curr_bio = NULL;
8fcdac3f 798 if (is_dev_replace) {
ded56184 799 WARN_ON(!fs_info->dev_replace.tgtdev);
ded56184 800 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
2073c4c2 801 sctx->flush_all_writes = false;
ff023aac 802 }
8fcdac3f 803
d9d181c1 804 return sctx;
805
806nomem:
d9d181c1 807 scrub_free_ctx(sctx);
808 return ERR_PTR(-ENOMEM);
809}
810
811static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
812 u64 root, void *warn_ctx)
558540c1 813{
814 u32 nlink;
815 int ret;
816 int i;
de2491fd 817 unsigned nofs_flag;
818 struct extent_buffer *eb;
819 struct btrfs_inode_item *inode_item;
ff023aac 820 struct scrub_warning *swarn = warn_ctx;
fb456252 821 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
822 struct inode_fs_paths *ipath = NULL;
823 struct btrfs_root *local_root;
1d4c08e0 824 struct btrfs_key key;
558540c1 825
56e9357a 826 local_root = btrfs_get_fs_root(fs_info, root, true);
827 if (IS_ERR(local_root)) {
828 ret = PTR_ERR(local_root);
829 goto err;
830 }
831
832 /*
833 * this makes the path point to (inum INODE_ITEM ioff)
834 */
835 key.objectid = inum;
836 key.type = BTRFS_INODE_ITEM_KEY;
837 key.offset = 0;
838
839 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
558540c1 840 if (ret) {
00246528 841 btrfs_put_root(local_root);
842 btrfs_release_path(swarn->path);
843 goto err;
844 }
845
846 eb = swarn->path->nodes[0];
847 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
848 struct btrfs_inode_item);
849 nlink = btrfs_inode_nlink(eb, inode_item);
850 btrfs_release_path(swarn->path);
851
852 /*
853 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
854 * uses GFP_NOFS in this context, so we keep it consistent but it does
855 * not seem to be strictly necessary.
856 */
857 nofs_flag = memalloc_nofs_save();
558540c1 858 ipath = init_ipath(4096, local_root, swarn->path);
de2491fd 859 memalloc_nofs_restore(nofs_flag);
26bdef54 860 if (IS_ERR(ipath)) {
00246528 861 btrfs_put_root(local_root);
862 ret = PTR_ERR(ipath);
863 ipath = NULL;
864 goto err;
865 }
866 ret = paths_from_inode(inum, ipath);
867
868 if (ret < 0)
869 goto err;
870
871 /*
872 * we deliberately ignore the bit ipath might have been too small to
873 * hold all of the paths here
874 */
875 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
5d163e0e 876 btrfs_warn_in_rcu(fs_info,
8df507cb 877"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
5d163e0e 878 swarn->errstr, swarn->logical,
cb3e217b 879 btrfs_dev_name(swarn->dev),
6aa21263 880 swarn->physical,
5d163e0e 881 root, inum, offset,
8df507cb 882 fs_info->sectorsize, nlink,
5d163e0e 883 (char *)(unsigned long)ipath->fspath->val[i]);
558540c1 884
00246528 885 btrfs_put_root(local_root);
886 free_ipath(ipath);
887 return 0;
888
889err:
5d163e0e 890 btrfs_warn_in_rcu(fs_info,
6aa21263 891 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
5d163e0e 892 swarn->errstr, swarn->logical,
cb3e217b 893 btrfs_dev_name(swarn->dev),
6aa21263 894 swarn->physical,
5d163e0e 895 root, inum, offset, ret);
896
897 free_ipath(ipath);
898 return 0;
899}
900
b5d67f64 901static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
558540c1 902{
903 struct btrfs_device *dev;
904 struct btrfs_fs_info *fs_info;
905 struct btrfs_path *path;
906 struct btrfs_key found_key;
907 struct extent_buffer *eb;
908 struct btrfs_extent_item *ei;
909 struct scrub_warning swarn;
69917e43 910 unsigned long ptr = 0;
69917e43 911 u64 flags = 0;
558540c1 912 u64 ref_root;
69917e43 913 u32 item_size;
07c9a8e0 914 u8 ref_level = 0;
69917e43 915 int ret;
558540c1 916
7e737cbc 917 WARN_ON(sblock->sector_count < 1);
8686c40e 918 dev = sblock->dev;
fb456252 919 fs_info = sblock->sctx->fs_info;
a36cf8b8 920
921 /* Super block error, no need to search extent tree. */
922 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
923 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
cb3e217b 924 errstr, btrfs_dev_name(dev), sblock->physical);
925 return;
926 }
558540c1 927 path = btrfs_alloc_path();
928 if (!path)
929 return;
558540c1 930
931 swarn.physical = sblock->physical;
932 swarn.logical = sblock->logical;
558540c1 933 swarn.errstr = errstr;
a36cf8b8 934 swarn.dev = NULL;
558540c1 935
936 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
937 &flags);
938 if (ret < 0)
939 goto out;
940
941 swarn.extent_item_size = found_key.offset;
942
943 eb = path->nodes[0];
944 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
3212fa14 945 item_size = btrfs_item_size(eb, path->slots[0]);
558540c1 946
69917e43 947 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
558540c1 948 do {
949 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
950 item_size, &ref_root,
951 &ref_level);
ecaeb14b 952 btrfs_warn_in_rcu(fs_info,
6aa21263 953"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
5d163e0e 954 errstr, swarn.logical,
cb3e217b 955 btrfs_dev_name(dev),
6aa21263 956 swarn.physical,
957 ref_level ? "node" : "leaf",
958 ret < 0 ? -1 : ref_level,
959 ret < 0 ? -1 : ref_root);
960 } while (ret != 1);
d8fe29e9 961 btrfs_release_path(path);
558540c1 962 } else {
963 struct btrfs_backref_walk_ctx ctx = { 0 };
964
d8fe29e9 965 btrfs_release_path(path);
966
967 ctx.bytenr = found_key.objectid;
968 ctx.extent_item_pos = swarn.logical - found_key.objectid;
969 ctx.fs_info = fs_info;
970
558540c1 971 swarn.path = path;
a36cf8b8 972 swarn.dev = dev;
973
974 iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
975 }
976
977out:
978 btrfs_free_path(path);
979}
980
981static inline void scrub_get_recover(struct scrub_recover *recover)
982{
6f615018 983 refcount_inc(&recover->refs);
984}
985
986static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
987 struct scrub_recover *recover)
af8e2d1d 988{
6f615018 989 if (refcount_dec_and_test(&recover->refs)) {
e501bfe3 990 btrfs_bio_counter_dec(fs_info);
4c664611 991 btrfs_put_bioc(recover->bioc);
992 kfree(recover);
993 }
994}
995
a2de733c 996/*
b5d67f64 997 * scrub_handle_errored_block gets called when either verification of the
998 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
999 * case, this function handles all sectors in the bio, even though only one
1000 * may be bad.
1001 * The goal of this function is to repair the errored block by using the
1002 * contents of one of the mirrors.
a2de733c 1003 */
b5d67f64 1004static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
a2de733c 1005{
d9d181c1 1006 struct scrub_ctx *sctx = sblock_to_check->sctx;
8686c40e 1007 struct btrfs_device *dev = sblock_to_check->dev;
b5d67f64 1008 struct btrfs_fs_info *fs_info;
b5d67f64 1009 u64 logical;
1010 unsigned int failed_mirror_index;
1011 unsigned int is_metadata;
1012 unsigned int have_csum;
1013 /* One scrub_block for each mirror */
1014 struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };
1015 struct scrub_block *sblock_bad;
1016 int ret;
1017 int mirror_index;
7e737cbc 1018 int sector_num;
b5d67f64 1019 int success;
28d70e23 1020 bool full_stripe_locked;
7c3c7cb9 1021 unsigned int nofs_flag;
8bb1cf1b 1022 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
1023 DEFAULT_RATELIMIT_BURST);
1024
7e737cbc 1025 BUG_ON(sblock_to_check->sector_count < 1);
fb456252 1026 fs_info = sctx->fs_info;
7e737cbc 1027 if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
4ded4f63 1028 /*
e69bf81c 1029 * If we find an error in a super block, we just report it.
1030 * They will get written with the next transaction commit
1031 * anyway
1032 */
e69bf81c 1033 scrub_print_warning("super block error", sblock_to_check);
1034 spin_lock(&sctx->stat_lock);
1035 ++sctx->stat.super_errors;
1036 spin_unlock(&sctx->stat_lock);
e69bf81c 1037 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
1038 return 0;
1039 }
1040 logical = sblock_to_check->logical;
1041 ASSERT(sblock_to_check->mirror_num);
1042 failed_mirror_index = sblock_to_check->mirror_num - 1;
7e737cbc 1043 is_metadata = !(sblock_to_check->sectors[0]->flags &
b5d67f64 1044 BTRFS_EXTENT_FLAG_DATA);
7e737cbc 1045 have_csum = sblock_to_check->sectors[0]->have_csum;
13db62b7 1046
1047 if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
1048 return 0;
f7ef5287 1049
1050 /*
1051 * We must use GFP_NOFS because the scrub task might be waiting for a
1052 * worker task executing this function and in turn a transaction commit
1053 * might be waiting the scrub task to pause (which needs to wait for all
1054 * the worker tasks to complete before pausing).
1055 * We do allocations in the workers through insert_full_stripe_lock()
46343501 1056 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
7c3c7cb9
FM
1057 * this function.
1058 */
1059 nofs_flag = memalloc_nofs_save();
1060 /*
1061 * For RAID5/6, race can happen for a different device scrub thread.
1062 * For data corruption, the parity and data threads will both try
1063 * to recover the data.
1064 * The race can lead to a doubly counted csum error, or even an unrecoverable
1065 * error.
1066 */
1067 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1068 if (ret < 0) {
7c3c7cb9 1069 memalloc_nofs_restore(nofs_flag);
1070 spin_lock(&sctx->stat_lock);
1071 if (ret == -ENOMEM)
1072 sctx->stat.malloc_errors++;
1073 sctx->stat.read_errors++;
1074 sctx->stat.uncorrectable_errors++;
1075 spin_unlock(&sctx->stat_lock);
1076 return ret;
1077 }
1078
1079 /*
1080 * read all mirrors one after the other. This includes
1081 * re-reading the extent or metadata block that failed (which was
1082 * the reason this fixup code was called) another time,
8df507cb 1083 * sector by sector this time in order to know which sectors
1084 * caused I/O errors and which ones are good (for all mirrors).
1085 * It is the goal to handle the situation when more than one
1086 * mirror contains I/O errors, but the errors do not
1087 * overlap, i.e. the data can be repaired by selecting the
1088 * sectors from those mirrors without I/O error on the
1089 * particular sectors. One example (with blocks >= 2 * sectorsize)
1090 * would be that mirror #1 has an I/O error on the first sector,
1091 * the second sector is good, and mirror #2 has an I/O error on
1092 * the second sector, but the first sector is good.
1093 * Then the first sector of the first mirror can be repaired by
1094 * taking the first sector of the second mirror, and the
1095 * second sector of the second mirror can be repaired by
1096 * copying the contents of the 2nd sector of the 1st mirror.
1097 * One more note: if the sectors of one mirror contain I/O
1098 * errors, the checksum cannot be verified. In order to get
1099 * the best data for repairing, the first attempt is to find
1100 * a mirror without I/O errors and with a validated checksum.
8df507cb 1101 * Only if this is not possible, the sectors are picked from
1102 * mirrors with I/O errors without considering the checksum.
1103 * If the latter is the case, at the end, the checksum of the
1104 * repaired area is verified in order to correctly maintain
1105 * the statistics.
1106 */
1dfa5005 1107 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
1108 /*
1109 * Note: the two members refs and outstanding_sectors are not
1110 * used in the blocks that are used for the recheck procedure.
1111 *
1112 * But alloc_scrub_block() will initialize sblock::refs anyway,
1113 * so we can use scrub_block_put() to clean them up.
1114 *
1115 * And here we don't setup the physical/dev for the sblock yet,
1116 * they will be correctly initialized in scrub_setup_recheck_block().
15b88f6d 1117 */
1118 sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL,
1119 logical, 0, 0, mirror_index);
1120 if (!sblocks_for_recheck[mirror_index]) {
1121 spin_lock(&sctx->stat_lock);
1122 sctx->stat.malloc_errors++;
1123 sctx->stat.read_errors++;
1124 sctx->stat.uncorrectable_errors++;
1125 spin_unlock(&sctx->stat_lock);
1126 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1127 goto out;
1128 }
1129 }
1130
46343501 1131 /* Setup the context, map the logical blocks and alloc the sectors */
be50a8dd 1132 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
b5d67f64 1133 if (ret) {
1134 spin_lock(&sctx->stat_lock);
1135 sctx->stat.read_errors++;
1136 sctx->stat.uncorrectable_errors++;
1137 spin_unlock(&sctx->stat_lock);
a36cf8b8 1138 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1139 goto out;
1140 }
1141 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1dfa5005 1142 sblock_bad = sblocks_for_recheck[failed_mirror_index];
13db62b7 1143
b5d67f64 1144 /* build and submit the bios for the failed mirror, check checksums */
affe4a5a 1145 scrub_recheck_block(fs_info, sblock_bad, 1);
a2de733c 1146
b5d67f64
SB
1147 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1148 sblock_bad->no_io_error_seen) {
1149 /*
46343501 1150 * The error disappeared after reading sector by sector, or
1151 * the area was part of a huge bio and other parts of the
1152 * bio caused I/O errors, or the block layer merged several
1153 * read requests into one and the error is caused by a
1154 * different bio (usually one of the two latter cases is
1155 * the cause)
1156 */
1157 spin_lock(&sctx->stat_lock);
1158 sctx->stat.unverified_errors++;
5a6ac9ea 1159 sblock_to_check->data_corrected = 1;
d9d181c1 1160 spin_unlock(&sctx->stat_lock);
a2de733c 1161
1162 if (sctx->is_dev_replace)
1163 scrub_write_block_to_dev_replace(sblock_bad);
b5d67f64 1164 goto out;
a2de733c 1165 }
a2de733c 1166
b5d67f64 1167 if (!sblock_bad->no_io_error_seen) {
1168 spin_lock(&sctx->stat_lock);
1169 sctx->stat.read_errors++;
1170 spin_unlock(&sctx->stat_lock);
8bb1cf1b 1171 if (__ratelimit(&rs))
b5d67f64 1172 scrub_print_warning("i/o error", sblock_to_check);
a36cf8b8 1173 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
b5d67f64 1174 } else if (sblock_bad->checksum_error) {
1175 spin_lock(&sctx->stat_lock);
1176 sctx->stat.csum_errors++;
1177 spin_unlock(&sctx->stat_lock);
8bb1cf1b 1178 if (__ratelimit(&rs))
b5d67f64 1179 scrub_print_warning("checksum error", sblock_to_check);
a36cf8b8 1180 btrfs_dev_stat_inc_and_print(dev,
442a4f63 1181 BTRFS_DEV_STAT_CORRUPTION_ERRS);
b5d67f64 1182 } else if (sblock_bad->header_error) {
1183 spin_lock(&sctx->stat_lock);
1184 sctx->stat.verify_errors++;
1185 spin_unlock(&sctx->stat_lock);
8bb1cf1b 1186 if (__ratelimit(&rs))
1187 scrub_print_warning("checksum/header error",
1188 sblock_to_check);
442a4f63 1189 if (sblock_bad->generation_error)
a36cf8b8 1190 btrfs_dev_stat_inc_and_print(dev,
1191 BTRFS_DEV_STAT_GENERATION_ERRS);
1192 else
a36cf8b8 1193 btrfs_dev_stat_inc_and_print(dev,
442a4f63 1194 BTRFS_DEV_STAT_CORRUPTION_ERRS);
b5d67f64 1195 }
a2de733c 1196
1197 if (sctx->readonly) {
1198 ASSERT(!sctx->is_dev_replace);
1199 goto out;
1200 }
a2de733c 1201
1202 /*
1203 * now build and submit the bios for the other mirrors, check
1204 * checksums.
1205 * First try to pick the mirror which is completely without I/O
1206 * errors and also does not have a checksum error.
1207 * If one is found, and if a checksum is present, the full block
1208 * that is known to contain an error is rewritten. Afterwards
1209 * the block is known to be corrected.
1210 * If a mirror is found which is completely correct, and no
46343501 1211 * checksum is present, only those sectors are rewritten that had
b5d67f64 1212 * an I/O error in the block to be repaired, since it cannot be
46343501
QW
1213 * determined, which copy of the other sectors is better (and it
1214 * could happen otherwise that a correct sector would be
1215 * overwritten by a bad one).
1216 */
762221f0 1217 for (mirror_index = 0; ;mirror_index++) {
cb2ced73 1218 struct scrub_block *sblock_other;
b5d67f64 1219
cb2ced73
SB
1220 if (mirror_index == failed_mirror_index)
1221 continue;
1222
1223 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
7e737cbc 1224 if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
1225 if (mirror_index >= BTRFS_MAX_MIRRORS)
1226 break;
1dfa5005 1227 if (!sblocks_for_recheck[mirror_index]->sector_count)
1228 break;
1229
1dfa5005 1230 sblock_other = sblocks_for_recheck[mirror_index];
762221f0 1231 } else {
7e737cbc 1232 struct scrub_recover *r = sblock_bad->sectors[0]->recover;
4c664611 1233 int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
1234
1235 if (mirror_index >= max_allowed)
1236 break;
1dfa5005 1237 if (!sblocks_for_recheck[1]->sector_count)
1238 break;
1239
1240 ASSERT(failed_mirror_index == 0);
1dfa5005 1241 sblock_other = sblocks_for_recheck[1];
8686c40e 1242 sblock_other->mirror_num = 1 + mirror_index;
762221f0 1243 }
1244
1245 /* build and submit the bios, check checksums */
affe4a5a 1246 scrub_recheck_block(fs_info, sblock_other, 0);
1247
1248 if (!sblock_other->header_error &&
1249 !sblock_other->checksum_error &&
1250 sblock_other->no_io_error_seen) {
1251 if (sctx->is_dev_replace) {
1252 scrub_write_block_to_dev_replace(sblock_other);
114ab50d 1253 goto corrected_error;
ff023aac 1254 } else {
ff023aac 1255 ret = scrub_repair_block_from_good_copy(
1256 sblock_bad, sblock_other);
1257 if (!ret)
1258 goto corrected_error;
ff023aac 1259 }
1260 }
1261 }
a2de733c 1262
1263 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1264 goto did_not_correct_error;
1265
1266 /*
ff023aac 1267 * In case of I/O errors in the area that is supposed to be
1268 * repaired, continue by picking good copies of those sectors.
1269 * Select the good sectors from mirrors to rewrite bad sectors from
1270 * the area to fix. Afterwards verify the checksum of the block
1271 * that is supposed to be repaired. This verification step is
1272 * only done for the purpose of statistic counting and for the
1273 * final scrub report, whether errors remain.
1274 * A perfect algorithm could make use of the checksum and try
8df507cb 1275 * all possible combinations of sectors from the different mirrors
b5d67f64 1276 * until the checksum verification succeeds. For example, when
8df507cb 1277 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
b5d67f64 1278 * of mirror #2 is readable but the final checksum test fails,
8df507cb 1279 * then the 2nd sector of mirror #3 could be tried, whether now
01327610 1280 * the final checksum succeeds. But this would be a rare
1281 * exception and is therefore not implemented. At least it is
1282 * avoided that the good copy is overwritten.
1283 * A more useful improvement would be to pick the sectors
1284 * without I/O error based on sector sizes (512 bytes on legacy
8df507cb 1285 * disks) instead of on sectorsize. Then maybe 512 byte of one
b5d67f64 1286 * mirror could be repaired by taking 512 byte of a different
8df507cb 1287 * mirror, even if other 512 byte sectors in the same sectorsize
b5d67f64 1288 * area are unreadable.
a2de733c 1289 */
b5d67f64 1290 success = 1;
1291 for (sector_num = 0; sector_num < sblock_bad->sector_count;
1292 sector_num++) {
46343501 1293 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
b968fed1 1294 struct scrub_block *sblock_other = NULL;
b5d67f64 1295
1296 /* Skip no-io-error sectors in scrub */
1297 if (!sector_bad->io_error && !sctx->is_dev_replace)
a2de733c 1298 continue;
b5d67f64 1299
7e737cbc 1300 if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
1301 /*
1302 * In case of dev replace, if raid56 rebuild process
1303 * didn't work out correct data, then copy the content
1304 * in sblock_bad to make sure target device is identical
1305 * to source device, instead of writing garbage data in
1306 * sblock_for_recheck array to target device.
1307 */
1308 sblock_other = NULL;
1309 } else if (sector_bad->io_error) {
1310 /* Try to find no-io-error sector in mirrors */
1311 for (mirror_index = 0;
1312 mirror_index < BTRFS_MAX_MIRRORS &&
1dfa5005 1313 sblocks_for_recheck[mirror_index]->sector_count > 0;
b968fed1 1314 mirror_index++) {
1dfa5005 1315 if (!sblocks_for_recheck[mirror_index]->
7e737cbc 1316 sectors[sector_num]->io_error) {
1dfa5005 1317 sblock_other = sblocks_for_recheck[mirror_index];
b968fed1 1318 break;
1319 }
1320 }
1321 if (!sblock_other)
1322 success = 0;
96e36920 1323 }
a2de733c 1324
1325 if (sctx->is_dev_replace) {
1326 /*
1327 * Did not find a mirror to fetch the sector from.
1328 * scrub_write_sector_to_dev_replace() handles this
1329 * case (sector->io_error), by filling the block with
1330 * zeros before submitting the write request
1331 */
1332 if (!sblock_other)
1333 sblock_other = sblock_bad;
1334
1335 if (scrub_write_sector_to_dev_replace(sblock_other,
1336 sector_num) != 0) {
e37abe97 1337 atomic64_inc(
0b246afa 1338 &fs_info->dev_replace.num_write_errors);
1339 success = 0;
1340 }
1341 } else if (sblock_other) {
1342 ret = scrub_repair_sector_from_good_copy(sblock_bad,
1343 sblock_other,
1344 sector_num, 0);
b968fed1 1345 if (0 == ret)
46343501 1346 sector_bad->io_error = 0;
1347 else
1348 success = 0;
b5d67f64 1349 }
a2de733c 1350 }
a2de733c 1351
b968fed1 1352 if (success && !sctx->is_dev_replace) {
1353 if (is_metadata || have_csum) {
1354 /*
1355 * need to verify the checksum now that all
1356 * sectors on disk are repaired (the write
1357 * request for data to be repaired is on its way).
1358 * Just be lazy and use scrub_recheck_block()
1359 * which re-reads the data before the checksum
1360 * is verified, but most likely the data comes out
1361 * of the page cache.
1362 */
affe4a5a 1363 scrub_recheck_block(fs_info, sblock_bad, 1);
34f5c8e9 1364 if (!sblock_bad->header_error &&
1365 !sblock_bad->checksum_error &&
1366 sblock_bad->no_io_error_seen)
1367 goto corrected_error;
1368 else
1369 goto did_not_correct_error;
1370 } else {
1371corrected_error:
1372 spin_lock(&sctx->stat_lock);
1373 sctx->stat.corrected_errors++;
5a6ac9ea 1374 sblock_to_check->data_corrected = 1;
d9d181c1 1375 spin_unlock(&sctx->stat_lock);
1376 btrfs_err_rl_in_rcu(fs_info,
1377 "fixed up error at logical %llu on dev %s",
cb3e217b 1378 logical, btrfs_dev_name(dev));
8628764e 1379 }
1380 } else {
1381did_not_correct_error:
1382 spin_lock(&sctx->stat_lock);
1383 sctx->stat.uncorrectable_errors++;
1384 spin_unlock(&sctx->stat_lock);
1385 btrfs_err_rl_in_rcu(fs_info,
1386 "unable to fixup (regular) error at logical %llu on dev %s",
cb3e217b 1387 logical, btrfs_dev_name(dev));
96e36920 1388 }
a2de733c 1389
b5d67f64 1390out:
1391 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
1392 struct scrub_block *sblock = sblocks_for_recheck[mirror_index];
1393 struct scrub_recover *recover;
1394 int sector_index;
1395
1396 /* Not allocated, continue checking the next mirror */
1397 if (!sblock)
1398 continue;
1399
1400 for (sector_index = 0; sector_index < sblock->sector_count;
1401 sector_index++) {
1402 /*
1403 * Here we just cleanup the recover, each sector will be
1404 * properly cleaned up by later scrub_block_put()
1405 */
1406 recover = sblock->sectors[sector_index]->recover;
1407 if (recover) {
1408 scrub_put_recover(fs_info, recover);
1409 sblock->sectors[sector_index]->recover = NULL;
7a9e9987 1410 }
b5d67f64 1411 }
1dfa5005 1412 scrub_block_put(sblock);
b5d67f64 1413 }
a2de733c 1414
28d70e23 1415 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
7c3c7cb9 1416 memalloc_nofs_restore(nofs_flag);
1417 if (ret < 0)
1418 return ret;
1419 return 0;
1420}
a2de733c 1421
4c664611 1422static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
af8e2d1d 1423{
4c664611 1424 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
10f11900 1425 return 2;
4c664611 1426 else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
1427 return 3;
1428 else
4c664611 1429 return (int)bioc->num_stripes;
1430}
1431
1432static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1433 u64 *raid_map,
1434 int nstripes, int mirror,
1435 int *stripe_index,
1436 u64 *stripe_offset)
1437{
1438 int i;
1439
ffe2d203 1440 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1441 /* RAID5/6 */
1442 for (i = 0; i < nstripes; i++) {
1443 if (raid_map[i] == RAID6_Q_STRIPE ||
1444 raid_map[i] == RAID5_P_STRIPE)
1445 continue;
1446
1447 if (logical >= raid_map[i] &&
ff18a4af 1448 logical < raid_map[i] + BTRFS_STRIPE_LEN)
1449 break;
1450 }
1451
1452 *stripe_index = i;
1453 *stripe_offset = logical - raid_map[i];
1454 } else {
1455 /* The other RAID type */
1456 *stripe_index = mirror;
1457 *stripe_offset = 0;
1458 }
1459}
1460
be50a8dd 1461static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1dfa5005 1462 struct scrub_block *sblocks_for_recheck[])
b5d67f64 1463{
be50a8dd 1464 struct scrub_ctx *sctx = original_sblock->sctx;
fb456252 1465 struct btrfs_fs_info *fs_info = sctx->fs_info;
8686c40e 1466 u64 logical = original_sblock->logical;
7e737cbc 1467 u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
1468 u64 generation = original_sblock->sectors[0]->generation;
1469 u64 flags = original_sblock->sectors[0]->flags;
1470 u64 have_csum = original_sblock->sectors[0]->have_csum;
af8e2d1d 1471 struct scrub_recover *recover;
4c664611 1472 struct btrfs_io_context *bioc;
1473 u64 sublen;
1474 u64 mapped_length;
1475 u64 stripe_offset;
1476 int stripe_index;
7e737cbc 1477 int sector_index = 0;
b5d67f64 1478 int mirror_index;
af8e2d1d 1479 int nmirrors;
1480 int ret;
1481
b5d67f64 1482 while (length > 0) {
8df507cb 1483 sublen = min_t(u64, length, fs_info->sectorsize);
af8e2d1d 1484 mapped_length = sublen;
4c664611 1485 bioc = NULL;
a2de733c 1486
b5d67f64 1487 /*
1488 * With a length of sectorsize, each returned stripe represents
1489 * one mirror
b5d67f64 1490 */
e501bfe3 1491 btrfs_bio_counter_inc_blocked(fs_info);
cf8cddd3 1492 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1493 logical, &mapped_length, &bioc);
1494 if (ret || !bioc || mapped_length < sublen) {
1495 btrfs_put_bioc(bioc);
e501bfe3 1496 btrfs_bio_counter_dec(fs_info);
1497 return -EIO;
1498 }
a2de733c 1499
fe10158c 1500 recover = kzalloc(sizeof(struct scrub_recover), GFP_KERNEL);
af8e2d1d 1501 if (!recover) {
4c664611 1502 btrfs_put_bioc(bioc);
e501bfe3 1503 btrfs_bio_counter_dec(fs_info);
1504 return -ENOMEM;
1505 }
1506
6f615018 1507 refcount_set(&recover->refs, 1);
4c664611 1508 recover->bioc = bioc;
1509 recover->map_length = mapped_length;
1510
7e737cbc 1511 ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);
af8e2d1d 1512
4c664611 1513 nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
10f11900 1514
af8e2d1d 1515 for (mirror_index = 0; mirror_index < nmirrors;
1516 mirror_index++) {
1517 struct scrub_block *sblock;
46343501 1518 struct scrub_sector *sector;
b5d67f64 1519
1dfa5005 1520 sblock = sblocks_for_recheck[mirror_index];
7a9e9987 1521 sblock->sctx = sctx;
4734b7ed 1522
02bc3927 1523 sector = alloc_scrub_sector(sblock, logical);
46343501 1524 if (!sector) {
1525 spin_lock(&sctx->stat_lock);
1526 sctx->stat.malloc_errors++;
1527 spin_unlock(&sctx->stat_lock);
e501bfe3 1528 scrub_put_recover(fs_info, recover);
1529 return -ENOMEM;
1530 }
1531 sector->flags = flags;
1532 sector->generation = generation;
46343501 1533 sector->have_csum = have_csum;
4734b7ed 1534 if (have_csum)
46343501 1535 memcpy(sector->csum,
7e737cbc 1536 original_sblock->sectors[0]->csum,
2ae0c2d8 1537 sctx->fs_info->csum_size);
af8e2d1d 1538
10f11900 1539 scrub_stripe_index_and_offset(logical,
1540 bioc->map_type,
1541 bioc->raid_map,
1542 bioc->num_stripes -
1543 bioc->num_tgtdevs,
1544 mirror_index,
1545 &stripe_index,
1546 &stripe_offset);
1547 /*
1548 * We're at the first sector, also populate @sblock
1549 * physical and dev.
1550 */
1551 if (sector_index == 0) {
1552 sblock->physical =
1553 bioc->stripes[stripe_index].physical +
1554 stripe_offset;
1555 sblock->dev = bioc->stripes[stripe_index].dev;
1556 sblock->physical_for_dev_replace =
1557 original_sblock->physical_for_dev_replace;
1558 }
af8e2d1d 1559
7e737cbc 1560 BUG_ON(sector_index >= original_sblock->sector_count);
af8e2d1d 1561 scrub_get_recover(recover);
46343501 1562 sector->recover = recover;
b5d67f64 1563 }
e501bfe3 1564 scrub_put_recover(fs_info, recover);
1565 length -= sublen;
1566 logical += sublen;
7e737cbc 1567 sector_index++;
1568 }
1569
1570 return 0;
1571}
1572
4246a0b6 1573static void scrub_bio_wait_endio(struct bio *bio)
af8e2d1d 1574{
b4ff5ad7 1575 complete(bio->bi_private);
1576}
1577
1578static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1579 struct bio *bio,
46343501 1580 struct scrub_sector *sector)
af8e2d1d 1581{
b4ff5ad7 1582 DECLARE_COMPLETION_ONSTACK(done);
af8e2d1d 1583
1584 bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >>
1585 SECTOR_SHIFT;
1586 bio->bi_private = &done;
1587 bio->bi_end_io = scrub_bio_wait_endio;
f1c29379 1588 raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num);
af8e2d1d 1589
1590 wait_for_completion_io(&done);
1591 return blk_status_to_errno(bio->bi_status);
1592}
1593
1594static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1595 struct scrub_block *sblock)
1596{
46343501 1597 struct scrub_sector *first_sector = sblock->sectors[0];
6ca1765b 1598 struct bio *bio;
7e737cbc 1599 int i;
6ca1765b 1600
46343501 1601 /* All sectors in sblock belong to the same stripe on the same device. */
8686c40e
QW
1602 ASSERT(sblock->dev);
1603 if (!sblock->dev->bdev)
6ca1765b
LB
1604 goto out;
1605
8686c40e 1606 bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
6ca1765b 1607
7e737cbc 1608 for (i = 0; i < sblock->sector_count; i++) {
46343501 1609 struct scrub_sector *sector = sblock->sectors[i];
6ca1765b 1610
eb2fad30 1611 bio_add_scrub_sector(bio, sector, fs_info->sectorsize);
6ca1765b
LB
1612 }
1613
46343501 1614 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
6ca1765b
LB
1615 bio_put(bio);
1616 goto out;
1617 }
1618
1619 bio_put(bio);
1620
1621 scrub_recheck_block_checksum(sblock);
1622
1623 return;
1624out:
7e737cbc
QW
1625 for (i = 0; i < sblock->sector_count; i++)
1626 sblock->sectors[i]->io_error = 1;
6ca1765b
LB
1627
1628 sblock->no_io_error_seen = 0;
1629}
1630
b5d67f64 1631/*
46343501
QW
1632 * This function checks the on-disk data for checksum errors, header errors
1633 * and read I/O errors. If any I/O error happens, the exact sectors that are
1634 * errored are marked as bad. The goal is to enable scrub to take the
1635 * non-errored sectors from all the mirrors, so that the sectors that are
1636 * errored in the mirror just handled can be repaired.
b5d67f64 1637 */
34f5c8e9 1638static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
affe4a5a
ZL
1639 struct scrub_block *sblock,
1640 int retry_failed_mirror)
96e36920 1641{
7e737cbc 1642 int i;
96e36920 1643
b5d67f64 1644 sblock->no_io_error_seen = 1;
96e36920 1645
6ca1765b 1646 /* short cut for raid56 */
7e737cbc 1647 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
6ca1765b
LB
1648 return scrub_recheck_block_on_raid56(fs_info, sblock);
1649
7e737cbc 1650 for (i = 0; i < sblock->sector_count; i++) {
46343501 1651 struct scrub_sector *sector = sblock->sectors[i];
f3b8a7f3
CH
1652 struct bio bio;
1653 struct bio_vec bvec;
b5d67f64 1654
8686c40e 1655 if (sblock->dev->bdev == NULL) {
46343501 1656 sector->io_error = 1;
ea9947b4
SB
1657 sblock->no_io_error_seen = 0;
1658 continue;
1659 }
1660
8686c40e 1661 bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ);
eb2fad30 1662 bio_add_scrub_sector(&bio, sector, fs_info->sectorsize);
8686c40e
QW
1663 bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >>
1664 SECTOR_SHIFT;
b5d67f64 1665
f3b8a7f3
CH
1666 btrfsic_check_bio(&bio);
1667 if (submit_bio_wait(&bio)) {
46343501 1668 sector->io_error = 1;
6ca1765b 1669 sblock->no_io_error_seen = 0;
af8e2d1d 1670 }
33879d45 1671
f3b8a7f3 1672 bio_uninit(&bio);
b5d67f64 1673 }
96e36920 1674
b5d67f64 1675 if (sblock->no_io_error_seen)
ba7cf988 1676 scrub_recheck_block_checksum(sblock);
a2de733c
AJ
1677}
1678
46343501 1679static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
17a9be2f 1680{
8686c40e 1681 struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices;
17a9be2f
MX
1682 int ret;
1683
44880fdc 1684 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
17a9be2f
MX
1685 return !ret;
1686}
1687
ba7cf988 1688static void scrub_recheck_block_checksum(struct scrub_block *sblock)
a2de733c 1689{
ba7cf988
ZL
1690 sblock->header_error = 0;
1691 sblock->checksum_error = 0;
1692 sblock->generation_error = 0;
b5d67f64 1693
7e737cbc 1694 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
ba7cf988
ZL
1695 scrub_checksum_data(sblock);
1696 else
1697 scrub_checksum_tree_block(sblock);
a2de733c
AJ
1698}
1699
b5d67f64 1700static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
114ab50d 1701 struct scrub_block *sblock_good)
b5d67f64 1702{
7e737cbc 1703 int i;
b5d67f64 1704 int ret = 0;
96e36920 1705
7e737cbc 1706 for (i = 0; i < sblock_bad->sector_count; i++) {
b5d67f64 1707 int ret_sub;
96e36920 1708
46343501
QW
1709 ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
1710 sblock_good, i, 1);
b5d67f64
SB
1711 if (ret_sub)
1712 ret = ret_sub;
a2de733c 1713 }
b5d67f64
SB
1714
1715 return ret;
1716}
1717
46343501
QW
1718static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
1719 struct scrub_block *sblock_good,
1720 int sector_num, int force_write)
b5d67f64 1721{
46343501
QW
1722 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
1723 struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
0b246afa 1724 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
8df507cb 1725 const u32 sectorsize = fs_info->sectorsize;
b5d67f64 1726
b5d67f64 1727 if (force_write || sblock_bad->header_error ||
46343501 1728 sblock_bad->checksum_error || sector_bad->io_error) {
f77dcc0d
CH
1729 struct bio bio;
1730 struct bio_vec bvec;
b5d67f64 1731 int ret;
b5d67f64 1732
8686c40e 1733 if (!sblock_bad->dev->bdev) {
0b246afa 1734 btrfs_warn_rl(fs_info,
5d163e0e 1735 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
ff023aac
SB
1736 return -EIO;
1737 }
1738
8686c40e
QW
1739 bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
1740 bio.bi_iter.bi_sector = (sblock_bad->physical +
1741 sector_bad->offset) >> SECTOR_SHIFT;
eb2fad30 1742 ret = bio_add_scrub_sector(&bio, sector_good, sectorsize);
b5d67f64 1743
f77dcc0d
CH
1744 btrfsic_check_bio(&bio);
1745 ret = submit_bio_wait(&bio);
1746 bio_uninit(&bio);
b5d67f64 1747
f77dcc0d 1748 if (ret) {
8686c40e 1749 btrfs_dev_stat_inc_and_print(sblock_bad->dev,
442a4f63 1750 BTRFS_DEV_STAT_WRITE_ERRS);
e37abe97 1751 atomic64_inc(&fs_info->dev_replace.num_write_errors);
442a4f63
SB
1752 return -EIO;
1753 }
a2de733c
AJ
1754 }
1755
b5d67f64
SB
1756 return 0;
1757}
1758
ff023aac
SB
1759static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1760{
0b246afa 1761 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
7e737cbc 1762 int i;
ff023aac 1763
5a6ac9ea
MX
1764 /*
1765 * This block is used for the check of the parity on the source device,
1766 * so the data needn't be written into the destination device.
1767 */
1768 if (sblock->sparity)
1769 return;
1770
7e737cbc 1771 for (i = 0; i < sblock->sector_count; i++) {
ff023aac
SB
1772 int ret;
1773
46343501 1774 ret = scrub_write_sector_to_dev_replace(sblock, i);
ff023aac 1775 if (ret)
e37abe97 1776 atomic64_inc(&fs_info->dev_replace.num_write_errors);
ff023aac
SB
1777 }
1778}
1779
46343501 1780static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
ff023aac 1781{
eb2fad30 1782 const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
46343501 1783 struct scrub_sector *sector = sblock->sectors[sector_num];
ff023aac 1784
46343501 1785 if (sector->io_error)
eb2fad30 1786 memset(scrub_sector_get_kaddr(sector), 0, sectorsize);
ff023aac 1787
46343501 1788 return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
ff023aac
SB
1789}
1790
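/*
 * Illustrative note on the helper below: on zoned targets, writes must land
 * exactly at the zone's write pointer. If, for example, the write pointer
 * sits at 1MiB while the sector to copy belongs at physical 1MiB + 256KiB,
 * the 256KiB gap is zero-filled first so that the following write is still
 * sequential. (The example offsets are made up for illustration.)
 */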
de17addc
NA
1791static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
1792{
1793 int ret = 0;
1794 u64 length;
1795
1796 if (!btrfs_is_zoned(sctx->fs_info))
1797 return 0;
1798
7db1c5d1
NA
1799 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
1800 return 0;
1801
de17addc
NA
1802 if (sctx->write_pointer < physical) {
1803 length = physical - sctx->write_pointer;
1804
1805 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
1806 sctx->write_pointer, length);
1807 if (!ret)
1808 sctx->write_pointer = physical;
1809 }
1810 return ret;
1811}
1812
f3e01e0e
QW
1813static void scrub_block_get(struct scrub_block *sblock)
1814{
1815 refcount_inc(&sblock->refs);
1816}
1817
46343501
QW
1818static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
1819 struct scrub_sector *sector)
ff023aac 1820{
8686c40e 1821 struct scrub_block *sblock = sector->sblock;
ff023aac
SB
1822 struct scrub_bio *sbio;
1823 int ret;
8df507cb 1824 const u32 sectorsize = sctx->fs_info->sectorsize;
ff023aac 1825
3fb99303 1826 mutex_lock(&sctx->wr_lock);
ff023aac 1827again:
3fb99303
DS
1828 if (!sctx->wr_curr_bio) {
1829 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
58c4e173 1830 GFP_KERNEL);
3fb99303
DS
1831 if (!sctx->wr_curr_bio) {
1832 mutex_unlock(&sctx->wr_lock);
ff023aac
SB
1833 return -ENOMEM;
1834 }
3fb99303 1835 sctx->wr_curr_bio->sctx = sctx;
e360d2f5 1836 sctx->wr_curr_bio->sector_count = 0;
ff023aac 1837 }
3fb99303 1838 sbio = sctx->wr_curr_bio;
e360d2f5 1839 if (sbio->sector_count == 0) {
8686c40e
QW
1840 ret = fill_writer_pointer_gap(sctx, sector->offset +
1841 sblock->physical_for_dev_replace);
de17addc
NA
1842 if (ret) {
1843 mutex_unlock(&sctx->wr_lock);
1844 return ret;
1845 }
1846
8686c40e
QW
1847 sbio->physical = sblock->physical_for_dev_replace + sector->offset;
1848 sbio->logical = sblock->logical + sector->offset;
3fb99303 1849 sbio->dev = sctx->wr_tgtdev;
75c17e66
CH
1850 if (!sbio->bio) {
1851 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
1852 REQ_OP_WRITE, GFP_NOFS);
ff023aac 1853 }
75c17e66
CH
1854 sbio->bio->bi_private = sbio;
1855 sbio->bio->bi_end_io = scrub_wr_bio_end_io;
1856 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
4e4cbee9 1857 sbio->status = 0;
e360d2f5 1858 } else if (sbio->physical + sbio->sector_count * sectorsize !=
8686c40e 1859 sblock->physical_for_dev_replace + sector->offset ||
e360d2f5 1860 sbio->logical + sbio->sector_count * sectorsize !=
8686c40e 1861 sblock->logical + sector->offset) {
ff023aac
SB
1862 scrub_wr_submit(sctx);
1863 goto again;
1864 }
1865
eb2fad30 1866 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
8df507cb 1867 if (ret != sectorsize) {
e360d2f5 1868 if (sbio->sector_count < 1) {
ff023aac
SB
1869 bio_put(sbio->bio);
1870 sbio->bio = NULL;
3fb99303 1871 mutex_unlock(&sctx->wr_lock);
ff023aac
SB
1872 return -EIO;
1873 }
1874 scrub_wr_submit(sctx);
1875 goto again;
1876 }
1877
e360d2f5 1878 sbio->sectors[sbio->sector_count] = sector;
46343501 1879 scrub_sector_get(sector);
f3e01e0e
QW
1880 /*
1881 * Since the scrub_sector no longer holds a page of its own but uses
1882 * sblock::pages, we have to ensure the sblock is not freed before our
1883 * write bio finishes.
1884 */
1885 scrub_block_get(sector->sblock);
1886
e360d2f5
QW
1887 sbio->sector_count++;
1888 if (sbio->sector_count == sctx->sectors_per_bio)
ff023aac 1889 scrub_wr_submit(sctx);
3fb99303 1890 mutex_unlock(&sctx->wr_lock);
ff023aac
SB
1891
1892 return 0;
1893}
1894
1895static void scrub_wr_submit(struct scrub_ctx *sctx)
1896{
ff023aac
SB
1897 struct scrub_bio *sbio;
1898
3fb99303 1899 if (!sctx->wr_curr_bio)
ff023aac
SB
1900 return;
1901
3fb99303
DS
1902 sbio = sctx->wr_curr_bio;
1903 sctx->wr_curr_bio = NULL;
ff023aac
SB
1904 scrub_pending_bio_inc(sctx);
1905 /* Process all writes in a single worker thread. Then the block layer
1906 * orders the requests before sending them to the driver, which
1907 * doubled the write performance on spinning disks when measured
1908 * with Linux 3.5. */
58ff51f1
CH
1909 btrfsic_check_bio(sbio->bio);
1910 submit_bio(sbio->bio);
de17addc
NA
1911
1912 if (btrfs_is_zoned(sctx->fs_info))
e360d2f5 1913 sctx->write_pointer = sbio->physical + sbio->sector_count *
8df507cb 1914 sctx->fs_info->sectorsize;
ff023aac
SB
1915}
1916
4246a0b6 1917static void scrub_wr_bio_end_io(struct bio *bio)
ff023aac
SB
1918{
1919 struct scrub_bio *sbio = bio->bi_private;
fb456252 1920 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
ff023aac 1921
4e4cbee9 1922 sbio->status = bio->bi_status;
ff023aac
SB
1923 sbio->bio = bio;
1924
be539518
CH
1925 INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker);
1926 queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
ff023aac
SB
1927}
1928
be539518 1929static void scrub_wr_bio_end_io_worker(struct work_struct *work)
ff023aac
SB
1930{
1931 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1932 struct scrub_ctx *sctx = sbio->sctx;
1933 int i;
1934
e360d2f5 1935 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
4e4cbee9 1936 if (sbio->status) {
ff023aac 1937 struct btrfs_dev_replace *dev_replace =
fb456252 1938 &sbio->sctx->fs_info->dev_replace;
ff023aac 1939
e360d2f5
QW
1940 for (i = 0; i < sbio->sector_count; i++) {
1941 struct scrub_sector *sector = sbio->sectors[i];
ff023aac 1942
46343501 1943 sector->io_error = 1;
e37abe97 1944 atomic64_inc(&dev_replace->num_write_errors);
ff023aac
SB
1945 }
1946 }
1947
f3e01e0e
QW
1948 /*
1949 * In scrub_add_sector_to_wr_bio() we grabbed an extra ref for the sblock;
1950 * now that the write bio has completed, drop that ref.
1951 */
1952 for (i = 0; i < sbio->sector_count; i++) {
1953 scrub_block_put(sbio->sectors[i]->sblock);
e360d2f5 1954 scrub_sector_put(sbio->sectors[i]);
f3e01e0e 1955 }
ff023aac
SB
1956
1957 bio_put(sbio->bio);
1958 kfree(sbio);
1959 scrub_pending_bio_dec(sctx);
1960}
1961
1962static int scrub_checksum(struct scrub_block *sblock)
b5d67f64
SB
1963{
1964 u64 flags;
1965 int ret;
1966
ba7cf988
ZL
1967 /*
1968 * No need to initialize these stats currently,
1969 * because this function only uses the return value
1970 * instead of these stats' values.
1971 *
1972 * Todo:
1973 * always use stats
1974 */
1975 sblock->header_error = 0;
1976 sblock->generation_error = 0;
1977 sblock->checksum_error = 0;
1978
7e737cbc
QW
1979 WARN_ON(sblock->sector_count < 1);
1980 flags = sblock->sectors[0]->flags;
b5d67f64
SB
1981 ret = 0;
1982 if (flags & BTRFS_EXTENT_FLAG_DATA)
1983 ret = scrub_checksum_data(sblock);
1984 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1985 ret = scrub_checksum_tree_block(sblock);
1986 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
e69bf81c 1987 ret = scrub_checksum_super(sblock);
b5d67f64
SB
1988 else
1989 WARN_ON(1);
1990 if (ret)
1991 scrub_handle_errored_block(sblock);
ff023aac
SB
1992
1993 return ret;
a2de733c
AJ
1994}
1995
b5d67f64 1996static int scrub_checksum_data(struct scrub_block *sblock)
a2de733c 1997{
d9d181c1 1998 struct scrub_ctx *sctx = sblock->sctx;
d5178578
JT
1999 struct btrfs_fs_info *fs_info = sctx->fs_info;
2000 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
a2de733c 2001 u8 csum[BTRFS_CSUM_SIZE];
46343501 2002 struct scrub_sector *sector;
b0485252 2003 char *kaddr;
a2de733c 2004
7e737cbc 2005 BUG_ON(sblock->sector_count < 1);
46343501
QW
2006 sector = sblock->sectors[0];
2007 if (!sector->have_csum)
a2de733c
AJ
2008 return 0;
2009
eb2fad30 2010 kaddr = scrub_sector_get_kaddr(sector);
b5d67f64 2011
771aba0d
DS
2012 shash->tfm = fs_info->csum_shash;
2013 crypto_shash_init(shash);
b5d67f64 2014
b29dca44 2015 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
a2de733c 2016
46343501 2017 if (memcmp(csum, sector->csum, fs_info->csum_size))
b29dca44 2018 sblock->checksum_error = 1;
ba7cf988 2019 return sblock->checksum_error;
a2de733c
AJ
2020}
2021
b5d67f64 2022static int scrub_checksum_tree_block(struct scrub_block *sblock)
a2de733c 2023{
d9d181c1 2024 struct scrub_ctx *sctx = sblock->sctx;
a2de733c 2025 struct btrfs_header *h;
0b246afa 2026 struct btrfs_fs_info *fs_info = sctx->fs_info;
d5178578 2027 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
b5d67f64
SB
2028 u8 calculated_csum[BTRFS_CSUM_SIZE];
2029 u8 on_disk_csum[BTRFS_CSUM_SIZE];
53f3251d
QW
2030 /*
2031 * This is done in sectorsize steps even for metadata as there's a
2032 * constraint for nodesize to be aligned to sectorsize. This will need
2033 * to change so we don't misuse data and metadata units like that.
2034 */
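	/*
	 * Worked example (illustrative): with a 16KiB nodesize and a 4KiB
	 * sectorsize, num_sectors = 4; the hash below covers bytes
	 * [BTRFS_CSUM_SIZE, 4KiB) of the first sector and then sectors 1-3
	 * in full.
	 */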
2035 const u32 sectorsize = sctx->fs_info->sectorsize;
2036 const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
521e1022 2037 int i;
46343501 2038 struct scrub_sector *sector;
b0485252 2039 char *kaddr;
d5178578 2040
7e737cbc 2041 BUG_ON(sblock->sector_count < 1);
53f3251d 2042
46343501 2043 /* Each member in sectors is just one sector */
7e737cbc 2044 ASSERT(sblock->sector_count == num_sectors);
53f3251d 2045
46343501 2046 sector = sblock->sectors[0];
eb2fad30 2047 kaddr = scrub_sector_get_kaddr(sector);
b0485252 2048 h = (struct btrfs_header *)kaddr;
2ae0c2d8 2049 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
a2de733c
AJ
2050
2051 /*
2052 * we don't use the getter functions here, as we
2053 * a) don't have an extent buffer and
2054 * b) the page is already kmapped
2055 */
28232909 2056 if (sblock->logical != btrfs_stack_header_bytenr(h)) {
ba7cf988 2057 sblock->header_error = 1;
28232909
QW
2058 btrfs_warn_rl(fs_info,
2059 "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
2060 sblock->logical, sblock->mirror_num,
2061 btrfs_stack_header_bytenr(h),
2062 sblock->logical);
2063 goto out;
ba7cf988 2064 }
a2de733c 2065
28232909 2066 if (!scrub_check_fsid(h->fsid, sector)) {
ba7cf988 2067 sblock->header_error = 1;
28232909
QW
2068 btrfs_warn_rl(fs_info,
2069 "tree block %llu mirror %u has bad fsid, has %pU want %pU",
2070 sblock->logical, sblock->mirror_num,
2071 h->fsid, sblock->dev->fs_devices->fsid);
2072 goto out;
2073 }
a2de733c 2074
28232909 2075 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) {
ba7cf988 2076 sblock->header_error = 1;
28232909
QW
2077 btrfs_warn_rl(fs_info,
2078 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
2079 sblock->logical, sblock->mirror_num,
2080 h->chunk_tree_uuid, fs_info->chunk_tree_uuid);
2081 goto out;
2082 }
a2de733c 2083
521e1022
DS
2084 shash->tfm = fs_info->csum_shash;
2085 crypto_shash_init(shash);
2086 crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
53f3251d 2087 sectorsize - BTRFS_CSUM_SIZE);
b5d67f64 2088
53f3251d 2089 for (i = 1; i < num_sectors; i++) {
eb2fad30 2090 kaddr = scrub_sector_get_kaddr(sblock->sectors[i]);
53f3251d 2091 crypto_shash_update(shash, kaddr, sectorsize);
b5d67f64
SB
2092 }
2093
d5178578 2094 crypto_shash_final(shash, calculated_csum);
28232909 2095 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) {
ba7cf988 2096 sblock->checksum_error = 1;
28232909
QW
2097 btrfs_warn_rl(fs_info,
2098 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
2099 sblock->logical, sblock->mirror_num,
2100 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
2101 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
2102 goto out;
2103 }
2104
2105 if (sector->generation != btrfs_stack_header_generation(h)) {
2106 sblock->header_error = 1;
2107 sblock->generation_error = 1;
2108 btrfs_warn_rl(fs_info,
2109 "tree block %llu mirror %u has bad generation, has %llu want %llu",
2110 sblock->logical, sblock->mirror_num,
2111 btrfs_stack_header_generation(h),
2112 sector->generation);
2113 }
a2de733c 2114
28232909 2115out:
ba7cf988 2116 return sblock->header_error || sblock->checksum_error;
a2de733c
AJ
2117}
2118
b5d67f64 2119static int scrub_checksum_super(struct scrub_block *sblock)
a2de733c
AJ
2120{
2121 struct btrfs_super_block *s;
d9d181c1 2122 struct scrub_ctx *sctx = sblock->sctx;
d5178578
JT
2123 struct btrfs_fs_info *fs_info = sctx->fs_info;
2124 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
b5d67f64 2125 u8 calculated_csum[BTRFS_CSUM_SIZE];
46343501 2126 struct scrub_sector *sector;
b0485252 2127 char *kaddr;
442a4f63
SB
2128 int fail_gen = 0;
2129 int fail_cor = 0;
d5178578 2130
7e737cbc 2131 BUG_ON(sblock->sector_count < 1);
46343501 2132 sector = sblock->sectors[0];
eb2fad30 2133 kaddr = scrub_sector_get_kaddr(sector);
b0485252 2134 s = (struct btrfs_super_block *)kaddr;
a2de733c 2135
8686c40e 2136 if (sblock->logical != btrfs_super_bytenr(s))
442a4f63 2137 ++fail_cor;
a2de733c 2138
46343501 2139 if (sector->generation != btrfs_super_generation(s))
442a4f63 2140 ++fail_gen;
a2de733c 2141
46343501 2142 if (!scrub_check_fsid(s->fsid, sector))
442a4f63 2143 ++fail_cor;
a2de733c 2144
83cf6d5e
DS
2145 shash->tfm = fs_info->csum_shash;
2146 crypto_shash_init(shash);
2147 crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
2148 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
b5d67f64 2149
2ae0c2d8 2150 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
442a4f63 2151 ++fail_cor;
a2de733c 2152
442a4f63 2153 return fail_cor + fail_gen;
a2de733c
AJ
2154}
2155
b5d67f64
SB
2156static void scrub_block_put(struct scrub_block *sblock)
2157{
186debd6 2158 if (refcount_dec_and_test(&sblock->refs)) {
b5d67f64
SB
2159 int i;
2160
5a6ac9ea
MX
2161 if (sblock->sparity)
2162 scrub_parity_put(sblock->sparity);
2163
7e737cbc 2164 for (i = 0; i < sblock->sector_count; i++)
46343501 2165 scrub_sector_put(sblock->sectors[i]);
f3e01e0e
QW
2166 for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) {
2167 if (sblock->pages[i]) {
2168 detach_scrub_page_private(sblock->pages[i]);
2169 __free_page(sblock->pages[i]);
2170 }
2171 }
b5d67f64
SB
2172 kfree(sblock);
2173 }
2174}
2175
46343501 2176static void scrub_sector_get(struct scrub_sector *sector)
7a9e9987 2177{
46343501 2178 atomic_inc(&sector->refs);
7a9e9987
SB
2179}
2180
46343501 2181static void scrub_sector_put(struct scrub_sector *sector)
7a9e9987 2182{
eb2fad30 2183 if (atomic_dec_and_test(&sector->refs))
46343501 2184 kfree(sector);
7a9e9987
SB
2185}
2186
eb3b5053
DS
2187/*
2188 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
2189 * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
2190 */
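/*
 * Worked example (illustrative): with scrub_speed_max set to 64MiB/s,
 * div = min(64, max(1, 64MiB / 16MiB)) = 4, so the one second timeslice is
 * split into 250ms intervals and each interval may submit up to
 * bwlimit / div = 16MiB before the submitter sleeps until the next interval.
 */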
2191static void scrub_throttle(struct scrub_ctx *sctx)
2192{
2193 const int time_slice = 1000;
2194 struct scrub_bio *sbio;
2195 struct btrfs_device *device;
2196 s64 delta;
2197 ktime_t now;
2198 u32 div;
2199 u64 bwlimit;
2200
2201 sbio = sctx->bios[sctx->curr];
2202 device = sbio->dev;
2203 bwlimit = READ_ONCE(device->scrub_speed_max);
2204 if (bwlimit == 0)
2205 return;
2206
2207 /*
2208 * Slice is divided into intervals when the IO is submitted, adjust by
2209 * bwlimit and maximum of 64 intervals.
2210 */
2211 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
2212 div = min_t(u32, 64, div);
2213
2214 /* Start new epoch, set deadline */
2215 now = ktime_get();
2216 if (sctx->throttle_deadline == 0) {
2217 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
2218 sctx->throttle_sent = 0;
2219 }
2220
2221 /* Still in the time to send? */
2222 if (ktime_before(now, sctx->throttle_deadline)) {
2223 /* If current bio is within the limit, send it */
2224 sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
2225 if (sctx->throttle_sent <= div_u64(bwlimit, div))
2226 return;
2227
2228 /* We're over the limit, sleep until the rest of the slice */
2229 delta = ktime_ms_delta(sctx->throttle_deadline, now);
2230 } else {
2231 /* New request after deadline, start new epoch */
2232 delta = 0;
2233 }
2234
2235 if (delta) {
2236 long timeout;
2237
2238 timeout = div_u64(delta * HZ, 1000);
2239 schedule_timeout_interruptible(timeout);
2240 }
2241
2242 /* Next call will start the deadline period */
2243 sctx->throttle_deadline = 0;
2244}
2245
d9d181c1 2246static void scrub_submit(struct scrub_ctx *sctx)
a2de733c
AJ
2247{
2248 struct scrub_bio *sbio;
2249
d9d181c1 2250 if (sctx->curr == -1)
1623edeb 2251 return;
a2de733c 2252
eb3b5053
DS
2253 scrub_throttle(sctx);
2254
d9d181c1
SB
2255 sbio = sctx->bios[sctx->curr];
2256 sctx->curr = -1;
b6bfebc1 2257 scrub_pending_bio_inc(sctx);
58ff51f1
CH
2258 btrfsic_check_bio(sbio->bio);
2259 submit_bio(sbio->bio);
a2de733c
AJ
2260}
2261
46343501
QW
2262static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
2263 struct scrub_sector *sector)
a2de733c 2264{
46343501 2265 struct scrub_block *sblock = sector->sblock;
a2de733c 2266 struct scrub_bio *sbio;
8df507cb 2267 const u32 sectorsize = sctx->fs_info->sectorsize;
69f4cb52 2268 int ret;
a2de733c
AJ
2269
2270again:
2271 /*
2272 * grab a fresh bio or wait for one to become available
2273 */
d9d181c1
SB
2274 while (sctx->curr == -1) {
2275 spin_lock(&sctx->list_lock);
2276 sctx->curr = sctx->first_free;
2277 if (sctx->curr != -1) {
2278 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2279 sctx->bios[sctx->curr]->next_free = -1;
e360d2f5 2280 sctx->bios[sctx->curr]->sector_count = 0;
d9d181c1 2281 spin_unlock(&sctx->list_lock);
a2de733c 2282 } else {
d9d181c1
SB
2283 spin_unlock(&sctx->list_lock);
2284 wait_event(sctx->list_wait, sctx->first_free != -1);
a2de733c
AJ
2285 }
2286 }
d9d181c1 2287 sbio = sctx->bios[sctx->curr];
e360d2f5 2288 if (sbio->sector_count == 0) {
8686c40e
QW
2289 sbio->physical = sblock->physical + sector->offset;
2290 sbio->logical = sblock->logical + sector->offset;
2291 sbio->dev = sblock->dev;
75c17e66
CH
2292 if (!sbio->bio) {
2293 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
2294 REQ_OP_READ, GFP_NOFS);
b5d67f64 2295 }
75c17e66
CH
2296 sbio->bio->bi_private = sbio;
2297 sbio->bio->bi_end_io = scrub_bio_end_io;
2298 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
4e4cbee9 2299 sbio->status = 0;
e360d2f5 2300 } else if (sbio->physical + sbio->sector_count * sectorsize !=
8686c40e 2301 sblock->physical + sector->offset ||
e360d2f5 2302 sbio->logical + sbio->sector_count * sectorsize !=
8686c40e
QW
2303 sblock->logical + sector->offset ||
2304 sbio->dev != sblock->dev) {
d9d181c1 2305 scrub_submit(sctx);
a2de733c
AJ
2306 goto again;
2307 }
69f4cb52 2308
e360d2f5 2309 sbio->sectors[sbio->sector_count] = sector;
eb2fad30 2310 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
8df507cb 2311 if (ret != sectorsize) {
e360d2f5 2312 if (sbio->sector_count < 1) {
b5d67f64
SB
2313 bio_put(sbio->bio);
2314 sbio->bio = NULL;
2315 return -EIO;
2316 }
d9d181c1 2317 scrub_submit(sctx);
69f4cb52
AJ
2318 goto again;
2319 }
2320
ff023aac 2321 scrub_block_get(sblock); /* one for the page added to the bio */
46343501 2322 atomic_inc(&sblock->outstanding_sectors);
e360d2f5
QW
2323 sbio->sector_count++;
2324 if (sbio->sector_count == sctx->sectors_per_bio)
d9d181c1 2325 scrub_submit(sctx);
b5d67f64
SB
2326
2327 return 0;
2328}
2329
22365979 2330static void scrub_missing_raid56_end_io(struct bio *bio)
73ff61db
OS
2331{
2332 struct scrub_block *sblock = bio->bi_private;
fb456252 2333 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
73ff61db 2334
f1c29379 2335 btrfs_bio_counter_dec(fs_info);
4e4cbee9 2336 if (bio->bi_status)
73ff61db
OS
2337 sblock->no_io_error_seen = 0;
2338
4673272f
ST
2339 bio_put(bio);
2340
be539518 2341 queue_work(fs_info->scrub_workers, &sblock->work);
73ff61db
OS
2342}
2343
be539518 2344static void scrub_missing_raid56_worker(struct work_struct *work)
73ff61db
OS
2345{
2346 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2347 struct scrub_ctx *sctx = sblock->sctx;
0b246afa 2348 struct btrfs_fs_info *fs_info = sctx->fs_info;
73ff61db
OS
2349 u64 logical;
2350 struct btrfs_device *dev;
2351
8686c40e
QW
2352 logical = sblock->logical;
2353 dev = sblock->dev;
73ff61db 2354
affe4a5a 2355 if (sblock->no_io_error_seen)
ba7cf988 2356 scrub_recheck_block_checksum(sblock);
73ff61db
OS
2357
2358 if (!sblock->no_io_error_seen) {
2359 spin_lock(&sctx->stat_lock);
2360 sctx->stat.read_errors++;
2361 spin_unlock(&sctx->stat_lock);
0b246afa 2362 btrfs_err_rl_in_rcu(fs_info,
b14af3b4 2363 "IO error rebuilding logical %llu for dev %s",
cb3e217b 2364 logical, btrfs_dev_name(dev));
73ff61db
OS
2365 } else if (sblock->header_error || sblock->checksum_error) {
2366 spin_lock(&sctx->stat_lock);
2367 sctx->stat.uncorrectable_errors++;
2368 spin_unlock(&sctx->stat_lock);
0b246afa 2369 btrfs_err_rl_in_rcu(fs_info,
b14af3b4 2370 "failed to rebuild valid logical %llu for dev %s",
cb3e217b 2371 logical, btrfs_dev_name(dev));
73ff61db
OS
2372 } else {
2373 scrub_write_block_to_dev_replace(sblock);
2374 }
2375
2073c4c2 2376 if (sctx->is_dev_replace && sctx->flush_all_writes) {
3fb99303 2377 mutex_lock(&sctx->wr_lock);
73ff61db 2378 scrub_wr_submit(sctx);
3fb99303 2379 mutex_unlock(&sctx->wr_lock);
73ff61db
OS
2380 }
2381
57d4f0b8 2382 scrub_block_put(sblock);
73ff61db
OS
2383 scrub_pending_bio_dec(sctx);
2384}
2385
2386static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2387{
2388 struct scrub_ctx *sctx = sblock->sctx;
fb456252 2389 struct btrfs_fs_info *fs_info = sctx->fs_info;
7e737cbc 2390 u64 length = sblock->sector_count << fs_info->sectorsize_bits;
8686c40e 2391 u64 logical = sblock->logical;
4c664611 2392 struct btrfs_io_context *bioc = NULL;
73ff61db
OS
2393 struct bio *bio;
2394 struct btrfs_raid_bio *rbio;
2395 int ret;
2396 int i;
2397
ae6529c3 2398 btrfs_bio_counter_inc_blocked(fs_info);
cf8cddd3 2399 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
4c664611
QW
2400 &length, &bioc);
2401 if (ret || !bioc || !bioc->raid_map)
2402 goto bioc_out;
73ff61db
OS
2403
2404 if (WARN_ON(!sctx->is_dev_replace ||
4c664611 2405 !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
73ff61db
OS
2406 /*
2407 * We shouldn't be scrubbing a missing device. Even for dev
2408 * replace, we should only get here for RAID 5/6. We either
2409 * managed to mount something with no mirrors remaining or
a13467ee 2410 * there's a bug in scrub_find_good_copy()/btrfs_map_block().
73ff61db 2411 */
4c664611 2412 goto bioc_out;
73ff61db
OS
2413 }
2414
75c17e66 2415 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
73ff61db
OS
2416 bio->bi_iter.bi_sector = logical >> 9;
2417 bio->bi_private = sblock;
2418 bio->bi_end_io = scrub_missing_raid56_end_io;
2419
ff18a4af 2420 rbio = raid56_alloc_missing_rbio(bio, bioc);
73ff61db
OS
2421 if (!rbio)
2422 goto rbio_out;
2423
7e737cbc 2424 for (i = 0; i < sblock->sector_count; i++) {
46343501 2425 struct scrub_sector *sector = sblock->sectors[i];
73ff61db 2426
eb2fad30
QW
2427 raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector),
2428 scrub_sector_get_page_offset(sector),
8686c40e 2429 sector->offset + sector->sblock->logical);
73ff61db
OS
2430 }
2431
be539518 2432 INIT_WORK(&sblock->work, scrub_missing_raid56_worker);
73ff61db
OS
2433 scrub_block_get(sblock);
2434 scrub_pending_bio_inc(sctx);
2435 raid56_submit_missing_rbio(rbio);
f1c29379 2436 btrfs_put_bioc(bioc);
73ff61db
OS
2437 return;
2438
2439rbio_out:
2440 bio_put(bio);
4c664611 2441bioc_out:
ae6529c3 2442 btrfs_bio_counter_dec(fs_info);
4c664611 2443 btrfs_put_bioc(bioc);
73ff61db
OS
2444 spin_lock(&sctx->stat_lock);
2445 sctx->stat.malloc_errors++;
2446 spin_unlock(&sctx->stat_lock);
2447}
2448
46343501 2449static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
a36cf8b8 2450 u64 physical, struct btrfs_device *dev, u64 flags,
96e63a45 2451 u64 gen, int mirror_num, u8 *csum,
ff023aac 2452 u64 physical_for_dev_replace)
b5d67f64
SB
2453{
2454 struct scrub_block *sblock;
d0a7a9c0 2455 const u32 sectorsize = sctx->fs_info->sectorsize;
b5d67f64
SB
2456 int index;
2457
8686c40e
QW
2458 sblock = alloc_scrub_block(sctx, dev, logical, physical,
2459 physical_for_dev_replace, mirror_num);
b5d67f64 2460 if (!sblock) {
d9d181c1
SB
2461 spin_lock(&sctx->stat_lock);
2462 sctx->stat.malloc_errors++;
2463 spin_unlock(&sctx->stat_lock);
b5d67f64 2464 return -ENOMEM;
a2de733c 2465 }
b5d67f64 2466
b5d67f64 2467 for (index = 0; len > 0; index++) {
46343501 2468 struct scrub_sector *sector;
d0a7a9c0
QW
2469 /*
2470 * Here we will allocate one page for one sector to scrub.
2471 * This is fine if PAGE_SIZE == sectorsize, but will cost
2472 * more memory for PAGE_SIZE > sectorsize case.
2473 */
2474 u32 l = min(sectorsize, len);
b5d67f64 2475
02bc3927 2476 sector = alloc_scrub_sector(sblock, logical);
46343501 2477 if (!sector) {
d9d181c1
SB
2478 spin_lock(&sctx->stat_lock);
2479 sctx->stat.malloc_errors++;
2480 spin_unlock(&sctx->stat_lock);
7a9e9987 2481 scrub_block_put(sblock);
b5d67f64
SB
2482 return -ENOMEM;
2483 }
46343501
QW
2484 sector->flags = flags;
2485 sector->generation = gen;
b5d67f64 2486 if (csum) {
46343501
QW
2487 sector->have_csum = 1;
2488 memcpy(sector->csum, csum, sctx->fs_info->csum_size);
b5d67f64 2489 } else {
46343501 2490 sector->have_csum = 0;
b5d67f64 2491 }
b5d67f64
SB
2492 len -= l;
2493 logical += l;
2494 physical += l;
ff023aac 2495 physical_for_dev_replace += l;
b5d67f64
SB
2496 }
2497
7e737cbc 2498 WARN_ON(sblock->sector_count == 0);
e6e674bd 2499 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
73ff61db
OS
2500 /*
2501 * This case should only be hit for RAID 5/6 device replace. See
2502 * the comment in scrub_missing_raid56_pages() for details.
2503 */
2504 scrub_missing_raid56_pages(sblock);
2505 } else {
7e737cbc 2506 for (index = 0; index < sblock->sector_count; index++) {
46343501 2507 struct scrub_sector *sector = sblock->sectors[index];
73ff61db 2508 int ret;
1bc87793 2509
46343501 2510 ret = scrub_add_sector_to_rd_bio(sctx, sector);
73ff61db
OS
2511 if (ret) {
2512 scrub_block_put(sblock);
2513 return ret;
2514 }
b5d67f64 2515 }
a2de733c 2516
96e63a45 2517 if (flags & BTRFS_EXTENT_FLAG_SUPER)
73ff61db
OS
2518 scrub_submit(sctx);
2519 }
a2de733c 2520
b5d67f64
SB
2521 /* last one frees, either here or in bio completion for last page */
2522 scrub_block_put(sblock);
a2de733c
AJ
2523 return 0;
2524}
2525
4246a0b6 2526static void scrub_bio_end_io(struct bio *bio)
b5d67f64
SB
2527{
2528 struct scrub_bio *sbio = bio->bi_private;
fb456252 2529 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
b5d67f64 2530
4e4cbee9 2531 sbio->status = bio->bi_status;
b5d67f64
SB
2532 sbio->bio = bio;
2533
be539518 2534 queue_work(fs_info->scrub_workers, &sbio->work);
b5d67f64
SB
2535}
2536
be539518 2537static void scrub_bio_end_io_worker(struct work_struct *work)
b5d67f64
SB
2538{
2539 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
d9d181c1 2540 struct scrub_ctx *sctx = sbio->sctx;
b5d67f64
SB
2541 int i;
2542
e360d2f5 2543 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
4e4cbee9 2544 if (sbio->status) {
e360d2f5
QW
2545 for (i = 0; i < sbio->sector_count; i++) {
2546 struct scrub_sector *sector = sbio->sectors[i];
b5d67f64 2547
46343501
QW
2548 sector->io_error = 1;
2549 sector->sblock->no_io_error_seen = 0;
b5d67f64
SB
2550 }
2551 }
2552
46343501 2553 /* Now complete the scrub_block items that have all pages completed */
e360d2f5
QW
2554 for (i = 0; i < sbio->sector_count; i++) {
2555 struct scrub_sector *sector = sbio->sectors[i];
46343501 2556 struct scrub_block *sblock = sector->sblock;
b5d67f64 2557
46343501 2558 if (atomic_dec_and_test(&sblock->outstanding_sectors))
b5d67f64
SB
2559 scrub_block_complete(sblock);
2560 scrub_block_put(sblock);
2561 }
2562
b5d67f64
SB
2563 bio_put(sbio->bio);
2564 sbio->bio = NULL;
d9d181c1
SB
2565 spin_lock(&sctx->list_lock);
2566 sbio->next_free = sctx->first_free;
2567 sctx->first_free = sbio->index;
2568 spin_unlock(&sctx->list_lock);
ff023aac 2569
2073c4c2 2570 if (sctx->is_dev_replace && sctx->flush_all_writes) {
3fb99303 2571 mutex_lock(&sctx->wr_lock);
ff023aac 2572 scrub_wr_submit(sctx);
3fb99303 2573 mutex_unlock(&sctx->wr_lock);
ff023aac
SB
2574 }
2575
b6bfebc1 2576 scrub_pending_bio_dec(sctx);
b5d67f64
SB
2577}
2578
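/*
 * Worked example for the bitmap helper below (illustrative): with a 64KiB
 * stripe_len and 4KiB sectors, sparity->nsectors is 16. Marking a 16KiB
 * range that starts 56KiB into the stripe gives offset = 14 and
 * nsectors = 4, which wraps around: bits 14-15 and bits 0-1 are set.
 */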
5a6ac9ea
MX
2579static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2580 unsigned long *bitmap,
fa485d21 2581 u64 start, u32 len)
5a6ac9ea 2582{
972d7219 2583 u64 offset;
7736b0a4 2584 u32 nsectors;
ab108d99 2585 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
5a6ac9ea
MX
2586
2587 if (len >= sparity->stripe_len) {
2588 bitmap_set(bitmap, 0, sparity->nsectors);
2589 return;
2590 }
2591
2592 start -= sparity->logic_start;
972d7219 2593 start = div64_u64_rem(start, sparity->stripe_len, &offset);
ab108d99 2594 offset = offset >> sectorsize_bits;
fa485d21 2595 nsectors = len >> sectorsize_bits;
5a6ac9ea
MX
2596
2597 if (offset + nsectors <= sparity->nsectors) {
2598 bitmap_set(bitmap, offset, nsectors);
2599 return;
2600 }
2601
2602 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2603 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2604}
2605
2606static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
fa485d21 2607 u64 start, u32 len)
5a6ac9ea 2608{
381b9b4c 2609 __scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len);
5a6ac9ea
MX
2610}
2611
2612static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
fa485d21 2613 u64 start, u32 len)
5a6ac9ea 2614{
381b9b4c 2615 __scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len);
5a6ac9ea
MX
2616}
2617
b5d67f64
SB
2618static void scrub_block_complete(struct scrub_block *sblock)
2619{
5a6ac9ea
MX
2620 int corrupted = 0;
2621
ff023aac 2622 if (!sblock->no_io_error_seen) {
5a6ac9ea 2623 corrupted = 1;
b5d67f64 2624 scrub_handle_errored_block(sblock);
ff023aac
SB
2625 } else {
2626 /*
2627 * if has checksum error, write via repair mechanism in
2628 * dev replace case, otherwise write here in dev replace
2629 * case.
2630 */
5a6ac9ea
MX
2631 corrupted = scrub_checksum(sblock);
2632 if (!corrupted && sblock->sctx->is_dev_replace)
ff023aac
SB
2633 scrub_write_block_to_dev_replace(sblock);
2634 }
5a6ac9ea
MX
2635
2636 if (sblock->sparity && corrupted && !sblock->data_corrected) {
8686c40e
QW
2637 u64 start = sblock->logical;
2638 u64 end = sblock->logical +
2639 sblock->sectors[sblock->sector_count - 1]->offset +
8df507cb 2640 sblock->sctx->fs_info->sectorsize;
5a6ac9ea 2641
fa485d21 2642 ASSERT(end - start <= U32_MAX);
5a6ac9ea
MX
2643 scrub_parity_mark_sectors_error(sblock->sparity,
2644 start, end - start);
2645 }
b5d67f64
SB
2646}
2647
480a8ec8
QW
2648static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
2649{
2650 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
2651 list_del(&sum->list);
2652 kfree(sum);
2653}
2654
2655/*
2656 * Find the desired csum for range [logical, logical + sectorsize), and store
2657 * the csum into @csum.
2658 *
2659 * The search source is sctx->csum_list, which is a pre-populated list
1a9fd417 2660 * storing bytenr ordered csum ranges. We are responsible for cleaning up any range
480a8ec8
QW
2661 * that is before @logical.
2662 *
2663 * Return 0 if there is no csum for the range.
2664 * Return 1 if there is a csum for the range, and it is copied to @csum.
2665 */
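/*
 * Worked example (illustrative): with 4KiB sectors, a btrfs_ordered_sum
 * starting at bytenr 1MiB with len 64KiB holds 16 csums. A lookup at
 * logical 1MiB + 20KiB computes index = (20KiB >> sectorsize_bits) = 5 and
 * copies the sixth csum; since index != num_sectors - 1, the range stays on
 * the list for later lookups.
 */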
3b5753ec 2666static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
a2de733c 2667{
480a8ec8 2668 bool found = false;
a2de733c 2669
d9d181c1 2670 while (!list_empty(&sctx->csum_list)) {
480a8ec8
QW
2671 struct btrfs_ordered_sum *sum = NULL;
2672 unsigned long index;
2673 unsigned long num_sectors;
2674
d9d181c1 2675 sum = list_first_entry(&sctx->csum_list,
a2de733c 2676 struct btrfs_ordered_sum, list);
480a8ec8 2677 /* The current csum range is beyond our range, no csum found */
a2de733c 2678 if (sum->bytenr > logical)
a2de733c
AJ
2679 break;
2680
480a8ec8
QW
2681 /*
2682 * The current sum is before our bytenr, since scrub is always
2683 * done in bytenr order, the csum will never be used anymore,
2684 * clean it up so that later calls won't bother with the range,
2685 * and continue search the next range.
2686 */
2687 if (sum->bytenr + sum->len <= logical) {
2688 drop_csum_range(sctx, sum);
2689 continue;
2690 }
a2de733c 2691
480a8ec8
QW
2692 /* Now the csum range covers our bytenr, copy the csum */
2693 found = true;
2694 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
2695 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
1d1bf92d 2696
480a8ec8
QW
2697 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
2698 sctx->fs_info->csum_size);
2699
2700 /* Cleanup the range if we're at the end of the csum range */
2701 if (index == num_sectors - 1)
2702 drop_csum_range(sctx, sum);
2703 break;
a2de733c 2704 }
480a8ec8
QW
2705 if (!found)
2706 return 0;
f51a4a18 2707 return 1;
a2de733c
AJ
2708}
2709
2710/* scrub extent tries to collect up to 64 kB for each bio */
6ca1765b 2711static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
fa485d21 2712 u64 logical, u32 len,
a36cf8b8 2713 u64 physical, struct btrfs_device *dev, u64 flags,
a13467ee 2714 u64 gen, int mirror_num)
a2de733c 2715{
a13467ee
QW
2716 struct btrfs_device *src_dev = dev;
2717 u64 src_physical = physical;
2718 int src_mirror = mirror_num;
a2de733c
AJ
2719 int ret;
2720 u8 csum[BTRFS_CSUM_SIZE];
b5d67f64
SB
2721 u32 blocksize;
2722
2723 if (flags & BTRFS_EXTENT_FLAG_DATA) {
6ca1765b
LB
2724 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2725 blocksize = map->stripe_len;
2726 else
b75b51f8 2727 blocksize = sctx->fs_info->sectorsize;
d9d181c1
SB
2728 spin_lock(&sctx->stat_lock);
2729 sctx->stat.data_extents_scrubbed++;
2730 sctx->stat.data_bytes_scrubbed += len;
2731 spin_unlock(&sctx->stat_lock);
b5d67f64 2732 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
6ca1765b
LB
2733 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2734 blocksize = map->stripe_len;
2735 else
2736 blocksize = sctx->fs_info->nodesize;
d9d181c1
SB
2737 spin_lock(&sctx->stat_lock);
2738 sctx->stat.tree_extents_scrubbed++;
2739 sctx->stat.tree_bytes_scrubbed += len;
2740 spin_unlock(&sctx->stat_lock);
b5d67f64 2741 } else {
25cc1226 2742 blocksize = sctx->fs_info->sectorsize;
ff023aac 2743 WARN_ON(1);
b5d67f64 2744 }
a2de733c 2745
a13467ee
QW
2746 /*
2747 * For the dev-replace case, @dev can be a missing device.
2748 * Regular scrub avoids running on a missing device at all,
2749 * as that would trigger tons of read errors.
2750 *
2751 * Reading from a missing device would also cause the read error
2752 * counters to increase unnecessarily.
2753 * So here we change the read source to a good mirror.
2754 */
2755 if (sctx->is_dev_replace && !dev->bdev)
2756 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical,
2757 &src_dev, &src_mirror);
a2de733c 2758 while (len) {
fa485d21 2759 u32 l = min(len, blocksize);
a2de733c
AJ
2760 int have_csum = 0;
2761
2762 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2763 /* push csums to sbio */
3b5753ec 2764 have_csum = scrub_find_csum(sctx, logical, csum);
a2de733c 2765 if (have_csum == 0)
d9d181c1 2766 ++sctx->stat.no_csum;
a2de733c 2767 }
a13467ee
QW
2768 ret = scrub_sectors(sctx, logical, l, src_physical, src_dev,
2769 flags, gen, src_mirror,
2770 have_csum ? csum : NULL, physical);
a2de733c
AJ
2771 if (ret)
2772 return ret;
2773 len -= l;
2774 logical += l;
2775 physical += l;
a13467ee 2776 src_physical += l;
a2de733c
AJ
2777 }
2778 return 0;
2779}
2780
46343501 2781static int scrub_sectors_for_parity(struct scrub_parity *sparity,
fa485d21 2782 u64 logical, u32 len,
5a6ac9ea
MX
2783 u64 physical, struct btrfs_device *dev,
2784 u64 flags, u64 gen, int mirror_num, u8 *csum)
2785{
2786 struct scrub_ctx *sctx = sparity->sctx;
2787 struct scrub_block *sblock;
d0a7a9c0 2788 const u32 sectorsize = sctx->fs_info->sectorsize;
5a6ac9ea
MX
2789 int index;
2790
d0a7a9c0
QW
2791 ASSERT(IS_ALIGNED(len, sectorsize));
2792
8686c40e 2793 sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num);
5a6ac9ea
MX
2794 if (!sblock) {
2795 spin_lock(&sctx->stat_lock);
2796 sctx->stat.malloc_errors++;
2797 spin_unlock(&sctx->stat_lock);
2798 return -ENOMEM;
2799 }
2800
5a6ac9ea
MX
2801 sblock->sparity = sparity;
2802 scrub_parity_get(sparity);
2803
2804 for (index = 0; len > 0; index++) {
46343501 2805 struct scrub_sector *sector;
5a6ac9ea 2806
02bc3927 2807 sector = alloc_scrub_sector(sblock, logical);
46343501 2808 if (!sector) {
5a6ac9ea
MX
2809 spin_lock(&sctx->stat_lock);
2810 sctx->stat.malloc_errors++;
2811 spin_unlock(&sctx->stat_lock);
2812 scrub_block_put(sblock);
2813 return -ENOMEM;
2814 }
46343501 2815 sblock->sectors[index] = sector;
5a6ac9ea 2816 /* For scrub parity */
46343501
QW
2817 scrub_sector_get(sector);
2818 list_add_tail(&sector->list, &sparity->sectors_list);
46343501
QW
2819 sector->flags = flags;
2820 sector->generation = gen;
5a6ac9ea 2821 if (csum) {
46343501
QW
2822 sector->have_csum = 1;
2823 memcpy(sector->csum, csum, sctx->fs_info->csum_size);
5a6ac9ea 2824 } else {
46343501 2825 sector->have_csum = 0;
5a6ac9ea 2826 }
d0a7a9c0
QW
2827
2828 /* Iterate over the stripe range in sectorsize steps */
2829 len -= sectorsize;
2830 logical += sectorsize;
2831 physical += sectorsize;
5a6ac9ea
MX
2832 }
2833
7e737cbc
QW
2834 WARN_ON(sblock->sector_count == 0);
2835 for (index = 0; index < sblock->sector_count; index++) {
46343501 2836 struct scrub_sector *sector = sblock->sectors[index];
5a6ac9ea
MX
2837 int ret;
2838
46343501 2839 ret = scrub_add_sector_to_rd_bio(sctx, sector);
5a6ac9ea
MX
2840 if (ret) {
2841 scrub_block_put(sblock);
2842 return ret;
2843 }
2844 }
2845
46343501 2846 /* Last one frees, either here or in bio completion for last sector */
5a6ac9ea
MX
2847 scrub_block_put(sblock);
2848 return 0;
2849}
2850
2851static int scrub_extent_for_parity(struct scrub_parity *sparity,
fa485d21 2852 u64 logical, u32 len,
5a6ac9ea
MX
2853 u64 physical, struct btrfs_device *dev,
2854 u64 flags, u64 gen, int mirror_num)
2855{
2856 struct scrub_ctx *sctx = sparity->sctx;
2857 int ret;
2858 u8 csum[BTRFS_CSUM_SIZE];
2859 u32 blocksize;
2860
e6e674bd 2861 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
4a770891
OS
2862 scrub_parity_mark_sectors_error(sparity, logical, len);
2863 return 0;
2864 }
2865
5a6ac9ea 2866 if (flags & BTRFS_EXTENT_FLAG_DATA) {
6ca1765b 2867 blocksize = sparity->stripe_len;
5a6ac9ea 2868 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
6ca1765b 2869 blocksize = sparity->stripe_len;
5a6ac9ea 2870 } else {
25cc1226 2871 blocksize = sctx->fs_info->sectorsize;
5a6ac9ea
MX
2872 WARN_ON(1);
2873 }
2874
2875 while (len) {
fa485d21 2876 u32 l = min(len, blocksize);
5a6ac9ea
MX
2877 int have_csum = 0;
2878
2879 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2880 /* push csums to sbio */
3b5753ec 2881 have_csum = scrub_find_csum(sctx, logical, csum);
5a6ac9ea
MX
2882 if (have_csum == 0)
2883 goto skip;
2884 }
46343501 2885 ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
5a6ac9ea
MX
2886 flags, gen, mirror_num,
2887 have_csum ? csum : NULL);
5a6ac9ea
MX
2888 if (ret)
2889 return ret;
6b6d24b3 2890skip:
5a6ac9ea
MX
2891 len -= l;
2892 logical += l;
2893 physical += l;
2894 }
2895 return 0;
2896}
2897
3b080b25
WS
2898/*
2899 * Given a physical address, this will calculate its
2900 * logical offset. If this is a parity stripe, it will return
2901 * the leftmost data stripe's logical offset.
2902 *
2903 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2904 */
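/*
 * Worked example (illustrative): RAID5 over three devices, so
 * num_stripes = 3, data_stripes = 2, stripe_len = 64KiB. For num = 1 and a
 * physical address at the very start of that device's chunk stripe,
 * last_offset = 0; the candidate i = 1 lands in full stripe 0 with rot = 0,
 * giving stripe_index = 1 == num, so the function returns 0 with
 * *offset = 64KiB, the chunk-relative logical offset of that data stripe.
 */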
2905static int get_raid56_logic_offset(u64 physical, int num,
5a6ac9ea
MX
2906 struct map_lookup *map, u64 *offset,
2907 u64 *stripe_start)
3b080b25
WS
2908{
2909 int i;
2910 int j = 0;
2911 u64 stripe_nr;
2912 u64 last_offset;
9d644a62
DS
2913 u32 stripe_index;
2914 u32 rot;
cff82672 2915 const int data_stripes = nr_data_stripes(map);
3b080b25 2916
cff82672 2917 last_offset = (physical - map->stripes[num].physical) * data_stripes;
5a6ac9ea
MX
2918 if (stripe_start)
2919 *stripe_start = last_offset;
2920
3b080b25 2921 *offset = last_offset;
cff82672 2922 for (i = 0; i < data_stripes; i++) {
3b080b25
WS
2923 *offset = last_offset + i * map->stripe_len;
2924
42c61ab6 2925 stripe_nr = div64_u64(*offset, map->stripe_len);
cff82672 2926 stripe_nr = div_u64(stripe_nr, data_stripes);
3b080b25
WS
2927
2928 /* Work out the disk rotation on this stripe-set */
47c5713f 2929 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
3b080b25
WS
2930 /* calculate which stripe this data locates */
2931 rot += i;
e4fbaee2 2932 stripe_index = rot % map->num_stripes;
3b080b25
WS
2933 if (stripe_index == num)
2934 return 0;
2935 if (stripe_index < num)
2936 j++;
2937 }
2938 *offset = last_offset + j * map->stripe_len;
2939 return 1;
2940}
2941
5a6ac9ea
MX
2942static void scrub_free_parity(struct scrub_parity *sparity)
2943{
2944 struct scrub_ctx *sctx = sparity->sctx;
46343501 2945 struct scrub_sector *curr, *next;
5a6ac9ea
MX
2946 int nbits;
2947
381b9b4c 2948 nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors);
5a6ac9ea
MX
2949 if (nbits) {
2950 spin_lock(&sctx->stat_lock);
2951 sctx->stat.read_errors += nbits;
2952 sctx->stat.uncorrectable_errors += nbits;
2953 spin_unlock(&sctx->stat_lock);
2954 }
2955
46343501 2956 list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
5a6ac9ea 2957 list_del_init(&curr->list);
46343501 2958 scrub_sector_put(curr);
5a6ac9ea
MX
2959 }
2960
2961 kfree(sparity);
2962}
2963
be539518 2964static void scrub_parity_bio_endio_worker(struct work_struct *work)
20b2e302
ZL
2965{
2966 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2967 work);
2968 struct scrub_ctx *sctx = sparity->sctx;
2969
f1c29379 2970 btrfs_bio_counter_dec(sctx->fs_info);
20b2e302
ZL
2971 scrub_free_parity(sparity);
2972 scrub_pending_bio_dec(sctx);
2973}
2974
4246a0b6 2975static void scrub_parity_bio_endio(struct bio *bio)
5a6ac9ea 2976{
0d031dc4 2977 struct scrub_parity *sparity = bio->bi_private;
0b246afa 2978 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
5a6ac9ea 2979
4e4cbee9 2980 if (bio->bi_status)
381b9b4c
QW
2981 bitmap_or(&sparity->ebitmap, &sparity->ebitmap,
2982 &sparity->dbitmap, sparity->nsectors);
5a6ac9ea 2983
5a6ac9ea 2984 bio_put(bio);
20b2e302 2985
be539518
CH
2986 INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker);
2987 queue_work(fs_info->scrub_parity_workers, &sparity->work);
5a6ac9ea
MX
2988}
2989
2990static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2991{
2992 struct scrub_ctx *sctx = sparity->sctx;
0b246afa 2993 struct btrfs_fs_info *fs_info = sctx->fs_info;
5a6ac9ea
MX
2994 struct bio *bio;
2995 struct btrfs_raid_bio *rbio;
4c664611 2996 struct btrfs_io_context *bioc = NULL;
5a6ac9ea
MX
2997 u64 length;
2998 int ret;
2999
381b9b4c
QW
3000 if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap,
3001 &sparity->ebitmap, sparity->nsectors))
5a6ac9ea
MX
3002 goto out;
3003
a0dd59de 3004 length = sparity->logic_end - sparity->logic_start;
ae6529c3
QW
3005
3006 btrfs_bio_counter_inc_blocked(fs_info);
0b246afa 3007 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
4c664611
QW
3008 &length, &bioc);
3009 if (ret || !bioc || !bioc->raid_map)
3010 goto bioc_out;
5a6ac9ea 3011
75c17e66 3012 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
5a6ac9ea
MX
3013 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
3014 bio->bi_private = sparity;
3015 bio->bi_end_io = scrub_parity_bio_endio;
3016
ff18a4af 3017 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc,
6a258d72 3018 sparity->scrub_dev,
381b9b4c 3019 &sparity->dbitmap,
5a6ac9ea 3020 sparity->nsectors);
f1c29379 3021 btrfs_put_bioc(bioc);
5a6ac9ea
MX
3022 if (!rbio)
3023 goto rbio_out;
3024
5a6ac9ea
MX
3025 scrub_pending_bio_inc(sctx);
3026 raid56_parity_submit_scrub_rbio(rbio);
3027 return;
3028
3029rbio_out:
3030 bio_put(bio);
4c664611 3031bioc_out:
ae6529c3 3032 btrfs_bio_counter_dec(fs_info);
381b9b4c 3033 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap,
5a6ac9ea
MX
3034 sparity->nsectors);
3035 spin_lock(&sctx->stat_lock);
3036 sctx->stat.malloc_errors++;
3037 spin_unlock(&sctx->stat_lock);
3038out:
3039 scrub_free_parity(sparity);
3040}
3041
5a6ac9ea
MX
3042static void scrub_parity_get(struct scrub_parity *sparity)
3043{
78a76450 3044 refcount_inc(&sparity->refs);
5a6ac9ea
MX
3045}
3046
3047static void scrub_parity_put(struct scrub_parity *sparity)
3048{
78a76450 3049 if (!refcount_dec_and_test(&sparity->refs))
5a6ac9ea
MX
3050 return;
3051
3052 scrub_parity_check_and_repair(sparity);
3053}
3054
416bd7e7
QW
3055/*
3056 * Return 0 if the extent item range covers any byte of the range.
3057 * Return <0 if the extent item is before @search_start.
3058 * Return >0 if the extent item is after @search_start + @search_len.
3059 */
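/*
 * Illustrative example: for a search range of [1MiB, 1MiB + 64KiB), an
 * EXTENT_ITEM at objectid 960KiB with offset (length) 128KiB covers
 * [960KiB, 1088KiB) and overlaps the range, so 0 is returned; an item
 * covering exactly [960KiB, 1024KiB) ends right at the search start and
 * yields -1; an item starting at 1088KiB or later yields 1.
 */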
3060static int compare_extent_item_range(struct btrfs_path *path,
3061 u64 search_start, u64 search_len)
3062{
3063 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
3064 u64 len;
3065 struct btrfs_key key;
3066
3067 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3068 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
3069 key.type == BTRFS_METADATA_ITEM_KEY);
3070 if (key.type == BTRFS_METADATA_ITEM_KEY)
3071 len = fs_info->nodesize;
3072 else
3073 len = key.offset;
3074
3075 if (key.objectid + len <= search_start)
3076 return -1;
3077 if (key.objectid >= search_start + search_len)
3078 return 1;
3079 return 0;
3080}
3081
3082/*
3083 * Locate one extent item which covers any byte in range
3084 * [@search_start, @search_start + @search_len)
3085 *
3086 * If the path is not initialized, we will initialize the search by doing
3087 * a btrfs_search_slot().
3088 * If the path is already initialized, we will use the path as the initial
3089 * slot, to avoid duplicated btrfs_search_slot() calls.
3090 *
3091 * NOTE: If an extent item starts before @search_start, we will still
3092 * return the extent item. This is for data extents crossing the stripe boundary.
3093 *
3094 * Return 0 if we find such an extent item, and @path will point to the extent item.
3095 * Return >0 if no such extent item can be found, and @path will be released.
3096 * Return <0 if we hit a fatal error, and @path will be released.
3097 */
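/*
 * Illustrative example: when scrubbing the stripe [1MiB, 1MiB + 64KiB), the
 * first call positions the search at 1MiB and then steps back one extent
 * item, so a data extent item at objectid 1008KiB with length 32KiB
 * (covering [1008KiB, 1040KiB)) is still returned even though it starts
 * before the stripe; later calls reuse the cached @path and walk forward.
 */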
3098static int find_first_extent_item(struct btrfs_root *extent_root,
3099 struct btrfs_path *path,
3100 u64 search_start, u64 search_len)
3101{
3102 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3103 struct btrfs_key key;
3104 int ret;
3105
3106 /* Continue using the existing path */
3107 if (path->nodes[0])
3108 goto search_forward;
3109
3110 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3111 key.type = BTRFS_METADATA_ITEM_KEY;
3112 else
3113 key.type = BTRFS_EXTENT_ITEM_KEY;
3114 key.objectid = search_start;
3115 key.offset = (u64)-1;
3116
3117 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3118 if (ret < 0)
3119 return ret;
3120
3121 ASSERT(ret > 0);
3122 /*
3123 * Here we intentionally pass 0 as @min_objectid, as there could be
3124 * an extent item starting before @search_start.
3125 */
3126 ret = btrfs_previous_extent_item(extent_root, path, 0);
3127 if (ret < 0)
3128 return ret;
3129 /*
3130 * No matter whether we have found an extent item, the next loop will
3131 * properly do every check on the key.
3132 */
3133search_forward:
3134 while (true) {
3135 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3136 if (key.objectid >= search_start + search_len)
3137 break;
3138 if (key.type != BTRFS_METADATA_ITEM_KEY &&
3139 key.type != BTRFS_EXTENT_ITEM_KEY)
3140 goto next;
3141
3142 ret = compare_extent_item_range(path, search_start, search_len);
3143 if (ret == 0)
3144 return ret;
3145 if (ret > 0)
3146 break;
3147next:
3148 path->slots[0]++;
3149 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3150 ret = btrfs_next_leaf(extent_root, path);
3151 if (ret) {
3152 /* Either no more item or fatal error */
3153 btrfs_release_path(path);
3154 return ret;
3155 }
3156 }
3157 }
3158 btrfs_release_path(path);
3159 return 1;
3160}
3161
09022b14
QW
3162static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
3163 u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
3164{
3165 struct btrfs_key key;
3166 struct btrfs_extent_item *ei;
3167
3168 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3169 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
3170 key.type == BTRFS_EXTENT_ITEM_KEY);
3171 *extent_start_ret = key.objectid;
3172 if (key.type == BTRFS_METADATA_ITEM_KEY)
3173 *size_ret = path->nodes[0]->fs_info->nodesize;
3174 else
3175 *size_ret = key.offset;
3176 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
3177 *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
3178 *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
3179}
3180
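/*
 * Illustrative example for the helper below: an extent [96KiB, 160KiB)
 * crosses a boundary range of [128KiB, 192KiB) because it straddles the
 * 128KiB start; an extent [128KiB, 160KiB) lies fully inside the range and
 * crosses neither edge, so false is returned.
 */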
8557635e
QW
3181static bool does_range_cross_boundary(u64 extent_start, u64 extent_len,
3182 u64 boundary_start, u64 boundary_len)
3183{
3184 return (extent_start < boundary_start &&
3185 extent_start + extent_len > boundary_start) ||
3186 (extent_start < boundary_start + boundary_len &&
3187 extent_start + extent_len > boundary_start + boundary_len);
3188}
3189
9ae53bf9
QW
3190static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
3191 struct scrub_parity *sparity,
3192 struct map_lookup *map,
3193 struct btrfs_device *sdev,
3194 struct btrfs_path *path,
3195 u64 logical)
3196{
3197 struct btrfs_fs_info *fs_info = sctx->fs_info;
3198 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
3199 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical);
d483bfd2 3200 u64 cur_logical = logical;
9ae53bf9
QW
3201 int ret;
3202
3203 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3204
3205 /* Path must not be populated */
3206 ASSERT(!path->nodes[0]);
3207
d483bfd2 3208 while (cur_logical < logical + map->stripe_len) {
9ae53bf9
QW
3209 struct btrfs_io_context *bioc = NULL;
3210 struct btrfs_device *extent_dev;
9ae53bf9
QW
3211 u64 extent_start;
3212 u64 extent_size;
3213 u64 mapped_length;
3214 u64 extent_flags;
3215 u64 extent_gen;
3216 u64 extent_physical;
3217 u64 extent_mirror_num;
3218
d483bfd2
QW
3219 ret = find_first_extent_item(extent_root, path, cur_logical,
3220 logical + map->stripe_len - cur_logical);
3221 /* No more extent item in this data stripe */
3222 if (ret > 0) {
3223 ret = 0;
9ae53bf9
QW
3224 break;
3225 }
d483bfd2 3226 if (ret < 0)
9ae53bf9 3227 break;
d483bfd2
QW
3228 get_extent_info(path, &extent_start, &extent_size, &extent_flags,
3229 &extent_gen);
9ae53bf9 3230
d483bfd2 3231 /* Metadata should not cross stripe boundaries */
9ae53bf9 3232 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
d483bfd2
QW
3233 does_range_cross_boundary(extent_start, extent_size,
3234 logical, map->stripe_len)) {
9ae53bf9 3235 btrfs_err(fs_info,
d483bfd2
QW
3236 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3237 extent_start, logical);
9ae53bf9
QW
3238 spin_lock(&sctx->stat_lock);
3239 sctx->stat.uncorrectable_errors++;
3240 spin_unlock(&sctx->stat_lock);
d483bfd2
QW
3241 cur_logical += extent_size;
3242 continue;
9ae53bf9
QW
3243 }
3244
d483bfd2
QW
3245 /* Skip hole range which doesn't have any extent */
3246 cur_logical = max(extent_start, cur_logical);
9ae53bf9 3247
d483bfd2
QW
3248 /* Truncate the range inside this data stripe */
3249 extent_size = min(extent_start + extent_size,
3250 logical + map->stripe_len) - cur_logical;
3251 extent_start = cur_logical;
3252 ASSERT(extent_size <= U32_MAX);
9ae53bf9
QW
3253
3254 scrub_parity_mark_sectors_data(sparity, extent_start, extent_size);
3255
3256 mapped_length = extent_size;
3257 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start,
3258 &mapped_length, &bioc, 0);
3259 if (!ret && (!bioc || mapped_length < extent_size))
3260 ret = -EIO;
3261 if (ret) {
3262 btrfs_put_bioc(bioc);
3263 scrub_parity_mark_sectors_error(sparity, extent_start,
3264 extent_size);
3265 break;
3266 }
3267 extent_physical = bioc->stripes[0].physical;
3268 extent_mirror_num = bioc->mirror_num;
3269 extent_dev = bioc->stripes[0].dev;
3270 btrfs_put_bioc(bioc);
3271
97e38239
QW
3272 ret = btrfs_lookup_csums_list(csum_root, extent_start,
3273 extent_start + extent_size - 1,
3274 &sctx->csum_list, 1, false);
9ae53bf9
QW
3275 if (ret) {
3276 scrub_parity_mark_sectors_error(sparity, extent_start,
3277 extent_size);
3278 break;
3279 }
3280
3281 ret = scrub_extent_for_parity(sparity, extent_start,
3282 extent_size, extent_physical,
3283 extent_dev, extent_flags,
3284 extent_gen, extent_mirror_num);
3285 scrub_free_csums(sctx);
3286
3287 if (ret) {
3288 scrub_parity_mark_sectors_error(sparity, extent_start,
3289 extent_size);
3290 break;
3291 }
3292
3293 cond_resched();
d483bfd2 3294 cur_logical += extent_size;
9ae53bf9
QW
3295 }
3296 btrfs_release_path(path);
3297 return ret;
3298}
3299
5a6ac9ea
MX
3300static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3301 struct map_lookup *map,
3302 struct btrfs_device *sdev,
5a6ac9ea
MX
3303 u64 logic_start,
3304 u64 logic_end)
3305{
fb456252 3306 struct btrfs_fs_info *fs_info = sctx->fs_info;
2522dbe8 3307 struct btrfs_path *path;
9ae53bf9 3308 u64 cur_logical;
5a6ac9ea 3309 int ret;
5a6ac9ea
MX
3310 struct scrub_parity *sparity;
3311 int nsectors;
5a6ac9ea 3312
2522dbe8
QW
3313 path = btrfs_alloc_path();
3314 if (!path) {
3315 spin_lock(&sctx->stat_lock);
3316 sctx->stat.malloc_errors++;
3317 spin_unlock(&sctx->stat_lock);
3318 return -ENOMEM;
3319 }
3320 path->search_commit_root = 1;
3321 path->skip_locking = 1;
3322
fa485d21 3323 ASSERT(map->stripe_len <= U32_MAX);
ab108d99 3324 nsectors = map->stripe_len >> fs_info->sectorsize_bits;
381b9b4c
QW
3325 ASSERT(nsectors <= BITS_PER_LONG);
3326 sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
5a6ac9ea
MX
3327 if (!sparity) {
3328 spin_lock(&sctx->stat_lock);
3329 sctx->stat.malloc_errors++;
3330 spin_unlock(&sctx->stat_lock);
2522dbe8 3331 btrfs_free_path(path);
5a6ac9ea
MX
3332 return -ENOMEM;
3333 }
3334
fa485d21 3335 ASSERT(map->stripe_len <= U32_MAX);
5a6ac9ea
MX
3336 sparity->stripe_len = map->stripe_len;
3337 sparity->nsectors = nsectors;
3338 sparity->sctx = sctx;
3339 sparity->scrub_dev = sdev;
3340 sparity->logic_start = logic_start;
3341 sparity->logic_end = logic_end;
78a76450 3342 refcount_set(&sparity->refs, 1);
46343501 3343 INIT_LIST_HEAD(&sparity->sectors_list);
5a6ac9ea
MX
3344
3345 ret = 0;
9ae53bf9
QW
3346 for (cur_logical = logic_start; cur_logical < logic_end;
3347 cur_logical += map->stripe_len) {
3348 ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map,
3349 sdev, path, cur_logical);
5a6ac9ea 3350 if (ret < 0)
5a6ac9ea 3351 break;
fa485d21 3352 }
9ae53bf9 3353
5a6ac9ea
MX
3354 scrub_parity_put(sparity);
3355 scrub_submit(sctx);
3fb99303 3356 mutex_lock(&sctx->wr_lock);
5a6ac9ea 3357 scrub_wr_submit(sctx);
3fb99303 3358 mutex_unlock(&sctx->wr_lock);
5a6ac9ea 3359
2522dbe8 3360 btrfs_free_path(path);
5a6ac9ea
MX
3361 return ret < 0 ? ret : 0;
3362}
3363
de17addc
NA
3364static void sync_replace_for_zoned(struct scrub_ctx *sctx)
3365{
3366 if (!btrfs_is_zoned(sctx->fs_info))
3367 return;
3368
3369 sctx->flush_all_writes = true;
3370 scrub_submit(sctx);
3371 mutex_lock(&sctx->wr_lock);
3372 scrub_wr_submit(sctx);
3373 mutex_unlock(&sctx->wr_lock);
3374
3375 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3376}
3377
7db1c5d1
NA
3378static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
3379 u64 physical, u64 physical_end)
3380{
3381 struct btrfs_fs_info *fs_info = sctx->fs_info;
3382 int ret = 0;
3383
3384 if (!btrfs_is_zoned(fs_info))
3385 return 0;
3386
3387 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3388
3389 mutex_lock(&sctx->wr_lock);
3390 if (sctx->write_pointer < physical_end) {
3391 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
3392 physical,
3393 sctx->write_pointer);
3394 if (ret)
3395 btrfs_err(fs_info,
3396 "zoned: failed to recover write pointer");
3397 }
3398 mutex_unlock(&sctx->wr_lock);
3399 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
3400
3401 return ret;
3402}
3403
09022b14
QW
3404/*
3405 * Scrub one range which can only have a simple mirror based profile.
3406 * (This covers all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
3407 * RAID0/RAID10.)
3408 *
3409 * Since we may need to handle a subset of a block group, we need the
3410 * @logical_start and @logical_length parameters.
3411 */
3412static int scrub_simple_mirror(struct scrub_ctx *sctx,
3413 struct btrfs_root *extent_root,
3414 struct btrfs_root *csum_root,
3415 struct btrfs_block_group *bg,
3416 struct map_lookup *map,
3417 u64 logical_start, u64 logical_length,
3418 struct btrfs_device *device,
3419 u64 physical, int mirror_num)
3420{
3421 struct btrfs_fs_info *fs_info = sctx->fs_info;
3422 const u64 logical_end = logical_start + logical_length;
3423 /* An artificial limit, inherited from the old scrub behavior */
3424 const u32 max_length = SZ_64K;
3425 struct btrfs_path path = { 0 };
3426 u64 cur_logical = logical_start;
3427 int ret;
3428
3429 /* The range must be inside the bg */
3430 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
3431
3432 path.search_commit_root = 1;
3433 path.skip_locking = 1;
3434 /* Go through each extent item inside the logical range */
3435 while (cur_logical < logical_end) {
09022b14
QW
3436 u64 extent_start;
3437 u64 extent_len;
3438 u64 extent_flags;
3439 u64 extent_gen;
3440 u64 scrub_len;
09022b14
QW
3441
3442 /* Canceled? */
3443 if (atomic_read(&fs_info->scrub_cancel_req) ||
3444 atomic_read(&sctx->cancel_req)) {
3445 ret = -ECANCELED;
3446 break;
3447 }
3448 /* Paused? */
3449 if (atomic_read(&fs_info->scrub_pause_req)) {
3450 /* Push queued extents */
3451 sctx->flush_all_writes = true;
3452 scrub_submit(sctx);
3453 mutex_lock(&sctx->wr_lock);
3454 scrub_wr_submit(sctx);
3455 mutex_unlock(&sctx->wr_lock);
3456 wait_event(sctx->list_wait,
3457 atomic_read(&sctx->bios_in_flight) == 0);
3458 sctx->flush_all_writes = false;
3459 scrub_blocked_if_needed(fs_info);
3460 }
3461 /* Block group removed? */
3462 spin_lock(&bg->lock);
3349b57f 3463 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
09022b14
QW
3464 spin_unlock(&bg->lock);
3465 ret = 0;
3466 break;
3467 }
3468 spin_unlock(&bg->lock);
3469
3470 ret = find_first_extent_item(extent_root, &path, cur_logical,
3471 logical_end - cur_logical);
3472 if (ret > 0) {
3473 /* No more extents, just update the accounting */
3474 sctx->stat.last_physical = physical + logical_length;
3475 ret = 0;
3476 break;
3477 }
3478 if (ret < 0)
3479 break;
3480 get_extent_info(&path, &extent_start, &extent_len,
3481 &extent_flags, &extent_gen);
3482 /* Skip hole range which doesn't have any extent */
3483 cur_logical = max(extent_start, cur_logical);
3484
3485 /*
3486 * Scrub len has three limits:
3487 * - Extent size limit
3488 * - Scrub range limit
3489 * This is especially important for RAID0/RAID10 to reuse
3490 * this function
3491 * - Max scrub size limit
3492 */
3493 scrub_len = min(min(extent_start + extent_len,
3494 logical_end), cur_logical + max_length) -
3495 cur_logical;
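		/*
		 * Worked example with illustrative numbers: if the current
		 * position is 16KiB before @logical_end and sits at the start
		 * of a 1MiB extent, the extent size limit allows 1MiB, the
		 * range limit allows 16KiB and the max scrub size limit allows
		 * 64KiB, so scrub_len ends up as 16KiB.
		 */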
09022b14 3496
09022b14 3497 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) {
97e38239 3498 ret = btrfs_lookup_csums_list(csum_root, cur_logical,
09022b14 3499 cur_logical + scrub_len - 1,
26ce9114 3500 &sctx->csum_list, 1, false);
09022b14
QW
3501 if (ret)
3502 break;
3503 }
3504 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3505 does_range_cross_boundary(extent_start, extent_len,
3506 logical_start, logical_length)) {
3507 btrfs_err(fs_info,
3508"scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)",
3509 extent_start, logical_start, logical_end);
3510 spin_lock(&sctx->stat_lock);
3511 sctx->stat.uncorrectable_errors++;
3512 spin_unlock(&sctx->stat_lock);
3513 cur_logical += scrub_len;
3514 continue;
3515 }
a13467ee
QW
3516 ret = scrub_extent(sctx, map, cur_logical, scrub_len,
3517 cur_logical - logical_start + physical,
3518 device, extent_flags, extent_gen,
3519 mirror_num);
09022b14
QW
3520 scrub_free_csums(sctx);
3521 if (ret)
3522 break;
3523 if (sctx->is_dev_replace)
3524 sync_replace_for_zoned(sctx);
3525 cur_logical += scrub_len;
3526 /* Don't hold the CPU for too long */
3527 cond_resched();
3528 }
3529 btrfs_release_path(&path);
3530 return ret;
3531}
3532
8557635e
QW
3533/* Calculate the full stripe length for simple stripe based profiles */
3534static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
3535{
3536 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3537 BTRFS_BLOCK_GROUP_RAID10));
3538
3539 return map->num_stripes / map->sub_stripes * map->stripe_len;
3540}
3541
3542/* Get the logical bytenr for the stripe */
3543static u64 simple_stripe_get_logical(struct map_lookup *map,
3544 struct btrfs_block_group *bg,
3545 int stripe_index)
3546{
3547 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3548 BTRFS_BLOCK_GROUP_RAID10));
3549 ASSERT(stripe_index < map->num_stripes);
3550
3551 /*
3552 * (stripe_index / sub_stripes) gives how many data stripes we need to
3553 * skip.
3554 */
3555 return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start;
3556}
3557
3558/* Get the mirror number for the stripe */
3559static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
3560{
3561 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3562 BTRFS_BLOCK_GROUP_RAID10));
3563 ASSERT(stripe_index < map->num_stripes);
3564
3565 /* For RAID0 it's fixed to 1, for RAID10 it alternates between 1 and 2 */
3566 return stripe_index % map->sub_stripes + 1;
3567}
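The three helpers above fully determine how a device stripe of a RAID0/RAID10 chunk maps to a chunk-relative logical offset and a mirror number. A minimal standalone sketch (illustrative values only; demo_map is a stand-in for the handful of map_lookup fields used here) prints that mapping:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the map_lookup fields used by the helpers. */
	struct demo_map {
		uint64_t stripe_len;
		int num_stripes;
		int sub_stripes;
	};

	int main(void)
	{
		/* Hypothetical RAID10 chunk: 4 device stripes, 2 copies, 64KiB stripes. */
		const struct demo_map map = {
			.stripe_len = 64 * 1024, .num_stripes = 4, .sub_stripes = 2,
		};
		const uint64_t full_stripe_len =
			(uint64_t)(map.num_stripes / map.sub_stripes) * map.stripe_len;

		for (int i = 0; i < map.num_stripes; i++) {
			/* Same formulas as simple_stripe_get_logical()/_mirror_num(). */
			uint64_t logical_off = (uint64_t)(i / map.sub_stripes) * map.stripe_len;
			int mirror_num = i % map.sub_stripes + 1;

			printf("stripe %d -> chunk offset %llu KiB, mirror %d\n",
			       i, (unsigned long long)(logical_off / 1024), mirror_num);
		}
		printf("full stripe length: %llu KiB\n",
		       (unsigned long long)(full_stripe_len / 1024));
		return 0;
	}

For this hypothetical layout, stripes 0 and 1 are the two copies at chunk offset 0 (mirrors 1 and 2) and stripes 2 and 3 are the copies at offset 64KiB, which is why scrub_simple_stripe() below advances the logical cursor by the full stripe length while the physical cursor advances by only one stripe length.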
3568
3569static int scrub_simple_stripe(struct scrub_ctx *sctx,
3570 struct btrfs_root *extent_root,
3571 struct btrfs_root *csum_root,
3572 struct btrfs_block_group *bg,
3573 struct map_lookup *map,
3574 struct btrfs_device *device,
3575 int stripe_index)
3576{
3577 const u64 logical_increment = simple_stripe_full_stripe_len(map);
3578 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
3579 const u64 orig_physical = map->stripes[stripe_index].physical;
3580 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
3581 u64 cur_logical = orig_logical;
3582 u64 cur_physical = orig_physical;
3583 int ret = 0;
3584
3585 while (cur_logical < bg->start + bg->length) {
3586 /*
3587 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
3588 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
3589 * this stripe.
3590 */
3591 ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map,
3592 cur_logical, map->stripe_len, device,
3593 cur_physical, mirror_num);
3594 if (ret)
3595 return ret;
3596 /* Skip to next stripe which belongs to the target device */
3597 cur_logical += logical_increment;
3598 /* For physical offset, we just go to next stripe */
3599 cur_physical += map->stripe_len;
3600 }
3601 return ret;
3602}
3603
d9d181c1 3604static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2ae8ae3d 3605 struct btrfs_block_group *bg,
bc88b486 3606 struct extent_map *em,
a36cf8b8 3607 struct btrfs_device *scrub_dev,
bc88b486 3608 int stripe_index)
a2de733c 3609{
2522dbe8 3610 struct btrfs_path *path;
fb456252 3611 struct btrfs_fs_info *fs_info = sctx->fs_info;
29cbcf40 3612 struct btrfs_root *root;
fc28b25e 3613 struct btrfs_root *csum_root;
e7786c3a 3614 struct blk_plug plug;
bc88b486 3615 struct map_lookup *map = em->map_lookup;
09022b14 3616 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2ae8ae3d 3617 const u64 chunk_logical = bg->start;
a2de733c 3618 int ret;
1194a824 3619 u64 physical = map->stripes[stripe_index].physical;
bc88b486
QW
3620 const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
3621 const u64 physical_end = physical + dev_stripe_len;
a2de733c 3622 u64 logical;
625f1c8d 3623 u64 logic_end;
18d30ab9 3624 /* The logical increment after finishing one stripe */
5c07c53f 3625 u64 increment;
18d30ab9 3626 /* Offset inside the chunk */
a2de733c 3627 u64 offset;
5a6ac9ea
MX
3628 u64 stripe_logical;
3629 u64 stripe_end;
3b080b25 3630 int stop_loop = 0;
53b381b3 3631
a2de733c
AJ
3632 path = btrfs_alloc_path();
3633 if (!path)
3634 return -ENOMEM;
3635
b5d67f64
SB
3636 /*
3637 * Work on the commit root. The related disk blocks are static as
3638 * long as COW is applied. This means it is safe to rewrite
3639 * them to repair disk errors without any race conditions.
3640 */
a2de733c
AJ
3641 path->search_commit_root = 1;
3642 path->skip_locking = 1;
dcf62b20 3643 path->reada = READA_FORWARD;
a2de733c 3644
d9d181c1 3645 wait_event(sctx->list_wait,
b6bfebc1 3646 atomic_read(&sctx->bios_in_flight) == 0);
cb7ab021 3647 scrub_blocked_if_needed(fs_info);
7a26285e 3648
e430c428
QW
3649 root = btrfs_extent_root(fs_info, bg->start);
3650 csum_root = btrfs_csum_root(fs_info, bg->start);
fc28b25e 3651
a2de733c
AJ
3652 /*
3653 * Collect all data csums for the stripe to avoid seeking during
3654 * the scrub. This might currently (crc32) end up being about 1MB.
3655 */
e7786c3a 3656 blk_start_plug(&plug);
a2de733c 3657
de17addc
NA
3658 if (sctx->is_dev_replace &&
3659 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
3660 mutex_lock(&sctx->wr_lock);
3661 sctx->write_pointer = physical;
3662 mutex_unlock(&sctx->wr_lock);
3663 sctx->flush_all_writes = true;
3664 }
3665
09022b14
QW
3666 /*
3667 * There used to be a big double loop to handle all profiles using the
3668 * same routine, which grew larger and more convoluted over time.
3669 *
3670 * So here we handle each profile differently, so that simpler profiles
3671 * have a simpler scrubbing function.
3672 */
3673 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
3674 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
3675 /*
3676 * The above check rules out all complex profiles; the remaining
3677 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
3678 * mirrored duplication without striping.
3679 *
3680 * Only @physical and @mirror_num need to be calculated using
3681 * @stripe_index.
3682 */
3683 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3684 bg->start, bg->length, scrub_dev,
3685 map->stripes[stripe_index].physical,
3686 stripe_index + 1);
e430c428 3687 offset = 0;
09022b14
QW
3688 goto out;
3689 }
8557635e
QW
3690 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3691 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
3692 scrub_dev, stripe_index);
e430c428 3693 offset = map->stripe_len * (stripe_index / map->sub_stripes);
8557635e
QW
3694 goto out;
3695 }
3696
3697 /* Only RAID56 goes through the old code */
3698 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
a2de733c 3699 ret = 0;
e430c428
QW
3700
3701 /* Calculate the logical end of the stripe */
3702 get_raid56_logic_offset(physical_end, stripe_index,
3703 map, &logic_end, NULL);
3704 logic_end += chunk_logical;
3705
3706 /* Initialize @offset in case we need to go to out: label */
3707 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
3708 increment = map->stripe_len * nr_data_stripes(map);
3709
18d30ab9
QW
3710 /*
3711 * Due to the rotation, for RAID56 it's better to iterate each stripe
3712 * using its physical offset.
3713 */
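	/*
	 * Within this loop, get_raid56_logic_offset() maps the device physical
	 * offset back to a logical offset; a non-zero return means this device
	 * stripe holds P/Q parity for that full stripe, so the full stripe is
	 * handed to scrub_raid56_parity() instead of being scrubbed as data.
	 */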
3b080b25 3714 while (physical < physical_end) {
18d30ab9
QW
3715 ret = get_raid56_logic_offset(physical, stripe_index, map,
3716 &logical, &stripe_logical);
e430c428
QW
3717 logical += chunk_logical;
3718 if (ret) {
3719 /* It is a parity stripe */
3720 stripe_logical += chunk_logical;
3721 stripe_end = stripe_logical + increment;
3722 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3723 stripe_logical,
3724 stripe_end);
3725 if (ret)
3726 goto out;
18d30ab9 3727 goto next;
f2f66a2f
ZL
3728 }
3729
18d30ab9
QW
3730 /*
3731 * Now we're at a data stripe, scrub each extent in the range.
3732 *
3733 * At this stage, if we ignore the repair part, inside each data
3734 * stripe it is no different from the SINGLE profile.
3735 * We can reuse scrub_simple_mirror() here, as the repair part
3736 * is still based on @mirror_num.
3737 */
3738 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3739 logical, map->stripe_len,
3740 scrub_dev, physical, 1);
a2de733c
AJ
3741 if (ret < 0)
3742 goto out;
a2de733c 3743next:
a2de733c
AJ
3744 logical += increment;
3745 physical += map->stripe_len;
d9d181c1 3746 spin_lock(&sctx->stat_lock);
625f1c8d 3747 if (stop_loop)
bc88b486
QW
3748 sctx->stat.last_physical =
3749 map->stripes[stripe_index].physical + dev_stripe_len;
625f1c8d
LB
3750 else
3751 sctx->stat.last_physical = physical;
d9d181c1 3752 spin_unlock(&sctx->stat_lock);
625f1c8d
LB
3753 if (stop_loop)
3754 break;
a2de733c 3755 }
ff023aac 3756out:
a2de733c 3757 /* push queued extents */
d9d181c1 3758 scrub_submit(sctx);
3fb99303 3759 mutex_lock(&sctx->wr_lock);
ff023aac 3760 scrub_wr_submit(sctx);
3fb99303 3761 mutex_unlock(&sctx->wr_lock);
a2de733c 3762
e7786c3a 3763 blk_finish_plug(&plug);
a2de733c 3764 btrfs_free_path(path);
7db1c5d1
NA
3765
3766 if (sctx->is_dev_replace && ret >= 0) {
3767 int ret2;
3768
2ae8ae3d
QW
3769 ret2 = sync_write_pointer_for_zoned(sctx,
3770 chunk_logical + offset,
3771 map->stripes[stripe_index].physical,
3772 physical_end);
7db1c5d1
NA
3773 if (ret2)
3774 ret = ret2;
3775 }
3776
a2de733c
AJ
3777 return ret < 0 ? ret : 0;
3778}
3779
d9d181c1 3780static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
d04fbe19 3781 struct btrfs_block_group *bg,
a36cf8b8 3782 struct btrfs_device *scrub_dev,
020d5b73 3783 u64 dev_offset,
d04fbe19 3784 u64 dev_extent_len)
a2de733c 3785{
fb456252 3786 struct btrfs_fs_info *fs_info = sctx->fs_info;
c8bf1b67 3787 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
a2de733c
AJ
3788 struct map_lookup *map;
3789 struct extent_map *em;
3790 int i;
ff023aac 3791 int ret = 0;
a2de733c 3792
c8bf1b67 3793 read_lock(&map_tree->lock);
d04fbe19 3794 em = lookup_extent_mapping(map_tree, bg->start, bg->length);
c8bf1b67 3795 read_unlock(&map_tree->lock);
a2de733c 3796
020d5b73
FM
3797 if (!em) {
3798 /*
3799 * Might have been an unused block group deleted by the cleaner
3800 * kthread or relocation.
3801 */
d04fbe19 3802 spin_lock(&bg->lock);
3349b57f 3803 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
020d5b73 3804 ret = -EINVAL;
d04fbe19 3805 spin_unlock(&bg->lock);
020d5b73
FM
3806
3807 return ret;
3808 }
d04fbe19 3809 if (em->start != bg->start)
a2de733c 3810 goto out;
d04fbe19 3811 if (em->len < dev_extent_len)
a2de733c
AJ
3812 goto out;
3813
d04fbe19 3814 map = em->map_lookup;
a2de733c 3815 for (i = 0; i < map->num_stripes; ++i) {
a36cf8b8 3816 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
859acaf1 3817 map->stripes[i].physical == dev_offset) {
bc88b486 3818 ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
a2de733c
AJ
3819 if (ret)
3820 goto out;
3821 }
3822 }
3823out:
3824 free_extent_map(em);
3825
3826 return ret;
3827}
3828
de17addc
NA
3829static int finish_extent_writes_for_zoned(struct btrfs_root *root,
3830 struct btrfs_block_group *cache)
3831{
3832 struct btrfs_fs_info *fs_info = cache->fs_info;
3833 struct btrfs_trans_handle *trans;
3834
3835 if (!btrfs_is_zoned(fs_info))
3836 return 0;
3837
3838 btrfs_wait_block_group_reservations(cache);
3839 btrfs_wait_nocow_writers(cache);
3840 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
3841
3842 trans = btrfs_join_transaction(root);
3843 if (IS_ERR(trans))
3844 return PTR_ERR(trans);
3845 return btrfs_commit_transaction(trans);
3846}
3847
a2de733c 3848static noinline_for_stack
a36cf8b8 3849int scrub_enumerate_chunks(struct scrub_ctx *sctx,
32934280 3850 struct btrfs_device *scrub_dev, u64 start, u64 end)
a2de733c
AJ
3851{
3852 struct btrfs_dev_extent *dev_extent = NULL;
3853 struct btrfs_path *path;
0b246afa
JM
3854 struct btrfs_fs_info *fs_info = sctx->fs_info;
3855 struct btrfs_root *root = fs_info->dev_root;
a2de733c 3856 u64 chunk_offset;
55e3a601 3857 int ret = 0;
76a8efa1 3858 int ro_set;
a2de733c
AJ
3859 int slot;
3860 struct extent_buffer *l;
3861 struct btrfs_key key;
3862 struct btrfs_key found_key;
32da5386 3863 struct btrfs_block_group *cache;
ff023aac 3864 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
a2de733c
AJ
3865
3866 path = btrfs_alloc_path();
3867 if (!path)
3868 return -ENOMEM;
3869
e4058b54 3870 path->reada = READA_FORWARD;
a2de733c
AJ
3871 path->search_commit_root = 1;
3872 path->skip_locking = 1;
3873
a36cf8b8 3874 key.objectid = scrub_dev->devid;
a2de733c
AJ
3875 key.offset = 0ull;
3876 key.type = BTRFS_DEV_EXTENT_KEY;
3877
a2de733c 3878 while (1) {
d04fbe19
QW
3879 u64 dev_extent_len;
3880
a2de733c
AJ
3881 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3882 if (ret < 0)
8c51032f
AJ
3883 break;
3884 if (ret > 0) {
3885 if (path->slots[0] >=
3886 btrfs_header_nritems(path->nodes[0])) {
3887 ret = btrfs_next_leaf(root, path);
55e3a601
Z
3888 if (ret < 0)
3889 break;
3890 if (ret > 0) {
3891 ret = 0;
8c51032f 3892 break;
55e3a601
Z
3893 }
3894 } else {
3895 ret = 0;
8c51032f
AJ
3896 }
3897 }
a2de733c
AJ
3898
3899 l = path->nodes[0];
3900 slot = path->slots[0];
3901
3902 btrfs_item_key_to_cpu(l, &found_key, slot);
3903
a36cf8b8 3904 if (found_key.objectid != scrub_dev->devid)
a2de733c
AJ
3905 break;
3906
962a298f 3907 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
a2de733c
AJ
3908 break;
3909
3910 if (found_key.offset >= end)
3911 break;
3912
3913 if (found_key.offset < key.offset)
3914 break;
3915
3916 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
d04fbe19 3917 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
a2de733c 3918
d04fbe19 3919 if (found_key.offset + dev_extent_len <= start)
ced96edc 3920 goto skip;
a2de733c 3921
a2de733c
AJ
3922 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3923
3924 /*
3925 * get a reference on the corresponding block group to prevent
3926 * the chunk from going away while we scrub it
3927 */
3928 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
ced96edc
QW
3929
3930 /* some chunks are removed but not committed to disk yet,
3931 * continue scrubbing */
3932 if (!cache)
3933 goto skip;
3934
a692e13d
FM
3935 ASSERT(cache->start <= chunk_offset);
3936 /*
3937 * We are using the commit root to search for device extents, so
3938 * that means we could have found a device extent item from a
3939 * block group that was deleted in the current transaction. The
3940 * logical start offset of the deleted block group, stored at
3941 * @chunk_offset, might be part of the logical address range of
3942 * a new block group (which uses different physical extents).
3943 * In this case btrfs_lookup_block_group() has returned the new
3944 * block group, and its start address is less than @chunk_offset.
3945 *
3946 * We skip such new block groups, because it's pointless to
3947 * process them, as we won't find their extents because we search
3948 * for them using the commit root of the extent tree. For a device
3949 * replace it's also fine to skip it, we won't miss copying them
3950 * to the target device because we have the write duplication
3951 * setup through the regular write path (by btrfs_map_block()),
3952 * and we have committed a transaction when we started the device
3953 * replace, right after setting up the device replace state.
3954 */
3955 if (cache->start < chunk_offset) {
3956 btrfs_put_block_group(cache);
3957 goto skip;
3958 }
3959
78ce9fc2 3960 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
3349b57f 3961 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
0dc16ef4
FM
3962 btrfs_put_block_group(cache);
3963 goto skip;
78ce9fc2 3964 }
78ce9fc2
NA
3965 }
3966
2473d24f
FM
3967 /*
3968 * Make sure that while we are scrubbing the corresponding block
3969 * group doesn't get its logical address and its device extents
3970 * reused for another block group, which can possibly be of a
3971 * different type and different profile. We do this to prevent
3972 * false error detections and crashes due to bogus attempts to
3973 * repair extents.
3974 */
3975 spin_lock(&cache->lock);
3349b57f 3976 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2473d24f
FM
3977 spin_unlock(&cache->lock);
3978 btrfs_put_block_group(cache);
3979 goto skip;
3980 }
6b7304af 3981 btrfs_freeze_block_group(cache);
2473d24f
FM
3982 spin_unlock(&cache->lock);
3983
55e3a601
Z
3984 /*
3985 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
3986 * to avoid a deadlock caused by:
3987 * btrfs_inc_block_group_ro()
3988 * -> btrfs_wait_for_commit()
3989 * -> btrfs_commit_transaction()
3990 * -> btrfs_scrub_pause()
3991 */
3992 scrub_pause_on(fs_info);
b12de528
QW
3993
3994 /*
3995 * Don't do chunk preallocation for scrub.
3996 *
3997 * This is especially important for SYSTEM bgs, or we can hit
3998 * -EFBIG from btrfs_finish_chunk_alloc() like:
3999 * 1. The only SYSTEM bg is marked RO.
4000 * Since SYSTEM bg is small, that's pretty common.
4001 * 2. New SYSTEM bg will be allocated
4002 * Due to regular version will allocate new chunk.
4003 * 3. New SYSTEM bg is empty and will get cleaned up
4004 * Before cleanup really happens, it's marked RO again.
4005 * 4. Empty SYSTEM bg get scrubbed
4006 * We go back to 2.
4007 *
4008 * This can easily boost the amount of SYSTEM chunks if cleaner
4009 * thread can't be triggered fast enough, and use up all space
4010 * of btrfs_super_block::sys_chunk_array
1bbb97b8
QW
4011 *
4012 * While for dev replace, we need to try our best to mark block
4013 * group RO, to prevent race between:
4014 * - Write duplication
4015 * Contains latest data
4016 * - Scrub copy
4017 * Contains data from commit tree
4018 *
4019 * If target block group is not marked RO, nocow writes can
4020 * be overwritten by scrub copy, causing data corruption.
4021 * So for dev-replace, it's not allowed to continue if a block
4022 * group is not RO.
b12de528 4023 */
1bbb97b8 4024 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
de17addc
NA
4025 if (!ret && sctx->is_dev_replace) {
4026 ret = finish_extent_writes_for_zoned(root, cache);
4027 if (ret) {
4028 btrfs_dec_block_group_ro(cache);
4029 scrub_pause_off(fs_info);
4030 btrfs_put_block_group(cache);
4031 break;
4032 }
4033 }
4034
76a8efa1
Z
4035 if (ret == 0) {
4036 ro_set = 1;
1bbb97b8 4037 } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
76a8efa1
Z
4038 /*
4039 * btrfs_inc_block_group_ro() returns -ENOSPC when it
4040 * fails to create a new chunk for metadata.
1bbb97b8 4041 * It is not a problem for scrub, because
76a8efa1
Z
4042 * metadata is always COWed, and our scrub has paused
4043 * transaction commits.
4044 */
4045 ro_set = 0;
195a49ea
FM
4046 } else if (ret == -ETXTBSY) {
4047 btrfs_warn(fs_info,
4048 "skipping scrub of block group %llu due to active swapfile",
4049 cache->start);
4050 scrub_pause_off(fs_info);
4051 ret = 0;
4052 goto skip_unfreeze;
76a8efa1 4053 } else {
5d163e0e 4054 btrfs_warn(fs_info,
913e1535 4055 "failed setting block group ro: %d", ret);
6b7304af 4056 btrfs_unfreeze_block_group(cache);
55e3a601 4057 btrfs_put_block_group(cache);
1bbb97b8 4058 scrub_pause_off(fs_info);
55e3a601
Z
4059 break;
4060 }
4061
1bbb97b8
QW
4062 /*
4063 * Now the target block group is marked RO, wait for nocow writes to
4064 * finish before dev-replace.
4065 * COW is fine, as COW never overwrites extents in commit tree.
4066 */
4067 if (sctx->is_dev_replace) {
4068 btrfs_wait_nocow_writers(cache);
4069 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
4070 cache->length);
4071 }
4072
4073 scrub_pause_off(fs_info);
3ec17a67 4074 down_write(&dev_replace->rwsem);
d04fbe19 4075 dev_replace->cursor_right = found_key.offset + dev_extent_len;
ff023aac
SB
4076 dev_replace->cursor_left = found_key.offset;
4077 dev_replace->item_needs_writeback = 1;
cb5583dd
DS
4078 up_write(&dev_replace->rwsem);
4079
d04fbe19
QW
4080 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
4081 dev_extent_len);
ff023aac
SB
4082
4083 /*
4084 * Flush and submit all pending read and write bios, and afterwards
4085 * wait for them.
4086 * Note that in the dev replace case, a read request causes
4087 * write requests that are submitted in the read completion
4088 * worker. Therefore in the current situation, it is required
4089 * that all write requests are flushed, so that all read and
4090 * write requests are really completed when bios_in_flight
4091 * changes to 0.
4092 */
2073c4c2 4093 sctx->flush_all_writes = true;
ff023aac 4094 scrub_submit(sctx);
3fb99303 4095 mutex_lock(&sctx->wr_lock);
ff023aac 4096 scrub_wr_submit(sctx);
3fb99303 4097 mutex_unlock(&sctx->wr_lock);
ff023aac
SB
4098
4099 wait_event(sctx->list_wait,
4100 atomic_read(&sctx->bios_in_flight) == 0);
b708ce96
Z
4101
4102 scrub_pause_on(fs_info);
12cf9372
WS
4103
4104 /*
4105 * Must be called before we decrease @scrub_paused.
4106 * Make sure we don't block transaction commit while
4107 * we are waiting for pending workers to finish.
4108 */
ff023aac
SB
4109 wait_event(sctx->list_wait,
4110 atomic_read(&sctx->workers_pending) == 0);
2073c4c2 4111 sctx->flush_all_writes = false;
12cf9372 4112
b708ce96 4113 scrub_pause_off(fs_info);
ff023aac 4114
78ce9fc2
NA
4115 if (sctx->is_dev_replace &&
4116 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
4117 cache, found_key.offset))
4118 ro_set = 0;
4119
3ec17a67 4120 down_write(&dev_replace->rwsem);
1a1a8b73
FM
4121 dev_replace->cursor_left = dev_replace->cursor_right;
4122 dev_replace->item_needs_writeback = 1;
3ec17a67 4123 up_write(&dev_replace->rwsem);
1a1a8b73 4124
76a8efa1 4125 if (ro_set)
2ff7e61e 4126 btrfs_dec_block_group_ro(cache);
ff023aac 4127
758f2dfc
FM
4128 /*
4129 * We might have prevented the cleaner kthread from deleting
4130 * this block group if it was already unused because we raced
4131 * and set it to RO mode first. So add it back to the unused
4132 * list, otherwise it might not ever be deleted unless a manual
4133 * balance is triggered or it becomes used and unused again.
4134 */
4135 spin_lock(&cache->lock);
3349b57f
JB
4136 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
4137 !cache->ro && cache->reserved == 0 && cache->used == 0) {
758f2dfc 4138 spin_unlock(&cache->lock);
6e80d4f8
DZ
4139 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
4140 btrfs_discard_queue_work(&fs_info->discard_ctl,
4141 cache);
4142 else
4143 btrfs_mark_bg_unused(cache);
758f2dfc
FM
4144 } else {
4145 spin_unlock(&cache->lock);
4146 }
195a49ea 4147skip_unfreeze:
6b7304af 4148 btrfs_unfreeze_block_group(cache);
a2de733c
AJ
4149 btrfs_put_block_group(cache);
4150 if (ret)
4151 break;
32934280 4152 if (sctx->is_dev_replace &&
af1be4f8 4153 atomic64_read(&dev_replace->num_write_errors) > 0) {
ff023aac
SB
4154 ret = -EIO;
4155 break;
4156 }
4157 if (sctx->stat.malloc_errors > 0) {
4158 ret = -ENOMEM;
4159 break;
4160 }
ced96edc 4161skip:
d04fbe19 4162 key.offset = found_key.offset + dev_extent_len;
71267333 4163 btrfs_release_path(path);
a2de733c
AJ
4164 }
4165
a2de733c 4166 btrfs_free_path(path);
8c51032f 4167
55e3a601 4168 return ret;
a2de733c
AJ
4169}
4170
a36cf8b8
SB
4171static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
4172 struct btrfs_device *scrub_dev)
a2de733c
AJ
4173{
4174 int i;
4175 u64 bytenr;
4176 u64 gen;
4177 int ret;
0b246afa 4178 struct btrfs_fs_info *fs_info = sctx->fs_info;
a2de733c 4179
84961539 4180 if (BTRFS_FS_ERROR(fs_info))
fbabd4a3 4181 return -EROFS;
79787eaa 4182
5f546063 4183 /* Seed devices of a new filesystem have their own generation. */
0b246afa 4184 if (scrub_dev->fs_devices != fs_info->fs_devices)
5f546063
MX
4185 gen = scrub_dev->generation;
4186 else
0b246afa 4187 gen = fs_info->last_trans_committed;
a2de733c
AJ
4188
4189 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
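		/*
		 * Super block copies live at fixed device offsets (64KiB, 64MiB
		 * and 256GiB); the size check below stops the loop once a copy
		 * no longer fits on the device.
		 */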
4190 bytenr = btrfs_sb_offset(i);
935e5cc9
MX
4191 if (bytenr + BTRFS_SUPER_INFO_SIZE >
4192 scrub_dev->commit_total_bytes)
a2de733c 4193 break;
12659251
NA
4194 if (!btrfs_check_super_location(scrub_dev, bytenr))
4195 continue;
a2de733c 4196
46343501
QW
4197 ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
4198 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
4199 NULL, bytenr);
a2de733c
AJ
4200 if (ret)
4201 return ret;
4202 }
b6bfebc1 4203 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
a2de733c
AJ
4204
4205 return 0;
4206}
4207
e89c4a9c
JB
4208static void scrub_workers_put(struct btrfs_fs_info *fs_info)
4209{
4210 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
4211 &fs_info->scrub_lock)) {
be539518
CH
4212 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
4213 struct workqueue_struct *scrub_wr_comp =
4214 fs_info->scrub_wr_completion_workers;
4215 struct workqueue_struct *scrub_parity =
4216 fs_info->scrub_parity_workers;
e89c4a9c
JB
4217
4218 fs_info->scrub_workers = NULL;
4219 fs_info->scrub_wr_completion_workers = NULL;
4220 fs_info->scrub_parity_workers = NULL;
4221 mutex_unlock(&fs_info->scrub_lock);
4222
be539518
CH
4223 if (scrub_workers)
4224 destroy_workqueue(scrub_workers);
4225 if (scrub_wr_comp)
4226 destroy_workqueue(scrub_wr_comp);
4227 if (scrub_parity)
4228 destroy_workqueue(scrub_parity);
e89c4a9c
JB
4229 }
4230}
4231
a2de733c
AJ
4232/*
4233 * get a reference count on fs_info->scrub_workers. start worker if necessary
4234 */
ff023aac
SB
4235static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4236 int is_dev_replace)
a2de733c 4237{
be539518
CH
4238 struct workqueue_struct *scrub_workers = NULL;
4239 struct workqueue_struct *scrub_wr_comp = NULL;
4240 struct workqueue_struct *scrub_parity = NULL;
6f011058 4241 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
0339ef2f 4242 int max_active = fs_info->thread_pool_size;
e89c4a9c 4243 int ret = -ENOMEM;
a2de733c 4244
e89c4a9c
JB
4245 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
4246 return 0;
eb4318e5 4247
be539518
CH
4248 scrub_workers = alloc_workqueue("btrfs-scrub", flags,
4249 is_dev_replace ? 1 : max_active);
e89c4a9c
JB
4250 if (!scrub_workers)
4251 goto fail_scrub_workers;
e82afc52 4252
be539518 4253 scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
e89c4a9c
JB
4254 if (!scrub_wr_comp)
4255 goto fail_scrub_wr_completion_workers;
ff09c4ca 4256
be539518 4257 scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active);
e89c4a9c
JB
4258 if (!scrub_parity)
4259 goto fail_scrub_parity_workers;
4260
4261 mutex_lock(&fs_info->scrub_lock);
4262 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4263 ASSERT(fs_info->scrub_workers == NULL &&
4264 fs_info->scrub_wr_completion_workers == NULL &&
4265 fs_info->scrub_parity_workers == NULL);
4266 fs_info->scrub_workers = scrub_workers;
4267 fs_info->scrub_wr_completion_workers = scrub_wr_comp;
4268 fs_info->scrub_parity_workers = scrub_parity;
ff09c4ca 4269 refcount_set(&fs_info->scrub_workers_refcnt, 1);
e89c4a9c
JB
4270 mutex_unlock(&fs_info->scrub_lock);
4271 return 0;
632dd772 4272 }
e89c4a9c
JB
4273 /* Other thread raced in and created the workers for us */
4274 refcount_inc(&fs_info->scrub_workers_refcnt);
4275 mutex_unlock(&fs_info->scrub_lock);
e82afc52 4276
e89c4a9c 4277 ret = 0;
be539518 4278 destroy_workqueue(scrub_parity);
e82afc52 4279fail_scrub_parity_workers:
be539518 4280 destroy_workqueue(scrub_wr_comp);
e82afc52 4281fail_scrub_wr_completion_workers:
be539518 4282 destroy_workqueue(scrub_workers);
e82afc52 4283fail_scrub_workers:
e89c4a9c 4284 return ret;
a2de733c
AJ
4285}
4286
aa1b8cd4
SB
4287int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4288 u64 end, struct btrfs_scrub_progress *progress,
63a212ab 4289 int readonly, int is_dev_replace)
a2de733c 4290{
562d7b15 4291 struct btrfs_dev_lookup_args args = { .devid = devid };
d9d181c1 4292 struct scrub_ctx *sctx;
a2de733c
AJ
4293 int ret;
4294 struct btrfs_device *dev;
a5fb1142 4295 unsigned int nofs_flag;
f9eab5f0 4296 bool need_commit = false;
a2de733c 4297
aa1b8cd4 4298 if (btrfs_fs_closing(fs_info))
6c3abeda 4299 return -EAGAIN;
a2de733c 4300
fc65bb53
QW
4301 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
4302 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
b5d67f64 4303
fc65bb53
QW
4304 /*
4305 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
4306 * value (max nodesize / min sectorsize), thus nodesize should always
4307 * be fine.
4308 */
4309 ASSERT(fs_info->nodesize <=
4310 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
7a9e9987 4311
0e94c4f4
DS
4312 /* Allocate outside of device_list_mutex */
4313 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
4314 if (IS_ERR(sctx))
4315 return PTR_ERR(sctx);
a2de733c 4316
e89c4a9c
JB
4317 ret = scrub_workers_get(fs_info, is_dev_replace);
4318 if (ret)
4319 goto out_free_ctx;
4320
aa1b8cd4 4321 mutex_lock(&fs_info->fs_devices->device_list_mutex);
562d7b15 4322 dev = btrfs_find_device(fs_info->fs_devices, &args);
e6e674bd
AJ
4323 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
4324 !is_dev_replace)) {
aa1b8cd4 4325 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
0e94c4f4 4326 ret = -ENODEV;
e89c4a9c 4327 goto out;
a2de733c 4328 }
a2de733c 4329
ebbede42
AJ
4330 if (!is_dev_replace && !readonly &&
4331 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
5d68da3b 4332 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a4852cf2
DS
4333 btrfs_err_in_rcu(fs_info,
4334 "scrub on devid %llu: filesystem on %s is not writable",
cb3e217b 4335 devid, btrfs_dev_name(dev));
0e94c4f4 4336 ret = -EROFS;
e89c4a9c 4337 goto out;
5d68da3b
MX
4338 }
4339
3b7a016f 4340 mutex_lock(&fs_info->scrub_lock);
e12c9621 4341 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
401e29c1 4342 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
a2de733c 4343 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 4344 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
0e94c4f4 4345 ret = -EIO;
e89c4a9c 4346 goto out;
a2de733c
AJ
4347 }
4348
cb5583dd 4349 down_read(&fs_info->dev_replace.rwsem);
cadbc0a0 4350 if (dev->scrub_ctx ||
8dabb742
SB
4351 (!is_dev_replace &&
4352 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
cb5583dd 4353 up_read(&fs_info->dev_replace.rwsem);
a2de733c 4354 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 4355 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
0e94c4f4 4356 ret = -EINPROGRESS;
e89c4a9c 4357 goto out;
a2de733c 4358 }
cb5583dd 4359 up_read(&fs_info->dev_replace.rwsem);
3b7a016f 4360
d9d181c1 4361 sctx->readonly = readonly;
cadbc0a0 4362 dev->scrub_ctx = sctx;
3cb0929a 4363 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c 4364
3cb0929a
WS
4365 /*
4366 * By checking @scrub_pause_req here, we can avoid a
4367 * race between committing a transaction and scrubbing.
4368 */
cb7ab021 4369 __scrub_blocked_if_needed(fs_info);
a2de733c
AJ
4370 atomic_inc(&fs_info->scrubs_running);
4371 mutex_unlock(&fs_info->scrub_lock);
a2de733c 4372
a5fb1142
FM
4373 /*
4374 * In order to avoid deadlock with reclaim when there is a transaction
4375 * trying to pause scrub, make sure we use GFP_NOFS for all the
46343501 4376 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
a5fb1142
FM
4377 * invoked by our callees. The pausing request is done when the
4378 * transaction commit starts, and it blocks the transaction until scrub
4379 * is paused (done at specific points at scrub_stripe() or right above
4380 * before incrementing fs_info->scrubs_running).
4381 */
4382 nofs_flag = memalloc_nofs_save();
ff023aac 4383 if (!is_dev_replace) {
f9eab5f0
QW
4384 u64 old_super_errors;
4385
4386 spin_lock(&sctx->stat_lock);
4387 old_super_errors = sctx->stat.super_errors;
4388 spin_unlock(&sctx->stat_lock);
4389
d1e14420 4390 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
9b011adf
WS
4391 /*
4392 * By holding the device list mutex, we can
4393 * kick off writing supers in log tree sync.
4394 */
3cb0929a 4395 mutex_lock(&fs_info->fs_devices->device_list_mutex);
ff023aac 4396 ret = scrub_supers(sctx, dev);
3cb0929a 4397 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
f9eab5f0
QW
4398
4399 spin_lock(&sctx->stat_lock);
4400 /*
4401 * Super block errors found, but we can not commit transaction
4402 * at current context, since btrfs_commit_transaction() needs
4403 * to pause the current running scrub (hold by ourselves).
4404 */
4405 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
4406 need_commit = true;
4407 spin_unlock(&sctx->stat_lock);
ff023aac 4408 }
a2de733c
AJ
4409
4410 if (!ret)
32934280 4411 ret = scrub_enumerate_chunks(sctx, dev, start, end);
a5fb1142 4412 memalloc_nofs_restore(nofs_flag);
a2de733c 4413
b6bfebc1 4414 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
a2de733c
AJ
4415 atomic_dec(&fs_info->scrubs_running);
4416 wake_up(&fs_info->scrub_pause_wait);
4417
b6bfebc1 4418 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
0ef8e451 4419
a2de733c 4420 if (progress)
d9d181c1 4421 memcpy(progress, &sctx->stat, sizeof(*progress));
a2de733c 4422
d1e14420
AJ
4423 if (!is_dev_replace)
4424 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
4425 ret ? "not finished" : "finished", devid, ret);
4426
a2de733c 4427 mutex_lock(&fs_info->scrub_lock);
cadbc0a0 4428 dev->scrub_ctx = NULL;
a2de733c
AJ
4429 mutex_unlock(&fs_info->scrub_lock);
4430
e89c4a9c 4431 scrub_workers_put(fs_info);
f55985f4 4432 scrub_put_ctx(sctx);
a2de733c 4433
f9eab5f0
QW
4434 /*
4435 * We found some super block errors before, now try to force a
4436 * transaction commit, as scrub has finished.
4437 */
4438 if (need_commit) {
4439 struct btrfs_trans_handle *trans;
4440
4441 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4442 if (IS_ERR(trans)) {
4443 ret = PTR_ERR(trans);
4444 btrfs_err(fs_info,
4445 "scrub: failed to start transaction to fix super block errors: %d", ret);
4446 return ret;
4447 }
4448 ret = btrfs_commit_transaction(trans);
4449 if (ret < 0)
4450 btrfs_err(fs_info,
4451 "scrub: failed to commit transaction to fix super block errors: %d", ret);
4452 }
0e94c4f4 4453 return ret;
e89c4a9c
JB
4454out:
4455 scrub_workers_put(fs_info);
0e94c4f4
DS
4456out_free_ctx:
4457 scrub_free_ctx(sctx);
4458
a2de733c
AJ
4459 return ret;
4460}
4461
2ff7e61e 4462void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
a2de733c 4463{
a2de733c
AJ
4464 mutex_lock(&fs_info->scrub_lock);
4465 atomic_inc(&fs_info->scrub_pause_req);
4466 while (atomic_read(&fs_info->scrubs_paused) !=
4467 atomic_read(&fs_info->scrubs_running)) {
4468 mutex_unlock(&fs_info->scrub_lock);
4469 wait_event(fs_info->scrub_pause_wait,
4470 atomic_read(&fs_info->scrubs_paused) ==
4471 atomic_read(&fs_info->scrubs_running));
4472 mutex_lock(&fs_info->scrub_lock);
4473 }
4474 mutex_unlock(&fs_info->scrub_lock);
a2de733c
AJ
4475}
4476
2ff7e61e 4477void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
a2de733c 4478{
a2de733c
AJ
4479 atomic_dec(&fs_info->scrub_pause_req);
4480 wake_up(&fs_info->scrub_pause_wait);
a2de733c
AJ
4481}
4482
aa1b8cd4 4483int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
a2de733c 4484{
a2de733c
AJ
4485 mutex_lock(&fs_info->scrub_lock);
4486 if (!atomic_read(&fs_info->scrubs_running)) {
4487 mutex_unlock(&fs_info->scrub_lock);
4488 return -ENOTCONN;
4489 }
4490
4491 atomic_inc(&fs_info->scrub_cancel_req);
4492 while (atomic_read(&fs_info->scrubs_running)) {
4493 mutex_unlock(&fs_info->scrub_lock);
4494 wait_event(fs_info->scrub_pause_wait,
4495 atomic_read(&fs_info->scrubs_running) == 0);
4496 mutex_lock(&fs_info->scrub_lock);
4497 }
4498 atomic_dec(&fs_info->scrub_cancel_req);
4499 mutex_unlock(&fs_info->scrub_lock);
4500
4501 return 0;
4502}
4503
163e97ee 4504int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
49b25e05 4505{
163e97ee 4506 struct btrfs_fs_info *fs_info = dev->fs_info;
d9d181c1 4507 struct scrub_ctx *sctx;
a2de733c
AJ
4508
4509 mutex_lock(&fs_info->scrub_lock);
cadbc0a0 4510 sctx = dev->scrub_ctx;
d9d181c1 4511 if (!sctx) {
a2de733c
AJ
4512 mutex_unlock(&fs_info->scrub_lock);
4513 return -ENOTCONN;
4514 }
d9d181c1 4515 atomic_inc(&sctx->cancel_req);
cadbc0a0 4516 while (dev->scrub_ctx) {
a2de733c
AJ
4517 mutex_unlock(&fs_info->scrub_lock);
4518 wait_event(fs_info->scrub_pause_wait,
cadbc0a0 4519 dev->scrub_ctx == NULL);
a2de733c
AJ
4520 mutex_lock(&fs_info->scrub_lock);
4521 }
4522 mutex_unlock(&fs_info->scrub_lock);
4523
4524 return 0;
4525}
1623edeb 4526
2ff7e61e 4527int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
a2de733c
AJ
4528 struct btrfs_scrub_progress *progress)
4529{
562d7b15 4530 struct btrfs_dev_lookup_args args = { .devid = devid };
a2de733c 4531 struct btrfs_device *dev;
d9d181c1 4532 struct scrub_ctx *sctx = NULL;
a2de733c 4533
0b246afa 4534 mutex_lock(&fs_info->fs_devices->device_list_mutex);
562d7b15 4535 dev = btrfs_find_device(fs_info->fs_devices, &args);
a2de733c 4536 if (dev)
cadbc0a0 4537 sctx = dev->scrub_ctx;
d9d181c1
SB
4538 if (sctx)
4539 memcpy(progress, &sctx->stat, sizeof(*progress));
0b246afa 4540 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c 4541
d9d181c1 4542 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
a2de733c 4543}
ff023aac 4544
a13467ee
QW
4545static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
4546 u64 extent_logical, u32 extent_len,
4547 u64 *extent_physical,
4548 struct btrfs_device **extent_dev,
4549 int *extent_mirror_num)
ff023aac
SB
4550{
4551 u64 mapped_length;
4c664611 4552 struct btrfs_io_context *bioc = NULL;
ff023aac
SB
4553 int ret;
4554
4555 mapped_length = extent_len;
cf8cddd3 4556 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4c664611
QW
4557 &mapped_length, &bioc, 0);
4558 if (ret || !bioc || mapped_length < extent_len ||
4559 !bioc->stripes[0].dev->bdev) {
4560 btrfs_put_bioc(bioc);
ff023aac
SB
4561 return;
4562 }
4563
4c664611
QW
4564 *extent_physical = bioc->stripes[0].physical;
4565 *extent_mirror_num = bioc->mirror_num;
4566 *extent_dev = bioc->stripes[0].dev;
4567 btrfs_put_bioc(bioc);
ff023aac 4568}