fs/btrfs/raid56.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "file-item.h"
#include "btrfs_inode.h"

/* Set when additional merges to this rbio are not allowed. */
#define RBIO_RMW_LOCKED_BIT     1

/*
 * Set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW.
 */
#define RBIO_CACHE_BIT          2

/* Set when it is safe to trust the stripe_pages for caching. */
#define RBIO_CACHE_READY_BIT    3

#define RBIO_CACHE_SIZE         1024

#define BTRFS_STRIPE_HASH_TABLE_BITS    11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
        struct list_head hash_list;
        spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
        struct list_head stripe_cache;
        spinlock_t cache_lock;
        int cache_size;
        struct btrfs_stripe_hash table[];
};

/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
        struct page *page;
        unsigned int pgoff:24;
        unsigned int uptodate:8;
};
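
/*
 * Illustrative note (not from the original source): pgoff:24 is wide
 * enough for any in-page offset seen here -- even with 64K pages and a
 * 4K sectorsize the largest pgoff is 61440, well below 2^24 -- and
 * uptodate:8 only ever holds 0 or 1, so both fields pack into a single
 * 32-bit word.
 */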

static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
static void scrub_rbio_work_locked(struct work_struct *work);

static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
        bitmap_free(rbio->error_bitmap);
        kfree(rbio->stripe_pages);
        kfree(rbio->bio_sectors);
        kfree(rbio->stripe_sectors);
        kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
        int i;

        if (!refcount_dec_and_test(&rbio->refs))
                return;

        WARN_ON(!list_empty(&rbio->stripe_cache));
        WARN_ON(!list_empty(&rbio->hash_list));
        WARN_ON(!bio_list_empty(&rbio->bio_list));

        for (i = 0; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i]) {
                        __free_page(rbio->stripe_pages[i]);
                        rbio->stripe_pages[i] = NULL;
                }
        }

        btrfs_put_bioc(rbio->bioc);
        free_raid_bio_pointers(rbio);
        kfree(rbio);
}

static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
        INIT_WORK(&rbio->work, work_func);
        queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * The stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe.
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
        struct btrfs_stripe_hash_table *table;
        struct btrfs_stripe_hash_table *x;
        struct btrfs_stripe_hash *cur;
        struct btrfs_stripe_hash *h;
        int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
        int i;

        if (info->stripe_hash_table)
                return 0;

        /*
         * The table is large, starting with order 4 and can go as high as
         * order 7 in case lock debugging is turned on.
         *
         * Try harder to allocate and fallback to vmalloc to lower the chance
         * of a failing mount.
         */
        table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        spin_lock_init(&table->cache_lock);
        INIT_LIST_HEAD(&table->stripe_cache);

        h = table->table;

        for (i = 0; i < num_entries; i++) {
                cur = h + i;
                INIT_LIST_HEAD(&cur->hash_list);
                spin_lock_init(&cur->lock);
        }

        x = cmpxchg(&info->stripe_hash_table, NULL, table);
        kvfree(x);
        return 0;
}
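
/*
 * Sizing example (illustrative): with BTRFS_STRIPE_HASH_TABLE_BITS == 11,
 * num_entries is 1 << 11 = 2048 buckets, so struct_size() asks for the
 * header plus 2048 {list_head, spinlock} pairs.  That is what makes this
 * an order-4 (up to order-7 with lock debugging) allocation and why
 * kvzalloc() is used to allow a vmalloc fallback.
 */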

/*
 * Caching an rbio means to copy anything from the bio_sectors array
 * into the stripe_pages array.  We use the page uptodate bit in the
 * stripe cache array to indicate if it has valid data.
 *
 * Once the caching is done, we set the cache ready bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        int ret;

        ret = alloc_rbio_pages(rbio);
        if (ret)
                return;

        for (i = 0; i < rbio->nr_sectors; i++) {
                /* Some range not covered by bio (partial write), skip it */
                if (!rbio->bio_sectors[i].page) {
                        /*
                         * Even if the sector is not covered by bio, if it is
                         * a data sector it should still be uptodate as it is
                         * read from disk.
                         */
                        if (i < rbio->nr_data * rbio->stripe_nsectors)
                                ASSERT(rbio->stripe_sectors[i].uptodate);
                        continue;
                }

                ASSERT(rbio->stripe_sectors[i].page);
                memcpy_page(rbio->stripe_sectors[i].page,
                            rbio->stripe_sectors[i].pgoff,
                            rbio->bio_sectors[i].page,
                            rbio->bio_sectors[i].pgoff,
                            rbio->bioc->fs_info->sectorsize);
                rbio->stripe_sectors[i].uptodate = 1;
        }
        set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * We hash on the first logical address of the stripe.
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
        u64 num = rbio->bioc->full_stripe_logical;

        /*
         * We shift down quite a bit.  We're using byte addressing, and
         * most of the lower bits are zeros.  This tends to upset hash_64,
         * and it consistently returns just one or two different values.
         *
         * Shifting off the lower bits fixes things.
         */
        return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
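
/*
 * Worked example (illustrative): a full stripe at logical 1GiB gives
 * num = 0x40000000, so hash_64() sees num >> 16 = 0x4000.  Without the
 * shift, the 16+ low zero bits of the byte address would collapse most
 * stripes into just a handful of the 2048 buckets.
 */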

static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
                                       unsigned int page_nr)
{
        const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        const u32 sectors_per_page = PAGE_SIZE / sectorsize;
        int i;

        ASSERT(page_nr < rbio->nr_pages);

        for (i = sectors_per_page * page_nr;
             i < sectors_per_page * page_nr + sectors_per_page;
             i++) {
                if (!rbio->stripe_sectors[i].uptodate)
                        return false;
        }
        return true;
}

/*
 * Update the stripe_sectors[] array to use correct page and pgoff.
 *
 * Should be called every time any page pointer in stripe_pages[] got modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
        const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        u32 offset;
        int i;

        for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
                int page_index = offset >> PAGE_SHIFT;

                ASSERT(page_index < rbio->nr_pages);
                rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
                rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
        }
}
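
/*
 * Mapping example (illustrative): with 4K pages and a 4K sectorsize,
 * sector i maps to stripe_pages[i] at pgoff 0; with 64K pages, sectors
 * 0-15 all map to stripe_pages[0] at pgoff 0, 0x1000, 0x2000, ...
 */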

static void steal_rbio_page(struct btrfs_raid_bio *src,
                            struct btrfs_raid_bio *dest, int page_nr)
{
        const u32 sectorsize = src->bioc->fs_info->sectorsize;
        const u32 sectors_per_page = PAGE_SIZE / sectorsize;
        int i;

        if (dest->stripe_pages[page_nr])
                __free_page(dest->stripe_pages[page_nr]);
        dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
        src->stripe_pages[page_nr] = NULL;

        /* Also update the sector->uptodate bits. */
        for (i = sectors_per_page * page_nr;
             i < sectors_per_page * page_nr + sectors_per_page; i++)
                dest->stripe_sectors[i].uptodate = true;
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
        const int sector_nr = (page_nr << PAGE_SHIFT) >>
                              rbio->bioc->fs_info->sectorsize_bits;

        /*
         * We have ensured PAGE_SIZE is aligned with sectorsize, thus
         * we won't have a page which is half data half parity.
         *
         * Thus if the first sector of the page belongs to data stripes, then
         * the full page belongs to data stripes.
         */
        return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
        int i;

        if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
                return;

        for (i = 0; i < dest->nr_pages; i++) {
                struct page *p = src->stripe_pages[i];

                /*
                 * We don't need to steal P/Q pages as they will always be
                 * regenerated for RMW or full write anyway.
                 */
                if (!is_data_stripe_page(src, i))
                        continue;

                /*
                 * If @src already has RBIO_CACHE_READY_BIT, it should have
                 * all data stripe pages present and uptodate.
                 */
                ASSERT(p);
                ASSERT(full_page_sectors_uptodate(src, i));
                steal_rbio_page(src, dest, i);
        }
        index_stripe_sectors(dest);
        index_stripe_sectors(src);
}

/*
 * Merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * Must be called with dest->rbio_list_lock held.
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
                       struct btrfs_raid_bio *victim)
{
        bio_list_merge(&dest->bio_list, &victim->bio_list);
        dest->bio_list_bytes += victim->bio_list_bytes;
        /* Also inherit the bitmaps from @victim. */
        bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
                  dest->stripe_nsectors);
        bio_list_init(&victim->bio_list);
}

/*
 * Used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
        int bucket = rbio_bucket(rbio);
        struct btrfs_stripe_hash_table *table;
        struct btrfs_stripe_hash *h;
        int freeit = 0;

        /*
         * Check the bit again under the hash table lock.
         */
        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
                return;

        table = rbio->bioc->fs_info->stripe_hash_table;
        h = table->table + bucket;

        /*
         * Hold the lock for the bucket because we may be
         * removing it from the hash table.
         */
        spin_lock(&h->lock);

        /*
         * Hold the lock for the bio list because we need
         * to make sure the bio list is empty.
         */
        spin_lock(&rbio->bio_list_lock);

        if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
                list_del_init(&rbio->stripe_cache);
                table->cache_size -= 1;
                freeit = 1;

                /*
                 * If the bio list isn't empty, this rbio is
                 * still involved in an IO.  We take it out
                 * of the cache list, and drop the ref that
                 * was held for the list.
                 *
                 * If the bio_list was empty, we also remove
                 * the rbio from the hash_table, and drop
                 * the corresponding ref.
                 */
                if (bio_list_empty(&rbio->bio_list)) {
                        if (!list_empty(&rbio->hash_list)) {
                                list_del_init(&rbio->hash_list);
                                refcount_dec(&rbio->refs);
                                BUG_ON(!list_empty(&rbio->plug_list));
                        }
                }
        }

        spin_unlock(&rbio->bio_list_lock);
        spin_unlock(&h->lock);

        if (freeit)
                free_raid_bio(rbio);
}

/*
 * Prune a given rbio from the cache.
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash_table *table;

        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
                return;

        table = rbio->bioc->fs_info->stripe_hash_table;

        spin_lock(&table->cache_lock);
        __remove_rbio_from_cache(rbio);
        spin_unlock(&table->cache_lock);
}

/*
 * Remove everything in the cache.
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
        struct btrfs_stripe_hash_table *table;
        struct btrfs_raid_bio *rbio;

        table = info->stripe_hash_table;

        spin_lock(&table->cache_lock);
        while (!list_empty(&table->stripe_cache)) {
                rbio = list_entry(table->stripe_cache.next,
                                  struct btrfs_raid_bio,
                                  stripe_cache);
                __remove_rbio_from_cache(rbio);
        }
        spin_unlock(&table->cache_lock);
}

/*
 * Remove all cached entries and free the hash table.
 * Used by unmount.
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
        if (!info->stripe_hash_table)
                return;
        btrfs_clear_rbio_cache(info);
        kvfree(info->stripe_hash_table);
        info->stripe_hash_table = NULL;
}

/*
 * Insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages.
 *
 * If this rbio was already cached, it gets
 * moved to the front of the LRU.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash_table *table;

        if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
                return;

        table = rbio->bioc->fs_info->stripe_hash_table;

        spin_lock(&table->cache_lock);
        spin_lock(&rbio->bio_list_lock);

        /* Bump our ref if we were not in the list before */
        if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
                refcount_inc(&rbio->refs);

        if (!list_empty(&rbio->stripe_cache)) {
                list_move(&rbio->stripe_cache, &table->stripe_cache);
        } else {
                list_add(&rbio->stripe_cache, &table->stripe_cache);
                table->cache_size += 1;
        }

        spin_unlock(&rbio->bio_list_lock);

        if (table->cache_size > RBIO_CACHE_SIZE) {
                struct btrfs_raid_bio *found;

                found = list_entry(table->stripe_cache.prev,
                                   struct btrfs_raid_bio,
                                   stripe_cache);

                if (found != rbio)
                        __remove_rbio_from_cache(found);
        }

        spin_unlock(&table->cache_lock);
}

/*
 * Helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
        int src_off = 0;
        int xor_src_cnt = 0;
        void *dest = pages[src_cnt];

        while (src_cnt > 0) {
                xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
                xor_blocks(xor_src_cnt, len, dest, pages + src_off);

                src_cnt -= xor_src_cnt;
                src_off += xor_src_cnt;
        }
}
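
/*
 * Walk-through (illustrative): xor_blocks() accepts at most
 * MAX_XOR_BLOCKS sources per call (4 on the kernels this was written
 * against), so for src_cnt = 5 the loop runs twice -- sources 0-3,
 * then source 4 -- each time xoring the batch into the same dest
 * buffer at pages[src_cnt].
 */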

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
        unsigned long size = rbio->bio_list_bytes;
        int ret = 1;

        spin_lock(&rbio->bio_list_lock);
        if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
                ret = 0;
        BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
        spin_unlock(&rbio->bio_list_lock);

        return ret;
}

/*
 * Returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO.
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test.
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
                          struct btrfs_raid_bio *cur)
{
        if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
            test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
                return 0;

        /*
         * We can't merge with cached rbios, since the
         * idea is that when we merge the destination
         * rbio is going to run our IO for us.  We can
         * steal from cached rbios though, other functions
         * handle that.
         */
        if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
            test_bit(RBIO_CACHE_BIT, &cur->flags))
                return 0;

        if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
                return 0;

        /* We can't merge with different operations */
        if (last->operation != cur->operation)
                return 0;
        /*
         * Parity scrub needs to read the full stripe from the drive,
         * then check and repair the parity and write the new results.
         *
         * We're not allowed to add any new bios to the
         * bio list here, anyone else that wants to
         * change this stripe needs to do their own rmw.
         */
        if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
                return 0;

        if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
            last->operation == BTRFS_RBIO_READ_REBUILD)
                return 0;

        return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
                                             unsigned int stripe_nr,
                                             unsigned int sector_nr)
{
        ASSERT(stripe_nr < rbio->real_stripes);
        ASSERT(sector_nr < rbio->stripe_nsectors);

        return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
                                             unsigned int stripe_nr,
                                             unsigned int sector_nr)
{
        return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
                                                              sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
                                              unsigned int sector_nr)
{
        return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
                                              unsigned int sector_nr)
{
        if (rbio->nr_data + 1 == rbio->real_stripes)
                return NULL;
        return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}
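
/*
 * Index math example (illustrative): with 64K stripes and a 4K
 * sectorsize, stripe_nsectors = 16.  For a RAID6 rbio with nr_data = 4,
 * P sector N is stripe_sectors[4 * 16 + N] and Q sector N is
 * stripe_sectors[5 * 16 + N]; rbio_qstripe_sector() returns NULL for
 * RAID5 because nr_data + 1 == real_stripes there.
 */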

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash *h;
        struct btrfs_raid_bio *cur;
        struct btrfs_raid_bio *pending;
        struct btrfs_raid_bio *freeit = NULL;
        struct btrfs_raid_bio *cache_drop = NULL;
        int ret = 0;

        h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

        spin_lock(&h->lock);
        list_for_each_entry(cur, &h->hash_list, hash_list) {
                if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
                        continue;

                spin_lock(&cur->bio_list_lock);

                /* Can we steal this cached rbio's pages? */
                if (bio_list_empty(&cur->bio_list) &&
                    list_empty(&cur->plug_list) &&
                    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
                    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
                        list_del_init(&cur->hash_list);
                        refcount_dec(&cur->refs);

                        steal_rbio(cur, rbio);
                        cache_drop = cur;
                        spin_unlock(&cur->bio_list_lock);

                        goto lockit;
                }

                /* Can we merge into the lock owner? */
                if (rbio_can_merge(cur, rbio)) {
                        merge_rbio(cur, rbio);
                        spin_unlock(&cur->bio_list_lock);
                        freeit = rbio;
                        ret = 1;
                        goto out;
                }

                /*
                 * We couldn't merge with the running rbio, see if we can merge
                 * with the pending ones.  We don't have to check for rmw_locked
                 * because there is no way they are inside finish_rmw right now.
                 */
                list_for_each_entry(pending, &cur->plug_list, plug_list) {
                        if (rbio_can_merge(pending, rbio)) {
                                merge_rbio(pending, rbio);
                                spin_unlock(&cur->bio_list_lock);
                                freeit = rbio;
                                ret = 1;
                                goto out;
                        }
                }

                /*
                 * No merging, put us on the tail of the plug list, our rbio
                 * will be started when the currently running rbio unlocks.
                 */
                list_add_tail(&rbio->plug_list, &cur->plug_list);
                spin_unlock(&cur->bio_list_lock);
                ret = 1;
                goto out;
        }
lockit:
        refcount_inc(&rbio->refs);
        list_add(&rbio->hash_list, &h->hash_list);
out:
        spin_unlock(&h->lock);
        if (cache_drop)
                remove_rbio_from_cache(cache_drop);
        if (freeit)
                free_raid_bio(freeit);
        return ret;
}

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * Called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started.
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
        int bucket;
        struct btrfs_stripe_hash *h;
        int keep_cache = 0;

        bucket = rbio_bucket(rbio);
        h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

        if (list_empty(&rbio->plug_list))
                cache_rbio(rbio);

        spin_lock(&h->lock);
        spin_lock(&rbio->bio_list_lock);

        if (!list_empty(&rbio->hash_list)) {
                /*
                 * If we're still cached and there is no other IO
                 * to perform, just leave this rbio here for others
                 * to steal from later.
                 */
                if (list_empty(&rbio->plug_list) &&
                    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
                        keep_cache = 1;
                        clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
                        BUG_ON(!bio_list_empty(&rbio->bio_list));
                        goto done;
                }

                list_del_init(&rbio->hash_list);
                refcount_dec(&rbio->refs);

                /*
                 * We use the plug list to hold all the rbios
                 * waiting for the chance to lock this stripe.
                 * Hand the lock over to one of them.
                 */
                if (!list_empty(&rbio->plug_list)) {
                        struct btrfs_raid_bio *next;
                        struct list_head *head = rbio->plug_list.next;

                        next = list_entry(head, struct btrfs_raid_bio,
                                          plug_list);

                        list_del_init(&rbio->plug_list);

                        list_add(&next->hash_list, &h->hash_list);
                        refcount_inc(&next->refs);
                        spin_unlock(&rbio->bio_list_lock);
                        spin_unlock(&h->lock);

                        if (next->operation == BTRFS_RBIO_READ_REBUILD)
                                start_async_work(next, recover_rbio_work_locked);
                        else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
                                steal_rbio(rbio, next);
                                start_async_work(next, recover_rbio_work_locked);
                        } else if (next->operation == BTRFS_RBIO_WRITE) {
                                steal_rbio(rbio, next);
                                start_async_work(next, rmw_rbio_work_locked);
                        } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
                                steal_rbio(rbio, next);
                                start_async_work(next, scrub_rbio_work_locked);
                        }

                        goto done_nolock;
                }
        }
done:
        spin_unlock(&rbio->bio_list_lock);
        spin_unlock(&h->lock);

done_nolock:
        if (!keep_cache)
                remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
        struct bio *next;

        while (cur) {
                next = cur->bi_next;
                cur->bi_next = NULL;
                cur->bi_status = err;
                bio_endio(cur);
                cur = next;
        }
}

/*
 * This frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them.
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
        struct bio *cur = bio_list_get(&rbio->bio_list);
        struct bio *extra;

        kfree(rbio->csum_buf);
        bitmap_free(rbio->csum_bitmap);
        rbio->csum_buf = NULL;
        rbio->csum_bitmap = NULL;

        /*
         * Clear the data bitmap, as the rbio may be cached for later usage.
         * Do this before unlock_stripe() so there will be no new bio
         * for this rbio.
         */
        bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

        /*
         * At this moment, rbio->bio_list is empty, however since rbio does not
         * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
         * hash list, rbio may be merged with others so that rbio->bio_list
         * becomes non-empty.
         * Once unlock_stripe() is done, rbio->bio_list will not be updated any
         * more and we can call bio_endio() on all queued bios.
         */
        unlock_stripe(rbio);
        extra = bio_list_get(&rbio->bio_list);
        free_raid_bio(rbio);

        rbio_endio_bio_list(cur, err);
        if (extra)
                rbio_endio_bio_list(extra, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:          The raid bio
 * @stripe_nr:     Stripe number, valid range [0, real_stripes)
 * @sector_nr:     Sector number inside the stripe,
 *                 valid range [0, stripe_nsectors)
 * @bio_list_only: Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
                                         int stripe_nr, int sector_nr,
                                         bool bio_list_only)
{
        struct sector_ptr *sector;
        int index;

        ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
        ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

        index = stripe_nr * rbio->stripe_nsectors + sector_nr;
        ASSERT(index >= 0 && index < rbio->nr_sectors);

        spin_lock(&rbio->bio_list_lock);
        sector = &rbio->bio_sectors[index];
        if (sector->page || bio_list_only) {
                /* Don't return sector without a valid page pointer */
                if (!sector->page)
                        sector = NULL;
                spin_unlock(&rbio->bio_list_lock);
                return sector;
        }
        spin_unlock(&rbio->bio_list_lock);

        return &rbio->stripe_sectors[index];
}

/*
 * Allocation and initial setup for the btrfs_raid_bio.  Note that this
 * does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
                                         struct btrfs_io_context *bioc)
{
        const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
        const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
        const unsigned int num_pages = stripe_npages * real_stripes;
        const unsigned int stripe_nsectors =
                BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
        const unsigned int num_sectors = stripe_nsectors * real_stripes;
        struct btrfs_raid_bio *rbio;

        /* PAGE_SIZE must also be aligned to sectorsize for subpage support */
        ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
        /*
         * Our current stripe len should be fixed to 64k thus stripe_nsectors
         * (at most 16) should be no larger than BITS_PER_LONG.
         */
        ASSERT(stripe_nsectors <= BITS_PER_LONG);

        rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
        if (!rbio)
                return ERR_PTR(-ENOMEM);
        rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
                                     GFP_NOFS);
        rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
                                    GFP_NOFS);
        rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
                                       GFP_NOFS);
        rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
        rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);

        if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
            !rbio->finish_pointers || !rbio->error_bitmap) {
                free_raid_bio_pointers(rbio);
                kfree(rbio);
                return ERR_PTR(-ENOMEM);
        }

        bio_list_init(&rbio->bio_list);
        init_waitqueue_head(&rbio->io_wait);
        INIT_LIST_HEAD(&rbio->plug_list);
        spin_lock_init(&rbio->bio_list_lock);
        INIT_LIST_HEAD(&rbio->stripe_cache);
        INIT_LIST_HEAD(&rbio->hash_list);
        btrfs_get_bioc(bioc);
        rbio->bioc = bioc;
        rbio->nr_pages = num_pages;
        rbio->nr_sectors = num_sectors;
        rbio->real_stripes = real_stripes;
        rbio->stripe_npages = stripe_npages;
        rbio->stripe_nsectors = stripe_nsectors;
        refcount_set(&rbio->refs, 1);
        atomic_set(&rbio->stripes_pending, 0);

        ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
        rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);

        return rbio;
}
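
/*
 * Geometry example (illustrative): a 4-device RAID6 full stripe with
 * 64K stripe length, 4K pages and a 4K sectorsize gives real_stripes = 4,
 * stripe_npages = stripe_nsectors = 16, num_pages = num_sectors = 64,
 * and nr_data = 4 - 2 = 2.
 */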

/* Allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
        int ret;

        ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
        if (ret < 0)
                return ret;
        /* Mapping all sectors */
        index_stripe_sectors(rbio);
        return 0;
}

/* Only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
        const int data_pages = rbio->nr_data * rbio->stripe_npages;
        int ret;

        ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
                                     rbio->stripe_pages + data_pages);
        if (ret < 0)
                return ret;

        index_stripe_sectors(rbio);
        return 0;
}

/*
 * Return the total number of errors found in the vertical stripe of @sector_nr.
 *
 * @faila and @failb will also be updated to the first and second stripe
 * number of the errors.
 */
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
                                     int *faila, int *failb)
{
        int stripe_nr;
        int found_errors = 0;

        if (faila || failb) {
                /*
                 * Both @faila and @failb should be valid pointers if any of
                 * them is specified.
                 */
                ASSERT(faila && failb);
                *faila = -1;
                *failb = -1;
        }

        for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
                int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;

                if (test_bit(total_sector_nr, rbio->error_bitmap)) {
                        found_errors++;
                        if (faila) {
                                /* Update faila and failb. */
                                if (*faila < 0)
                                        *faila = stripe_nr;
                                else if (*failb < 0)
                                        *failb = stripe_nr;
                        }
                }
        }
        return found_errors;
}
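
/*
 * Tolerance example (illustrative): for RAID6 bioc->max_errors is 2, so
 * two bad sectors in one vertical stripe (say faila = 1, failb = 3) are
 * still recoverable, while a third error in the same vertical stripe
 * pushes callers past max_errors and they fail with -EIO.
 */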

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
                              struct bio_list *bio_list,
                              struct sector_ptr *sector,
                              unsigned int stripe_nr,
                              unsigned int sector_nr,
                              enum req_op op)
{
        const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        struct bio *last = bio_list->tail;
        int ret;
        struct bio *bio;
        struct btrfs_io_stripe *stripe;
        u64 disk_start;

        /*
         * Note: here stripe_nr has taken device replace into consideration,
         * thus it can be larger than rbio->real_stripes.
         * So here we check against bioc->num_stripes, not rbio->real_stripes.
         */
        ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
        ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
        ASSERT(sector->page);

        stripe = &rbio->bioc->stripes[stripe_nr];
        disk_start = stripe->physical + sector_nr * sectorsize;

        /* If the device is missing, just fail this stripe */
        if (!stripe->dev->bdev) {
                int found_errors;

                set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
                        rbio->error_bitmap);

                /* Check if we have reached tolerance early. */
                found_errors = get_rbio_veritical_errors(rbio, sector_nr,
                                                         NULL, NULL);
                if (found_errors > rbio->bioc->max_errors)
                        return -EIO;
                return 0;
        }

        /* See if we can add this page onto our existing bio */
        if (last) {
                u64 last_end = last->bi_iter.bi_sector << 9;
                last_end += last->bi_iter.bi_size;

                /*
                 * We can't merge these if they are from different
                 * devices or if they are not contiguous.
                 */
                if (last_end == disk_start && !last->bi_status &&
                    last->bi_bdev == stripe->dev->bdev) {
                        ret = bio_add_page(last, sector->page, sectorsize,
                                           sector->pgoff);
                        if (ret == sectorsize)
                                return 0;
                }
        }

        /* Put a new bio on the list */
        bio = bio_alloc(stripe->dev->bdev,
                        max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
                        op, GFP_NOFS);
        bio->bi_iter.bi_sector = disk_start >> 9;
        bio->bi_private = rbio;

        __bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
        bio_list_add(bio_list, bio);
        return 0;
}

static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
        const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        struct bio_vec bvec;
        struct bvec_iter iter;
        u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
                     rbio->bioc->full_stripe_logical;

        bio_for_each_segment(bvec, bio, iter) {
                u32 bvec_offset;

                for (bvec_offset = 0; bvec_offset < bvec.bv_len;
                     bvec_offset += sectorsize, offset += sectorsize) {
                        int index = offset / sectorsize;
                        struct sector_ptr *sector = &rbio->bio_sectors[index];

                        sector->page = bvec.bv_page;
                        sector->pgoff = bvec.bv_offset + bvec_offset;
                        ASSERT(sector->pgoff < PAGE_SIZE);
                }
        }
}

/*
 * Helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio.
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
        struct bio *bio;

        spin_lock(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list)
                index_one_bio(rbio, bio);

        spin_unlock(&rbio->bio_list_lock);
}

static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
                               struct raid56_bio_trace_info *trace_info)
{
        const struct btrfs_io_context *bioc = rbio->bioc;
        int i;

        ASSERT(bioc);

        /* We rely on bio->bi_bdev to find the stripe number. */
        if (!bio->bi_bdev)
                goto not_found;

        for (i = 0; i < bioc->num_stripes; i++) {
                if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
                        continue;
                trace_info->stripe_nr = i;
                trace_info->devid = bioc->stripes[i].dev->devid;
                trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
                                     bioc->stripes[i].physical;
                return;
        }

not_found:
        trace_info->devid = -1;
        trace_info->offset = -1;
        trace_info->stripe_nr = -1;
}

static inline void bio_list_put(struct bio_list *bio_list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list)))
                bio_put(bio);
}

/* Generate PQ for one vertical stripe. */
static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
{
        void **pointers = rbio->finish_pointers;
        const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        struct sector_ptr *sector;
        int stripe;
        const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;

        /* First collect one sector from each data stripe */
        for (stripe = 0; stripe < rbio->nr_data; stripe++) {
                sector = sector_in_rbio(rbio, stripe, sectornr, 0);
                pointers[stripe] = kmap_local_page(sector->page) +
                                   sector->pgoff;
        }

        /* Then add the parity stripe */
        sector = rbio_pstripe_sector(rbio, sectornr);
        sector->uptodate = 1;
        pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;

        if (has_qstripe) {
                /*
                 * RAID6, add the qstripe and call the library function
                 * to fill in our p/q
                 */
                sector = rbio_qstripe_sector(rbio, sectornr);
                sector->uptodate = 1;
                pointers[stripe++] = kmap_local_page(sector->page) +
                                     sector->pgoff;

                raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
                                        pointers);
        } else {
                /* raid5 */
                memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
                run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
        }
        for (stripe = stripe - 1; stripe >= 0; stripe--)
                kunmap_local(pointers[stripe]);
}
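
/*
 * Parity math sketch (illustrative): the RAID5 branch above computes
 * P = D0 ^ D1 ^ ... ^ Dn-1 (memcpy of D0 into the P buffer, then
 * run_xor() of the rest).  For RAID6, raid6_call.gen_syndrome() also
 * produces Q = g^0*D0 ^ g^1*D1 ^ ... over GF(2^8), which is what makes
 * two-failure recovery possible later.
 */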

static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
                                   struct bio_list *bio_list)
{
        /* The total sector number inside the full stripe. */
        int total_sector_nr;
        int sectornr;
        int stripe;
        int ret;

        ASSERT(bio_list_size(bio_list) == 0);

        /* We should have at least one data sector. */
        ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

        /*
         * Reset errors, as we may have errors inherited from a degraded
         * write.
         */
        bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

        /*
         * Start assembly.  Make bios for everything from the higher layers (the
         * bio_list in our rbio) and our P/Q.  Ignore everything else.
         */
        for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
             total_sector_nr++) {
                struct sector_ptr *sector;

                stripe = total_sector_nr / rbio->stripe_nsectors;
                sectornr = total_sector_nr % rbio->stripe_nsectors;

                /* This vertical stripe has no data, skip it. */
                if (!test_bit(sectornr, &rbio->dbitmap))
                        continue;

                if (stripe < rbio->nr_data) {
                        sector = sector_in_rbio(rbio, stripe, sectornr, 1);
                        if (!sector)
                                continue;
                } else {
                        sector = rbio_stripe_sector(rbio, stripe, sectornr);
                }

                ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
                                         sectornr, REQ_OP_WRITE);
                if (ret)
                        goto error;
        }

        if (likely(!rbio->bioc->replace_nr_stripes))
                return 0;

        /*
         * Make a copy for the replace target device.
         *
         * Thus the source stripe number (in replace_stripe_src) should be valid.
         */
        ASSERT(rbio->bioc->replace_stripe_src >= 0);

        for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
             total_sector_nr++) {
                struct sector_ptr *sector;

                stripe = total_sector_nr / rbio->stripe_nsectors;
                sectornr = total_sector_nr % rbio->stripe_nsectors;

                /*
                 * For RAID56, there is only one device that can be replaced,
                 * and replace_stripe_src[0] indicates the stripe number we
                 * need to copy from.
                 */
                if (stripe != rbio->bioc->replace_stripe_src) {
                        /*
                         * We can skip the whole stripe completely, note
                         * total_sector_nr will be increased by one anyway.
                         */
                        ASSERT(sectornr == 0);
                        total_sector_nr += rbio->stripe_nsectors - 1;
                        continue;
                }

                /* This vertical stripe has no data, skip it. */
                if (!test_bit(sectornr, &rbio->dbitmap))
                        continue;

                if (stripe < rbio->nr_data) {
                        sector = sector_in_rbio(rbio, stripe, sectornr, 1);
                        if (!sector)
                                continue;
                } else {
                        sector = rbio_stripe_sector(rbio, stripe, sectornr);
                }

                ret = rbio_add_io_sector(rbio, bio_list, sector,
                                         rbio->real_stripes,
                                         sectornr, REQ_OP_WRITE);
                if (ret)
                        goto error;
        }

        return 0;
error:
        bio_list_put(bio_list);
        return -EIO;
}

static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
        struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
        u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
                     rbio->bioc->full_stripe_logical;
        int total_nr_sector = offset >> fs_info->sectorsize_bits;

        ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

        bitmap_set(rbio->error_bitmap, total_nr_sector,
                   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

        /*
         * Special handling for raid56_alloc_missing_rbio() used by
         * scrub/replace.  Unlike call path in raid56_parity_recover(), they
         * pass an empty bio here.  Thus we have to find out the missing device
         * and mark the stripe error instead.
         */
        if (bio->bi_iter.bi_size == 0) {
                bool found_missing = false;
                int stripe_nr;

                for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
                        if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
                                found_missing = true;
                                bitmap_set(rbio->error_bitmap,
                                           stripe_nr * rbio->stripe_nsectors,
                                           rbio->stripe_nsectors);
                        }
                }
                ASSERT(found_missing);
        }
}

/*
 * For the subpage case, we can no longer set the page Up-to-date directly for
 * stripe_pages[], thus we need to locate the sector.
 */
static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
                                             struct page *page,
                                             unsigned int pgoff)
{
        int i;

        for (i = 0; i < rbio->nr_sectors; i++) {
                struct sector_ptr *sector = &rbio->stripe_sectors[i];

                if (sector->page == page && sector->pgoff == pgoff)
                        return sector;
        }
        return NULL;
}

/*
 * This sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers.
 */
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
        const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        ASSERT(!bio_flagged(bio, BIO_CLONED));

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct sector_ptr *sector;
                int pgoff;

                for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
                     pgoff += sectorsize) {
                        sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
                        ASSERT(sector);
                        if (sector)
                                sector->uptodate = 1;
                }
        }
}

static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
        struct bio_vec *bv = bio_first_bvec_all(bio);
        int i;

        for (i = 0; i < rbio->nr_sectors; i++) {
                struct sector_ptr *sector;

                sector = &rbio->stripe_sectors[i];
                if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
                        break;
                sector = &rbio->bio_sectors[i];
                if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
                        break;
        }
        ASSERT(i < rbio->nr_sectors);
        return i;
}

static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
        int total_sector_nr = get_bio_sector_nr(rbio, bio);
        u32 bio_size = 0;
        struct bio_vec *bvec;
        int i;

        bio_for_each_bvec_all(bvec, bio, i)
                bio_size += bvec->bv_len;

        /*
         * Since we can have multiple bios touching the error_bitmap, we cannot
         * call bitmap_set() without protection.
         *
         * Instead use set_bit() for each bit, as set_bit() itself is atomic.
         */
        for (i = total_sector_nr; i < total_sector_nr +
             (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
                set_bit(i, rbio->error_bitmap);
}

/* Verify the data sectors at read time. */
static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
                                    struct bio *bio)
{
        struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
        int total_sector_nr = get_bio_sector_nr(rbio, bio);
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        /* No data csum for the whole stripe, no need to verify. */
        if (!rbio->csum_bitmap || !rbio->csum_buf)
                return;

        /* P/Q stripes, they have no data csum to verify against. */
        if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
                return;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                int bv_offset;

                for (bv_offset = bvec->bv_offset;
                     bv_offset < bvec->bv_offset + bvec->bv_len;
                     bv_offset += fs_info->sectorsize, total_sector_nr++) {
                        u8 csum_buf[BTRFS_CSUM_SIZE];
                        u8 *expected_csum = rbio->csum_buf +
                                            total_sector_nr * fs_info->csum_size;
                        int ret;

                        /* No csum for this sector, skip to the next sector. */
                        if (!test_bit(total_sector_nr, rbio->csum_bitmap))
                                continue;

                        ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
                                        bv_offset, csum_buf, expected_csum);
                        if (ret < 0)
                                set_bit(total_sector_nr, rbio->error_bitmap);
                }
        }
}

static void raid_wait_read_end_io(struct bio *bio)
{
        struct btrfs_raid_bio *rbio = bio->bi_private;

        if (bio->bi_status) {
                rbio_update_error_bitmap(rbio, bio);
        } else {
                set_bio_pages_uptodate(rbio, bio);
                verify_bio_data_sectors(rbio, bio);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&rbio->stripes_pending))
                wake_up(&rbio->io_wait);
}

static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
                                      struct bio_list *bio_list)
{
        struct bio *bio;

        atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
        while ((bio = bio_list_pop(bio_list))) {
                bio->bi_end_io = raid_wait_read_end_io;

                if (trace_raid56_scrub_read_recover_enabled()) {
                        struct raid56_bio_trace_info trace_info = { 0 };

                        bio_get_trace_info(rbio, bio, &trace_info);
                        trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
                }
                submit_bio(bio);
        }

        wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
}

static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
        const int data_pages = rbio->nr_data * rbio->stripe_npages;
        int ret;

        ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
        if (ret < 0)
                return ret;

        index_stripe_sectors(rbio);
        return 0;
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios.
 */
struct btrfs_plug_cb {
        struct blk_plug_cb cb;
        struct btrfs_fs_info *info;
        struct list_head rbio_list;
        struct work_struct work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
                    const struct list_head *b)
{
        const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
                                                       plug_list);
        const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
                                                       plug_list);
        u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
        u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

        if (a_sector < b_sector)
                return -1;
        if (a_sector > b_sector)
                return 1;
        return 0;
}

static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
        struct btrfs_raid_bio *cur;
        struct btrfs_raid_bio *last = NULL;

        list_sort(NULL, &plug->rbio_list, plug_cmp);

        while (!list_empty(&plug->rbio_list)) {
                cur = list_entry(plug->rbio_list.next,
                                 struct btrfs_raid_bio, plug_list);
                list_del_init(&cur->plug_list);

                if (rbio_is_full(cur)) {
                        /* We have a full stripe, queue it down. */
                        start_async_work(cur, rmw_rbio_work);
                        continue;
                }
                if (last) {
                        if (rbio_can_merge(last, cur)) {
                                merge_rbio(last, cur);
                                free_raid_bio(cur);
                                continue;
                        }
                        start_async_work(last, rmw_rbio_work);
                }
                last = cur;
        }
        if (last)
                start_async_work(last, rmw_rbio_work);
        kfree(plug);
}

/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
        const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
        const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
        const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
        const u32 orig_len = orig_bio->bi_iter.bi_size;
        const u32 sectorsize = fs_info->sectorsize;
        u64 cur_logical;

        ASSERT(orig_logical >= full_stripe_start &&
               orig_logical + orig_len <= full_stripe_start +
               rbio->nr_data * BTRFS_STRIPE_LEN);

        bio_list_add(&rbio->bio_list, orig_bio);
        rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;

        /* Update the dbitmap. */
        for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
             cur_logical += sectorsize) {
                int bit = ((u32)(cur_logical - full_stripe_start) >>
                           fs_info->sectorsize_bits) % rbio->stripe_nsectors;

                set_bit(bit, &rbio->dbitmap);
        }
}
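
/*
 * dbitmap example (illustrative): with a 4K sectorsize and
 * stripe_nsectors = 16, a 16K write starting 8K into the full stripe
 * sets bits 2-5.  The modulo folds every data stripe onto the same 16
 * bits because dbitmap tracks vertical stripes, not individual sectors.
 */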
1638
53b381b3
DW
1639/*
1640 * our main entry point for writes from the rest of the FS.
1641 */
31683f4a 1642void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
53b381b3 1643{
6a258d72 1644 struct btrfs_fs_info *fs_info = bioc->fs_info;
53b381b3 1645 struct btrfs_raid_bio *rbio;
6ac0f488
CM
1646 struct btrfs_plug_cb *plug = NULL;
1647 struct blk_plug_cb *cb;
53b381b3 1648
ff18a4af 1649 rbio = alloc_rbio(fs_info, bioc);
af8e2d1d 1650 if (IS_ERR(rbio)) {
abb49e87
CH
1651 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
1652 bio_endio(bio);
1653 return;
af8e2d1d 1654 }
1b94b556 1655 rbio->operation = BTRFS_RBIO_WRITE;
bd8f7e62 1656 rbio_add_bio(rbio, bio);
6ac0f488
CM
1657
1658 /*
93723095 1659 * Don't plug on full rbios, just get them out the door
6ac0f488
CM
1660 * as quickly as we can
1661 */
abb49e87
CH
1662 if (!rbio_is_full(rbio)) {
1663 cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
1664 if (cb) {
1665 plug = container_of(cb, struct btrfs_plug_cb, cb);
1666 if (!plug->info) {
1667 plug->info = fs_info;
1668 INIT_LIST_HEAD(&plug->rbio_list);
1669 }
1670 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1671 return;
6ac0f488 1672 }
6ac0f488 1673 }
abb49e87 1674
93723095
QW
1675 /*
1676 * Either we don't have any existing plug, or we're doing a full stripe,
abb49e87 1677 * queue the rmw work now.
93723095
QW
1678 */
1679 start_async_work(rbio, rmw_rbio_work);
53b381b3
DW
1680}
1681
7a315072
QW
1682static int verify_one_sector(struct btrfs_raid_bio *rbio,
1683 int stripe_nr, int sector_nr)
1684{
1685 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1686 struct sector_ptr *sector;
1687 u8 csum_buf[BTRFS_CSUM_SIZE];
1688 u8 *csum_expected;
1689 int ret;
1690
1691 if (!rbio->csum_bitmap || !rbio->csum_buf)
1692 return 0;
1693
1694 /* No way to verify P/Q as they are not covered by data csum. */
1695 if (stripe_nr >= rbio->nr_data)
1696 return 0;
1697 /*
1698 * If we're rebuilding a read, we have to use pages from the
1699 * bio list if possible.
1700 */
1701 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1702 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
1703 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1704 } else {
1705 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1706 }
1707
1708 ASSERT(sector->page);
1709
1710 csum_expected = rbio->csum_buf +
1711 (stripe_nr * rbio->stripe_nsectors + sector_nr) *
1712 fs_info->csum_size;
1713 ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
1714 csum_buf, csum_expected);
1715 return ret;
1716}
1717
9c5ff9b4
QW
1718/*
1719 * Recover a vertical stripe specified by @sector_nr.
1720 * @*pointers are the pre-allocated pointers by the caller, so we don't
1721 * need to allocate/free the pointers again and again.
1722 */
75b47033
QW
1723static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1724 void **pointers, void **unmap_array)
9c5ff9b4
QW
1725{
1726 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1727 struct sector_ptr *sector;
1728 const u32 sectorsize = fs_info->sectorsize;
75b47033
QW
1729 int found_errors;
1730 int faila;
1731 int failb;
9c5ff9b4 1732 int stripe_nr;
7a315072 1733 int ret = 0;
9c5ff9b4
QW
1734
1735 /*
 1736 * Now we just use the bitmap to mark the horizontal stripes in
1737 * which we have data when doing parity scrub.
1738 */
1739 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1740 !test_bit(sector_nr, &rbio->dbitmap))
75b47033
QW
1741 return 0;
1742
1743 found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1744 &failb);
1745 /*
67da05b3 1746 * No errors in the vertical stripe, skip it. Can happen for recovery
75b47033
QW
 1747 * where only part of a stripe failed the csum check.
1748 */
1749 if (!found_errors)
1750 return 0;
1751
1752 if (found_errors > rbio->bioc->max_errors)
1753 return -EIO;
9c5ff9b4
QW
1754
1755 /*
 1756 * Set up our array of pointers with sectors from each stripe.
1757 *
1758 * NOTE: store a duplicate array of pointers to preserve the
1759 * pointer order.
1760 */
1761 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1762 /*
75b47033
QW
1763 * If we're rebuilding a read, we have to use pages from the
1764 * bio list if possible.
9c5ff9b4
QW
1765 */
1766 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
75b47033 1767 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
9c5ff9b4
QW
1768 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1769 } else {
1770 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1771 }
1772 ASSERT(sector->page);
1773 pointers[stripe_nr] = kmap_local_page(sector->page) +
1774 sector->pgoff;
1775 unmap_array[stripe_nr] = pointers[stripe_nr];
1776 }
1777
1778 /* All raid6 handling here */
1779 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1780 /* Single failure, rebuild from parity raid5 style */
1781 if (failb < 0) {
1782 if (faila == rbio->nr_data)
1783 /*
1784 * Just the P stripe has failed, without
1785 * a bad data or Q stripe.
1786 * We have nothing to do, just skip the
1787 * recovery for this stripe.
1788 */
1789 goto cleanup;
1790 /*
1791 * a single failure in raid6 is rebuilt
1792 * in the pstripe code below
1793 */
1794 goto pstripe;
1795 }
1796
1797 /*
 1798 * If the Q stripe has failed, do a pstripe reconstruction from
 1799 * the xors.
 1800 * If both the Q stripe and the P stripe have failed, we're
1801 * here due to a crc mismatch and we can't give them the
1802 * data they want.
1803 */
18d758a2
QW
1804 if (failb == rbio->real_stripes - 1) {
1805 if (faila == rbio->real_stripes - 2)
9c5ff9b4
QW
1806 /*
1807 * Only P and Q are corrupted.
 1808 * We only care about data stripe recovery,
 1809 * so we can skip this vertical stripe.
1810 */
1811 goto cleanup;
1812 /*
1813 * Otherwise we have one bad data stripe and
1814 * a good P stripe. raid5!
1815 */
1816 goto pstripe;
1817 }
1818
18d758a2 1819 if (failb == rbio->real_stripes - 2) {
9c5ff9b4
QW
1820 raid6_datap_recov(rbio->real_stripes, sectorsize,
1821 faila, pointers);
1822 } else {
1823 raid6_2data_recov(rbio->real_stripes, sectorsize,
1824 faila, failb, pointers);
1825 }
1826 } else {
1827 void *p;
1828
1829 /* Rebuild from P stripe here (raid5 or raid6). */
1830 ASSERT(failb == -1);
1831pstripe:
1832 /* Copy parity block into failed block to start with */
1833 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1834
1835 /* Rearrange the pointer array */
1836 p = pointers[faila];
1837 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
1838 stripe_nr++)
1839 pointers[stripe_nr] = pointers[stripe_nr + 1];
1840 pointers[rbio->nr_data - 1] = p;
1841
1842 /* Xor in the rest */
1843 run_xor(pointers, rbio->nr_data - 1, sectorsize);
1844
1845 }
1846
1847 /*
 1848 * No matter if this is an RMW or a recovery, we should have all
 1849 * failed sectors repaired in the vertical stripe, thus they are now
 1850 * uptodate.
 1851 * Especially if we decide to cache the rbio, we need to
1852 * have at least all data sectors uptodate.
7a315072
QW
1853 *
1854 * If possible, also check if the repaired sector matches its data
1855 * checksum.
9c5ff9b4 1856 */
75b47033 1857 if (faila >= 0) {
7a315072
QW
1858 ret = verify_one_sector(rbio, faila, sector_nr);
1859 if (ret < 0)
1860 goto cleanup;
1861
75b47033 1862 sector = rbio_stripe_sector(rbio, faila, sector_nr);
9c5ff9b4
QW
1863 sector->uptodate = 1;
1864 }
75b47033 1865 if (failb >= 0) {
f7c11aff 1866 ret = verify_one_sector(rbio, failb, sector_nr);
7a315072
QW
1867 if (ret < 0)
1868 goto cleanup;
1869
75b47033 1870 sector = rbio_stripe_sector(rbio, failb, sector_nr);
9c5ff9b4
QW
1871 sector->uptodate = 1;
1872 }
1873
1874cleanup:
1875 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
1876 kunmap_local(unmap_array[stripe_nr]);
7a315072 1877 return ret;
9c5ff9b4
QW
1878}
1879
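/*
 * The pstripe path above is plain RAID5 recovery: seed the failed block
 * with parity, then XOR in every surviving data block. The pointer
 * rotation in the kernel code only exists so run_xor() can consume a
 * contiguous prefix; the minimal user-space sketch below (fixed-size
 * buffers, not the kernel helpers) folds that into a skip instead.
 */
#include <stdint.h>
#include <string.h>

#define SECTORSIZE 4096

static void raid5_rebuild(uint8_t *blocks[], int nr_data, int faila,
			  const uint8_t *parity)
{
	uint8_t *bad = blocks[faila];
	size_t i;
	int d;

	/* Copy parity into the failed block to start with. */
	memcpy(bad, parity, SECTORSIZE);

	/* XOR in every surviving data block. */
	for (d = 0; d < nr_data; d++) {
		if (d == faila)
			continue;
		for (i = 0; i < SECTORSIZE; i++)
			bad[i] ^= blocks[d][i];
	}
}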
ec936b03 1880static int recover_sectors(struct btrfs_raid_bio *rbio)
53b381b3 1881{
9c5ff9b4
QW
1882 void **pointers = NULL;
1883 void **unmap_array = NULL;
ec936b03
QW
1884 int sectornr;
1885 int ret = 0;
53b381b3 1886
07e4d380 1887 /*
ec936b03
QW
1888 * @pointers array stores the pointer for each sector.
1889 *
 1890 * @unmap_array stores a copy of the pointers that does not get reordered
1891 * during reconstruction so that kunmap_local works.
07e4d380 1892 */
31e818fe 1893 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
94a0b58d 1894 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
ec936b03
QW
1895 if (!pointers || !unmap_array) {
1896 ret = -ENOMEM;
1897 goto out;
94a0b58d
IW
1898 }
1899
b4ee1782
OS
1900 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1901 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
74cc3600 1902 spin_lock(&rbio->bio_list_lock);
53b381b3 1903 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
74cc3600 1904 spin_unlock(&rbio->bio_list_lock);
53b381b3
DW
1905 }
1906
1907 index_rbio_pages(rbio);
1908
75b47033
QW
1909 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
1910 ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
1911 if (ret < 0)
1912 break;
1913 }
53b381b3 1914
ec936b03 1915out:
53b381b3 1916 kfree(pointers);
ec936b03
QW
1917 kfree(unmap_array);
1918 return ret;
1919}
1920
40f87ddb 1921static void recover_rbio(struct btrfs_raid_bio *rbio)
53b381b3 1922{
d838d05e 1923 struct bio_list bio_list = BIO_EMPTY_LIST;
d31968d9
QW
1924 int total_sector_nr;
1925 int ret = 0;
53b381b3 1926
d838d05e
CH
1927 /*
 1928 * Either we're doing recovery for a read failure or a degraded write;
 1929 * the caller should have set the error bitmap correctly.
1930 */
1931 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
1932
1933 /* For recovery, we need to read all sectors including P/Q. */
1934 ret = alloc_rbio_pages(rbio);
1935 if (ret < 0)
40f87ddb 1936 goto out;
d838d05e
CH
1937
1938 index_rbio_pages(rbio);
1939
53b381b3 1940 /*
f6065f8e
QW
1941 * Read everything that hasn't failed. However this time we will
1942 * not trust any cached sector.
 1943 * We may read out some stale data from the cache, while the higher
 1944 * layer is not reading that stale part.
1945 *
 1946 * So here we always re-read everything in the recovery path.
53b381b3 1947 */
ef340fcc
QW
1948 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1949 total_sector_nr++) {
1950 int stripe = total_sector_nr / rbio->stripe_nsectors;
1951 int sectornr = total_sector_nr % rbio->stripe_nsectors;
1952 struct sector_ptr *sector;
1953
75b47033
QW
1954 /*
 1955 * Skip the range which has an error. It can be a range which is
1956 * marked error (for csum mismatch), or it can be a missing
1957 * device.
1958 */
1959 if (!rbio->bioc->stripes[stripe].dev->bdev ||
1960 test_bit(total_sector_nr, rbio->error_bitmap)) {
1961 /*
1962 * Also set the error bit for missing device, which
1963 * may not yet have its error bit set.
1964 */
1965 set_bit(total_sector_nr, rbio->error_bitmap);
53b381b3 1966 continue;
5588383e 1967 }
75b47033 1968
ef340fcc 1969 sector = rbio_stripe_sector(rbio, stripe, sectornr);
d838d05e 1970 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
ff18a4af 1971 sectornr, REQ_OP_READ);
d838d05e
CH
1972 if (ret < 0) {
1973 bio_list_put(&bio_list);
40f87ddb 1974 goto out;
d838d05e 1975 }
53b381b3 1976 }
d817ce35 1977
1c76fb7b 1978 submit_read_wait_bio_list(rbio, &bio_list);
40f87ddb
CH
1979 ret = recover_sectors(rbio);
1980out:
1981 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
d817ce35
QW
1982}
1983
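/*
 * The read loop above walks one flat sector index and derives the
 * coordinates from it, i.e. total_sector_nr == stripe * stripe_nsectors
 * + sectornr. Worked example with stripe_nsectors == 16:
 *
 *	total_sector_nr = 35
 *	stripe   = 35 / 16 = 2	(e.g. the P stripe on 2-data RAID5)
 *	sectornr = 35 % 16 = 3
 */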
1984static void recover_rbio_work(struct work_struct *work)
1985{
1986 struct btrfs_raid_bio *rbio;
d817ce35
QW
1987
1988 rbio = container_of(work, struct btrfs_raid_bio, work);
40f87ddb
CH
1989 if (!lock_stripe_add(rbio))
1990 recover_rbio(rbio);
d817ce35
QW
1991}
1992
1993static void recover_rbio_work_locked(struct work_struct *work)
1994{
40f87ddb 1995 recover_rbio(container_of(work, struct btrfs_raid_bio, work));
d817ce35
QW
1996}
1997
75b47033
QW
1998static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
1999{
2000 bool found = false;
2001 int sector_nr;
2002
2003 /*
 2004 * This is for RAID6 extra recovery tries, thus the mirror number should
 2005 * be larger than 2.
2006 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
2007 * RAID5 methods.
2008 */
2009 ASSERT(mirror_num > 2);
2010 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2011 int found_errors;
2012 int faila;
2013 int failb;
2014
2015 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2016 &faila, &failb);
2017 /* This vertical stripe doesn't have errors. */
2018 if (!found_errors)
2019 continue;
2020
2021 /*
2022 * If we found errors, there should be only one error marked
2023 * by previous set_rbio_range_error().
2024 */
2025 ASSERT(found_errors == 1);
2026 found = true;
2027
2028 /* Now select another stripe to mark as error. */
2029 failb = rbio->real_stripes - (mirror_num - 1);
2030 if (failb <= faila)
2031 failb--;
2032
2033 /* Set the extra bit in error bitmap. */
2034 if (failb >= 0)
2035 set_bit(failb * rbio->stripe_nsectors + sector_nr,
2036 rbio->error_bitmap);
2037 }
2038
 2039 /* We should have found at least one vertical stripe with errors. */
2040 ASSERT(found);
2041}
2042
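/*
 * Worked example of the extra-failure mapping above on a four-stripe
 * RAID6 (real_stripes == 4: stripes 0-1 are data, 2 is P, 3 is Q):
 *
 *	mirror_num == 3: failb = 4 - (3 - 1) = 2, also fail P
 *	mirror_num == 4: failb = 4 - (4 - 1) = 1, and if faila is also
 *	>= 1 the failb-- shifts the extra failure down to stripe 0
 *
 * Each retry therefore pairs the original bad stripe with a different
 * second casualty until all reconstruction combinations are exhausted.
 */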
53b381b3
DW
2043/*
 2044 * The main entry point for reads from the higher layers. This
2045 * is really only called when the normal read path had a failure,
2046 * so we assume the bio they send down corresponds to a failed part
2047 * of the drive.
2048 */
6065fd95 2049void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
f1c29379 2050 int mirror_num)
53b381b3 2051{
6a258d72 2052 struct btrfs_fs_info *fs_info = bioc->fs_info;
53b381b3 2053 struct btrfs_raid_bio *rbio;
53b381b3 2054
ff18a4af 2055 rbio = alloc_rbio(fs_info, bioc);
af8e2d1d 2056 if (IS_ERR(rbio)) {
6065fd95 2057 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
d817ce35
QW
2058 bio_endio(bio);
2059 return;
af8e2d1d 2060 }
53b381b3 2061
1b94b556 2062 rbio->operation = BTRFS_RBIO_READ_REBUILD;
bd8f7e62 2063 rbio_add_bio(rbio, bio);
53b381b3 2064
2942a50d
QW
2065 set_rbio_range_error(rbio, bio);
2066
53b381b3 2067 /*
8810f751
LB
2068 * Loop retry:
 2069 * for 'mirror_num == 2', reconstruct from all other stripes.
2070 * for 'mirror_num > 2', select a stripe to fail on every retry.
53b381b3 2071 */
ad3daf1c 2072 if (mirror_num > 2)
75b47033 2073 set_rbio_raid6_extra_error(rbio, mirror_num);
53b381b3 2074
d817ce35 2075 start_async_work(rbio, recover_rbio_work);
53b381b3
DW
2076}
2077
c5a41562
QW
2078static void fill_data_csums(struct btrfs_raid_bio *rbio)
2079{
2080 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
2081 struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
18d758a2
QW
2082 rbio->bioc->full_stripe_logical);
2083 const u64 start = rbio->bioc->full_stripe_logical;
c5a41562
QW
2084 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
2085 fs_info->sectorsize_bits;
2086 int ret;
2087
2088 /* The rbio should not have its csum buffer initialized. */
2089 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
2090
2091 /*
2092 * Skip the csum search if:
2093 *
2094 * - The rbio doesn't belong to data block groups
2095 * Then we are doing IO for tree blocks, no need to search csums.
2096 *
2097 * - The rbio belongs to mixed block groups
 2098 * This is to avoid a deadlock: we're already holding the full
 2099 * stripe lock, so if we trigger a metadata read that needs to do
 2100 * raid56 recovery, we will deadlock.
2101 */
2102 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
2103 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
2104 return;
2105
2106 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
2107 fs_info->csum_size, GFP_NOFS);
2108 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
2109 GFP_NOFS);
2110 if (!rbio->csum_buf || !rbio->csum_bitmap) {
2111 ret = -ENOMEM;
2112 goto error;
2113 }
2114
2115 ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
b9795475 2116 rbio->csum_buf, rbio->csum_bitmap, false);
c5a41562
QW
2117 if (ret < 0)
2118 goto error;
2119 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
2120 goto no_csum;
2121 return;
2122
2123error:
2124 /*
 2125 * We failed to allocate memory or grab the csum, but it's not fatal:
 2126 * we can still continue. But better to warn users that RMW is no
2127 * longer safe for this particular sub-stripe write.
2128 */
2129 btrfs_warn_rl(fs_info,
2130"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
18d758a2 2131 rbio->bioc->full_stripe_logical, ret);
c5a41562
QW
2132no_csum:
2133 kfree(rbio->csum_buf);
2134 bitmap_free(rbio->csum_bitmap);
2135 rbio->csum_buf = NULL;
2136 rbio->csum_bitmap = NULL;
2137}
2138
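/*
 * Sizing example for the two allocations above, assuming crc32c
 * (csum_size == 4), two data stripes and 16 sectors per stripe:
 *
 *	csum_buf    = 2 * 16 * 4 = 128 bytes
 *	csum_bitmap = 2 * 16     = 32 bits
 *	len         = (2 * 16) << 12 = 128K of logical range searched
 */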
7a315072 2139static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
5eb30ee2 2140{
02efa3a6
CH
2141 struct bio_list bio_list = BIO_EMPTY_LIST;
2142 int total_sector_nr;
2143 int ret = 0;
5eb30ee2 2144
c5a41562
QW
2145 /*
2146 * Fill the data csums we need for data verification. We need to fill
2147 * the csum_bitmap/csum_buf first, as our endio function will try to
2148 * verify the data sectors.
2149 */
2150 fill_data_csums(rbio);
2151
02efa3a6
CH
2152 /*
2153 * Build a list of bios to read all sectors (including data and P/Q).
2154 *
 2155 * This behavior compensates for the later csum verification and recovery.
2156 */
2157 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2158 total_sector_nr++) {
2159 struct sector_ptr *sector;
2160 int stripe = total_sector_nr / rbio->stripe_nsectors;
2161 int sectornr = total_sector_nr % rbio->stripe_nsectors;
5eb30ee2 2162
02efa3a6
CH
2163 sector = rbio_stripe_sector(rbio, stripe, sectornr);
2164 ret = rbio_add_io_sector(rbio, &bio_list, sector,
2165 stripe, sectornr, REQ_OP_READ);
2166 if (ret) {
2167 bio_list_put(&bio_list);
2168 return ret;
2169 }
2170 }
7a315072
QW
2171
2172 /*
2173 * We may or may not have any corrupted sectors (including missing dev
 2174 * and csum mismatch), just let recover_sectors() handle them all.
2175 */
02efa3a6
CH
2176 submit_read_wait_bio_list(rbio, &bio_list);
2177 return recover_sectors(rbio);
5eb30ee2
QW
2178}
2179
2180static void raid_wait_write_end_io(struct bio *bio)
2181{
2182 struct btrfs_raid_bio *rbio = bio->bi_private;
2183 blk_status_t err = bio->bi_status;
2184
ad3daf1c 2185 if (err)
2942a50d 2186 rbio_update_error_bitmap(rbio, bio);
5eb30ee2
QW
2187 bio_put(bio);
2188 if (atomic_dec_and_test(&rbio->stripes_pending))
2189 wake_up(&rbio->io_wait);
2190}
2191
2192static void submit_write_bios(struct btrfs_raid_bio *rbio,
2193 struct bio_list *bio_list)
2194{
2195 struct bio *bio;
2196
2197 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
2198 while ((bio = bio_list_pop(bio_list))) {
2199 bio->bi_end_io = raid_wait_write_end_io;
2200
2201 if (trace_raid56_write_stripe_enabled()) {
2202 struct raid56_bio_trace_info trace_info = { 0 };
2203
2204 bio_get_trace_info(rbio, bio, &trace_info);
2205 trace_raid56_write_stripe(rbio, bio, &trace_info);
2206 }
2207 submit_bio(bio);
2208 }
2209}
2210
7a315072
QW
2211/*
2212 * To determine if we need to read any sector from the disk.
 2213 * Should only be utilized in the RMW path, to skip a cached rbio.
2214 */
2215static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
2216{
2217 int i;
2218
2219 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
2220 struct sector_ptr *sector = &rbio->stripe_sectors[i];
2221
2222 /*
 2223 * We have a sector which doesn't have a page nor is uptodate,
 2224 * thus this rbio can not be a cached one, as a cached one must
2225 * have all its data sectors present and uptodate.
2226 */
2227 if (!sector->page || !sector->uptodate)
2228 return true;
2229 }
2230 return false;
2231}
2232
1d0ef1ca 2233static void rmw_rbio(struct btrfs_raid_bio *rbio)
5eb30ee2
QW
2234{
2235 struct bio_list bio_list;
2236 int sectornr;
2237 int ret = 0;
2238
2239 /*
2240 * Allocate the pages for parity first, as P/Q pages will always be
2241 * needed for both full-stripe and sub-stripe writes.
2242 */
2243 ret = alloc_rbio_parity_pages(rbio);
2244 if (ret < 0)
1d0ef1ca 2245 goto out;
5eb30ee2 2246
7a315072
QW
2247 /*
 2248 * Either this is a full stripe write, or we have every data sector
 2249 * already cached, so we can go to the write path immediately.
2250 */
4d762701
CH
2251 if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
2252 /*
 2253 * Now we're doing a sub-stripe write and also need all data stripes
2254 * to do the full RMW.
2255 */
2256 ret = alloc_rbio_data_pages(rbio);
2257 if (ret < 0)
1d0ef1ca 2258 goto out;
5eb30ee2 2259
4d762701 2260 index_rbio_pages(rbio);
5eb30ee2 2261
4d762701
CH
2262 ret = rmw_read_wait_recover(rbio);
2263 if (ret < 0)
1d0ef1ca 2264 goto out;
4d762701 2265 }
5eb30ee2 2266
5eb30ee2
QW
2267 /*
2268 * At this stage we're not allowed to add any new bios to the
 2269 * bio list any more; anyone else that wants to change this stripe
2270 * needs to do their own rmw.
2271 */
74cc3600 2272 spin_lock(&rbio->bio_list_lock);
5eb30ee2 2273 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
74cc3600 2274 spin_unlock(&rbio->bio_list_lock);
5eb30ee2 2275
2942a50d 2276 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
5eb30ee2
QW
2277
2278 index_rbio_pages(rbio);
2279
2280 /*
2281 * We don't cache full rbios because we're assuming
2282 * the higher layers are unlikely to use this area of
2283 * the disk again soon. If they do use it again,
2284 * hopefully they will send another full bio.
2285 */
2286 if (!rbio_is_full(rbio))
2287 cache_rbio_pages(rbio);
2288 else
2289 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2290
2291 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
2292 generate_pq_vertical(rbio, sectornr);
2293
2294 bio_list_init(&bio_list);
2295 ret = rmw_assemble_write_bios(rbio, &bio_list);
2296 if (ret < 0)
1d0ef1ca 2297 goto out;
5eb30ee2
QW
2298
2299 /* We should have at least one bio assembled. */
2300 ASSERT(bio_list_size(&bio_list));
2301 submit_write_bios(rbio, &bio_list);
2302 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2303
ad3daf1c
QW
2304 /* We may have more errors than our tolerance during the read. */
2305 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2306 int found_errors;
2307
2308 found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2309 if (found_errors > rbio->bioc->max_errors) {
2310 ret = -EIO;
2311 break;
2312 }
2313 }
1d0ef1ca
CH
2314out:
2315 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
5eb30ee2
QW
2316}
2317
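/*
 * generate_pq_vertical() above recomputes parity one vertical stripe at
 * a time; for RAID5 that is a plain XOR across the data sectors. A
 * minimal user-space sketch of that single-sector step (fixed sizes,
 * the RAID6 Q syndrome from raid6_call.gen_syndrome() is omitted):
 */
#include <stddef.h>
#include <stdint.h>

#define SECTORSIZE 4096

static void gen_p_vertical(uint8_t *const data[], int nr_data, uint8_t *p)
{
	size_t i;
	int d;

	for (i = 0; i < SECTORSIZE; i++) {
		uint8_t x = 0;

		/* XOR the same byte offset across all data stripes. */
		for (d = 0; d < nr_data; d++)
			x ^= data[d][i];
		p[i] = x;
	}
}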
93723095
QW
2318static void rmw_rbio_work(struct work_struct *work)
2319{
2320 struct btrfs_raid_bio *rbio;
93723095
QW
2321
2322 rbio = container_of(work, struct btrfs_raid_bio, work);
1d0ef1ca
CH
2323 if (lock_stripe_add(rbio) == 0)
2324 rmw_rbio(rbio);
93723095
QW
2325}
2326
2327static void rmw_rbio_work_locked(struct work_struct *work)
53b381b3 2328{
1d0ef1ca 2329 rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
53b381b3
DW
2330}
2331
5a6ac9ea
MX
2332/*
2333 * The following code is used to scrub/replace the parity stripe
2334 *
4c664611 2335 * Caller must have already increased bio_counter for getting @bioc.
ae6529c3 2336 *
5a6ac9ea
MX
 2337 * Note: We need to make sure all the pages that are added into the
 2338 * scrub/replace raid bio are correct and not changed during the scrub/
 2339 * replace. That is, those pages just hold metadata or file data with a checksum.
2340 */
2341
6a258d72
QW
2342struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
2343 struct btrfs_io_context *bioc,
ff18a4af 2344 struct btrfs_device *scrub_dev,
6a258d72 2345 unsigned long *dbitmap, int stripe_nsectors)
5a6ac9ea 2346{
6a258d72 2347 struct btrfs_fs_info *fs_info = bioc->fs_info;
5a6ac9ea
MX
2348 struct btrfs_raid_bio *rbio;
2349 int i;
2350
ff18a4af 2351 rbio = alloc_rbio(fs_info, bioc);
5a6ac9ea
MX
2352 if (IS_ERR(rbio))
2353 return NULL;
2354 bio_list_add(&rbio->bio_list, bio);
2355 /*
2356 * This is a special bio which is used to hold the completion handler
 2357 * and make the scrub rbio similar to the other types.
2358 */
2359 ASSERT(!bio->bi_iter.bi_size);
2360 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2361
9cd3a7eb 2362 /*
4c664611 2363 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
9cd3a7eb
LB
2364 * to the end position, so this search can start from the first parity
2365 * stripe.
2366 */
2367 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
4c664611 2368 if (bioc->stripes[i].dev == scrub_dev) {
5a6ac9ea
MX
2369 rbio->scrubp = i;
2370 break;
2371 }
2372 }
9cd3a7eb 2373 ASSERT(i < rbio->real_stripes);
5a6ac9ea 2374
c67c68eb 2375 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
5a6ac9ea
MX
2376 return rbio;
2377}
2378
5a6ac9ea
MX
2379/*
 2380 * We only scrub the parity for which we have correct data on the same
 2381 * horizontal stripe, so we needn't allocate all pages for all the stripes.
2382 */
2383static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2384{
3907ce29 2385 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
aee35e4b 2386 int total_sector_nr;
5a6ac9ea 2387
aee35e4b
QW
2388 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2389 total_sector_nr++) {
2390 struct page *page;
2391 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2392 int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
5a6ac9ea 2393
aee35e4b
QW
2394 if (!test_bit(sectornr, &rbio->dbitmap))
2395 continue;
2396 if (rbio->stripe_pages[index])
2397 continue;
2398 page = alloc_page(GFP_NOFS);
2399 if (!page)
2400 return -ENOMEM;
2401 rbio->stripe_pages[index] = page;
5a6ac9ea 2402 }
eb357060 2403 index_stripe_sectors(rbio);
5a6ac9ea
MX
2404 return 0;
2405}
2406
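/*
 * The page index math above also covers subpage setups. Worked example
 * with 4K sectors on 64K pages (16 sectors per page):
 *
 *	total_sector_nr = 21
 *	index = (21 * 4096) >> 16 = 1	(the second stripe page)
 *
 * so consecutive sectors share one stripe page and it is allocated only
 * once.
 */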
6bfd0133 2407static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
5a6ac9ea 2408{
4c664611 2409 struct btrfs_io_context *bioc = rbio->bioc;
46900662 2410 const u32 sectorsize = bioc->fs_info->sectorsize;
1389053e 2411 void **pointers = rbio->finish_pointers;
c67c68eb 2412 unsigned long *pbitmap = &rbio->finish_pbitmap;
5a6ac9ea
MX
2413 int nr_data = rbio->nr_data;
2414 int stripe;
3e77605d 2415 int sectornr;
c17af965 2416 bool has_qstripe;
46900662
QW
2417 struct sector_ptr p_sector = { 0 };
2418 struct sector_ptr q_sector = { 0 };
5a6ac9ea 2419 struct bio_list bio_list;
76035976 2420 int is_replace = 0;
5a6ac9ea
MX
2421 int ret;
2422
2423 bio_list_init(&bio_list);
2424
c17af965
DS
2425 if (rbio->real_stripes - rbio->nr_data == 1)
2426 has_qstripe = false;
2427 else if (rbio->real_stripes - rbio->nr_data == 2)
2428 has_qstripe = true;
2429 else
5a6ac9ea 2430 BUG();
5a6ac9ea 2431
1faf3885
QW
2432 /*
 2433 * Replace is running and our P/Q stripe is being replaced, so we
 2434 * need to duplicate the final write to the replace target.
2435 */
2436 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
76035976 2437 is_replace = 1;
c67c68eb 2438 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
76035976
MX
2439 }
2440
5a6ac9ea
MX
2441 /*
 2442 * Because the higher layers (scrubber) are unlikely to
 2443 * use this area of the disk again soon, don't cache
2444 * it.
2445 */
2446 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2447
2448 if (!need_check)
2449 goto writeback;
2450
46900662
QW
2451 p_sector.page = alloc_page(GFP_NOFS);
2452 if (!p_sector.page)
6bfd0133 2453 return -ENOMEM;
46900662
QW
2454 p_sector.pgoff = 0;
2455 p_sector.uptodate = 1;
5a6ac9ea 2456
c17af965 2457 if (has_qstripe) {
d70cef0d 2458 /* RAID6, allocate and map temp space for the Q stripe */
46900662
QW
2459 q_sector.page = alloc_page(GFP_NOFS);
2460 if (!q_sector.page) {
2461 __free_page(p_sector.page);
2462 p_sector.page = NULL;
6bfd0133 2463 return -ENOMEM;
5a6ac9ea 2464 }
46900662
QW
2465 q_sector.pgoff = 0;
2466 q_sector.uptodate = 1;
2467 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
5a6ac9ea
MX
2468 }
2469
2942a50d 2470 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
5a6ac9ea 2471
d70cef0d 2472 /* Map the parity stripe just once */
46900662 2473 pointers[nr_data] = kmap_local_page(p_sector.page);
d70cef0d 2474
c67c68eb 2475 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
46900662 2476 struct sector_ptr *sector;
5a6ac9ea 2477 void *parity;
46900662 2478
5a6ac9ea
MX
2479 /* first collect one page from each data stripe */
2480 for (stripe = 0; stripe < nr_data; stripe++) {
46900662
QW
2481 sector = sector_in_rbio(rbio, stripe, sectornr, 0);
2482 pointers[stripe] = kmap_local_page(sector->page) +
2483 sector->pgoff;
5a6ac9ea
MX
2484 }
2485
c17af965 2486 if (has_qstripe) {
d70cef0d 2487 /* RAID6, call the library function to fill in our P/Q */
46900662 2488 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
5a6ac9ea
MX
2489 pointers);
2490 } else {
2491 /* raid5 */
46900662
QW
2492 memcpy(pointers[nr_data], pointers[0], sectorsize);
2493 run_xor(pointers + 1, nr_data - 1, sectorsize);
5a6ac9ea
MX
2494 }
2495
01327610 2496 /* Check scrubbing parity and repair it */
46900662
QW
2497 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2498 parity = kmap_local_page(sector->page) + sector->pgoff;
2499 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
2500 memcpy(parity, pointers[rbio->scrubp], sectorsize);
5a6ac9ea
MX
2501 else
2502 /* Parity is right, needn't writeback */
c67c68eb 2503 bitmap_clear(&rbio->dbitmap, sectornr, 1);
58c1a35c 2504 kunmap_local(parity);
5a6ac9ea 2505
94a0b58d
IW
2506 for (stripe = nr_data - 1; stripe >= 0; stripe--)
2507 kunmap_local(pointers[stripe]);
5a6ac9ea
MX
2508 }
2509
94a0b58d 2510 kunmap_local(pointers[nr_data]);
46900662
QW
2511 __free_page(p_sector.page);
2512 p_sector.page = NULL;
2513 if (q_sector.page) {
94a0b58d 2514 kunmap_local(pointers[rbio->real_stripes - 1]);
46900662
QW
2515 __free_page(q_sector.page);
2516 q_sector.page = NULL;
d70cef0d 2517 }
5a6ac9ea
MX
2518
2519writeback:
2520 /*
 2521 * Time to start writing. Make bios for everything from the
2522 * higher layers (the bio_list in our rbio) and our p/q. Ignore
2523 * everything else.
2524 */
c67c68eb 2525 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
3e77605d 2526 struct sector_ptr *sector;
5a6ac9ea 2527
3e77605d
QW
2528 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2529 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
ff18a4af 2530 sectornr, REQ_OP_WRITE);
5a6ac9ea
MX
2531 if (ret)
2532 goto cleanup;
2533 }
2534
76035976
MX
2535 if (!is_replace)
2536 goto submit_write;
2537
1faf3885
QW
2538 /*
2539 * Replace is running and our parity stripe needs to be duplicated to
2540 * the target device. Check we have a valid source stripe number.
2541 */
2542 ASSERT(rbio->bioc->replace_stripe_src >= 0);
3e77605d
QW
2543 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
2544 struct sector_ptr *sector;
76035976 2545
3e77605d
QW
2546 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2547 ret = rbio_add_io_sector(rbio, &bio_list, sector,
1faf3885
QW
2548 rbio->real_stripes,
2549 sectornr, REQ_OP_WRITE);
76035976
MX
2550 if (ret)
2551 goto cleanup;
2552 }
2553
2554submit_write:
6bfd0133
QW
2555 submit_write_bios(rbio, &bio_list);
2556 return 0;
5a6ac9ea
MX
2557
2558cleanup:
801fcfc5 2559 bio_list_put(&bio_list);
6bfd0133 2560 return ret;
5a6ac9ea
MX
2561}
2562
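/*
 * The core of the writeback decision above: recompute the parity,
 * compare it with what is on disk, and only keep the dbitmap bit (and
 * thus queue a write) when the two differ. A condensed sketch of that
 * per-sector step, with hypothetical user-space names:
 */
#include <stdbool.h>
#include <string.h>

/* Returns true when the on-disk parity had to be repaired. */
static bool scrub_one_parity(void *ondisk, const void *computed,
			     size_t sectorsize)
{
	if (memcmp(ondisk, computed, sectorsize) == 0)
		return false;	/* parity is right, skip the writeback */
	memcpy(ondisk, computed, sectorsize);
	return true;		/* caller keeps this sector's dbitmap bit */
}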
2563static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2564{
2565 if (stripe >= 0 && stripe < rbio->nr_data)
2566 return 1;
2567 return 0;
2568}
2569
6bfd0133 2570static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
5a6ac9ea 2571{
75b47033
QW
2572 void **pointers = NULL;
2573 void **unmap_array = NULL;
2574 int sector_nr;
e7fc357e 2575 int ret = 0;
5a6ac9ea 2576
75b47033
QW
2577 /*
2578 * @pointers array stores the pointer for each sector.
2579 *
 2580 * @unmap_array stores a copy of the pointers that does not get reordered
2581 * during reconstruction so that kunmap_local works.
2582 */
2583 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2584 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2585 if (!pointers || !unmap_array) {
2586 ret = -ENOMEM;
2587 goto out;
2588 }
5a6ac9ea 2589
75b47033
QW
2590 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2591 int dfail = 0, failp = -1;
2592 int faila;
2593 int failb;
2594 int found_errors;
5a6ac9ea 2595
75b47033
QW
2596 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2597 &faila, &failb);
2598 if (found_errors > rbio->bioc->max_errors) {
2599 ret = -EIO;
2600 goto out;
2601 }
2602 if (found_errors == 0)
2603 continue;
5a6ac9ea 2604
75b47033
QW
2605 /* We should have at least one error here. */
2606 ASSERT(faila >= 0 || failb >= 0);
5a6ac9ea 2607
75b47033
QW
2608 if (is_data_stripe(rbio, faila))
2609 dfail++;
2610 else if (is_parity_stripe(faila))
2611 failp = faila;
5a6ac9ea 2612
75b47033
QW
2613 if (is_data_stripe(rbio, failb))
2614 dfail++;
2615 else if (is_parity_stripe(failb))
2616 failp = failb;
2617 /*
 2618 * Because we can not use a scrubbing parity to repair the
 2619 * data, the capability of the repair is reduced. (In the
2620 * case of RAID5, we can not repair anything.)
2621 */
2622 if (dfail > rbio->bioc->max_errors - 1) {
2623 ret = -EIO;
2624 goto out;
2625 }
2626 /*
 2627 * If all data is good and only the parity is bad, just repair
2628 * the parity, no need to recover data stripes.
2629 */
2630 if (dfail == 0)
2631 continue;
6bfd0133 2632
75b47033
QW
2633 /*
 2634 * Getting here means we got one corrupted data stripe and one
 2635 * corrupted parity on RAID6. If the corrupted parity is the
 2636 * scrubbing parity, we can luckily use the other one to repair
 2637 * the data; otherwise we can not repair the data stripe.
2638 */
2639 if (failp != rbio->scrubp) {
2640 ret = -EIO;
2641 goto out;
2642 }
2643
2644 ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
2645 if (ret < 0)
2646 goto out;
2647 }
2648out:
2649 kfree(pointers);
2650 kfree(unmap_array);
6bfd0133 2651 return ret;
5a6ac9ea
MX
2652}
2653
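/*
 * Decision table for the checks above, per vertical stripe on RAID6
 * (max_errors == 2, rbio->scrubp being the parity under scrub):
 *
 *	dfail == 0			nothing to rebuild, parity is
 *					repaired during writeback
 *	dfail >  max_errors - 1		-EIO, too many data failures
 *	dfail == 1, failp == scrubp	rebuild from the other parity
 *	dfail == 1, failp != scrubp	-EIO, we would have to trust the
 *					parity we are scrubbing
 *
 * On RAID5 (max_errors == 1) any data failure is therefore fatal here.
 */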
52f0c198 2654static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
5a6ac9ea 2655{
52f0c198 2656 struct bio_list bio_list = BIO_EMPTY_LIST;
cb3450b7
QW
2657 int total_sector_nr;
2658 int ret = 0;
5a6ac9ea 2659
1c10702e
QW
2660 /* Build a list of bios to read all the missing parts. */
2661 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2662 total_sector_nr++) {
2663 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2664 int stripe = total_sector_nr / rbio->stripe_nsectors;
2665 struct sector_ptr *sector;
5a6ac9ea 2666
1c10702e
QW
2667 /* No data in the vertical stripe, no need to read. */
2668 if (!test_bit(sectornr, &rbio->dbitmap))
2669 continue;
5a6ac9ea 2670
1c10702e
QW
2671 /*
2672 * We want to find all the sectors missing from the rbio and
2673 * read them from the disk. If sector_in_rbio() finds a sector
2674 * in the bio list we don't need to read it off the stripe.
2675 */
2676 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
2677 if (sector)
2678 continue;
2679
2680 sector = rbio_stripe_sector(rbio, stripe, sectornr);
2681 /*
2682 * The bio cache may have handed us an uptodate sector. If so,
2683 * use it.
2684 */
2685 if (sector->uptodate)
2686 continue;
2687
52f0c198 2688 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
ff18a4af 2689 sectornr, REQ_OP_READ);
52f0c198
CH
2690 if (ret) {
2691 bio_list_put(&bio_list);
2692 return ret;
2693 }
5a6ac9ea 2694 }
52f0c198
CH
2695
2696 submit_read_wait_bio_list(rbio, &bio_list);
cb3450b7 2697 return 0;
cb3450b7
QW
2698}
2699
08241d3c 2700static void scrub_rbio(struct btrfs_raid_bio *rbio)
cb3450b7 2701{
6bfd0133 2702 bool need_check = false;
ad3daf1c 2703 int sector_nr;
cb3450b7 2704 int ret;
cb3450b7 2705
cb3450b7
QW
2706 ret = alloc_rbio_essential_pages(rbio);
2707 if (ret)
08241d3c 2708 goto out;
cb3450b7 2709
2942a50d
QW
2710 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2711
52f0c198 2712 ret = scrub_assemble_read_bios(rbio);
cb3450b7 2713 if (ret < 0)
08241d3c 2714 goto out;
5a6ac9ea 2715
75b47033 2716 /* We may have some failures, recover the failed sectors first. */
6bfd0133
QW
2717 ret = recover_scrub_rbio(rbio);
2718 if (ret < 0)
08241d3c 2719 goto out;
5a6ac9ea 2720
6bfd0133
QW
2721 /*
2722 * We have every sector properly prepared. Can finish the scrub
 2723 * and write back the good content.
2724 */
2725 ret = finish_parity_scrub(rbio, need_check);
2726 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
ad3daf1c
QW
2727 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2728 int found_errors;
2729
2730 found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2731 if (found_errors > rbio->bioc->max_errors) {
2732 ret = -EIO;
2733 break;
2734 }
2735 }
08241d3c
CH
2736out:
2737 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
5a6ac9ea
MX
2738}
2739
6bfd0133 2740static void scrub_rbio_work_locked(struct work_struct *work)
5a6ac9ea 2741{
08241d3c 2742 scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
5a6ac9ea
MX
2743}
2744
5a6ac9ea
MX
2745void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2746{
2747 if (!lock_stripe_add(rbio))
6bfd0133 2748 start_async_work(rbio, scrub_rbio_work_locked);
5a6ac9ea 2749}