// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;
	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;
	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

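/*
 * Illustrative example (added; not from the original source): full
 * stripes start on large power-of-two boundaries, so assuming e.g. a
 * 64K stripe_len, a stripe at logical byte address 0x1240000 has all
 * zeros in its low 16 bits and only bits 16 and up carry entropy.
 * num >> 16 hands exactly those meaningful bits to hash_64().
 */
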
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

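/*
 * Illustrative note (added; not from the original source): each
 * xor_blocks() call folds its sources into the same dest buffer, so
 * assuming MAX_XOR_BLOCKS is 4 (its value in <linux/raid/xor.h> at
 * the time of writing), six sources take two passes through the loop:
 * first dest ^= pages[0..3], then dest ^= pages[4..5].
 */
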
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	else
		BUG_ON(size > rbio->nr_data * rbio->stripe_len);

	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * Parity scrub must read the full stripe from the drive, then
	 * check and repair the parity and write out the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

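/*
 * Illustrative example (added; not from the original source):
 * stripe_pages is laid out stripe-major.  Assuming a 64K stripe_len
 * and 4K pages, stripe_npages is 16, so data stripe 0 owns indexes
 * 0-15, data stripe 1 owns 16-31, and on a raid5 with nr_data == 2
 * the P stripe starts at index 32.
 */
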
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started with the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);

	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

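/*
 * Illustrative example (added; not from the original source):
 * assuming a 64K stripe_len and 4K pages, each drive needs 16 pages,
 * so a 4-drive raid6 full stripe needs 64 pages in total.
 */
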
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
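
	/*
	 * Layout sketch (added; not from the original source): the
	 * single kzalloc above packs everything into one allocation,
	 *
	 *   [struct btrfs_raid_bio][stripe_pages ptrs][bio_pages ptrs][dbitmap]
	 *
	 * which is why the two page-pointer arrays and the bitmap are
	 * carved out of the memory directly past the struct.
	 */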

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;

	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;
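
		/*
		 * Illustrative example (added; not from the original
		 * source): bio_pages is indexed by the page's offset
		 * inside the full logical stripe, so with 4K pages a
		 * bio starting 8K past raid_map[0] lands at
		 * bio_pages[2], and its following segments fill
		 * bio_pages[3], bio_pages[4], and so on.
		 */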
		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
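
			/*
			 * Added note (not from the original source):
			 * the two lines above compute
			 * P = D0 ^ D1 ^ ... ^ D(nr_data-1).  Parity is
			 * seeded with a copy of the first data block,
			 * then the remaining nr_data - 1 blocks are
			 * xored in.  For raid6, gen_syndrome() above
			 * fills in both P and the Reed-Solomon Q
			 * syndrome in a single pass instead.
			 */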
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}

			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}

/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;
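
			/*
			 * Added note (not from the original source):
			 * the failed block now holds a copy of the
			 * parity and sits at the end of the pointer
			 * array, so the xor below folds every
			 * surviving data block into it, leaving
			 * D_faila = P ^ (all other data blocks).
			 */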
			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with ondisk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}
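
	/*
	 * Illustrative example (added; not from the original source):
	 * on a 5-stripe raid6 (3 data + P + Q), mirror_num == 3 gives
	 * failb = 5 - 2 = 3, the P stripe, forcing reconstruction from
	 * Q; mirror_num == 4 fails a data stripe instead, with failb
	 * nudged down when it collides with the stripe that has
	 * already failed.
	 */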

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages that are added into the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace.  That is, those pages just hold metadata or file data
 * with checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* For now we only support the case where sectorsize equals page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}

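/*
 * Illustrative example (added; not from the original source):
 * @logical is an absolute byte address inside the full data stripe,
 * so assuming 4K pages, a page 12K past raid_map[0] is stored at
 * bio_pages[3], matching the indexing index_rbio_pages() uses for
 * regular bios.
 */
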
/*
 * We only scrub the parity for the horizontal stripes where we hold correct
 * data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this
	 * area of the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the parity stripe that is being
		 * scrubbed to repair data, our repair capability is
		 * reduced by one.  (In the case of RAID5, we cannot
		 * repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair
		 * the data; otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * end io for the read phase of the scrub/replace cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write,
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the rebuild-missing rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_read_rebuild(rbio);
}