1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
7 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
20 #include "async-thread.h"
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT 1
 * set when this rbio is sitting in the hash, but it is just a cache of past RMW
 */
29 #define RBIO_CACHE_BIT 2
32 * set when it is safe to trust the stripe_pages for caching
34 #define RBIO_CACHE_READY_BIT 3
36 #define RBIO_CACHE_SIZE 1024
38 #define BTRFS_STRIPE_HASH_TABLE_BITS 11
40 /* Used by the raid56 code to lock stripes for read/modify/write */
41 struct btrfs_stripe_hash {
42 struct list_head hash_list;
46 /* Used by the raid56 code to lock stripes for read/modify/write */
47 struct btrfs_stripe_hash_table {
48 struct list_head stripe_cache;
49 spinlock_t cache_lock;
51 struct btrfs_stripe_hash table[];
56 BTRFS_RBIO_READ_REBUILD,
57 BTRFS_RBIO_PARITY_SCRUB,
58 BTRFS_RBIO_REBUILD_MISSING,
61 struct btrfs_raid_bio {
62 struct btrfs_fs_info *fs_info;
63 struct btrfs_bio *bbio;
65 /* while we're doing rmw on a stripe
66 * we put it into a hash table so we can
 * lock the stripe and merge more rbios into it
 */
70 struct list_head hash_list;
73 * LRU list for the stripe cache
75 struct list_head stripe_cache;
78 * for scheduling work in the helper threads
80 struct btrfs_work work;
83 * bio list and bio_list_lock are used
84 * to add more bios into the stripe
85 * in hopes of avoiding the full rmw
87 struct bio_list bio_list;
88 spinlock_t bio_list_lock;
90 /* also protected by the bio_list_lock, the
91 * plug list is used by the plugging code
92 * to collect partial bios while plugged. The
93 * stripe locking code also uses it to hand off
94 * the stripe lock to the next pending IO
96 struct list_head plug_list;
99 * flags that tell us if it is safe to
100 * merge with this bio
104 /* size of each individual stripe on disk */
107 /* number of data stripes (no p/q) */
114 * set if we're doing a parity rebuild
115 * for a read from higher up, which is handled
116 * differently from a parity rebuild as part of
119 enum btrfs_rbio_ops operation;
121 /* first bad stripe */
124 /* second bad stripe (for raid6 use) */
129 * number of pages needed to represent the full
135 * size of all the bios in the bio_list. This
136 * helps us decide if the rbio maps to a full
145 atomic_t stripes_pending;
149 * these are two arrays of pointers. We allocate the
150 * rbio big enough to hold them both and setup their
151 * locations when the rbio is allocated
154 /* pointers to pages that we allocated for
155 * reading/writing stripes directly from the disk (including P/Q)
157 struct page **stripe_pages;
160 * pointers to the pages in the bio_list. Stored
161 * here for faster lookup
163 struct page **bio_pages;
166 * bitmap to record which horizontal stripe has data
168 unsigned long *dbitmap;
170 /* allocated with real_stripes-many pointers for finish_*() calls */
171 void **finish_pointers;
173 /* allocated with stripe_npages-many bits for finish_*() calls */
174 unsigned long *finish_pbitmap;
177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
179 static void rmw_work(struct btrfs_work *work);
180 static void read_rebuild_work(struct btrfs_work *work);
181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
183 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
184 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
189 static void scrub_parity_work(struct btrfs_work *work);
191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
193 btrfs_init_work(&rbio->work, work_func, NULL, NULL);
194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
198 * the stripe hash table is used for locking, and to collect
199 * bios in hopes of making a full stripe
201 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
203 struct btrfs_stripe_hash_table *table;
204 struct btrfs_stripe_hash_table *x;
205 struct btrfs_stripe_hash *cur;
206 struct btrfs_stripe_hash *h;
207 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
211 if (info->stripe_hash_table)
215 * The table is large, starting with order 4 and can go as high as
216 * order 7 in case lock debugging is turned on.
 * Try harder to allocate and fall back to vmalloc to lower the chance
219 * of a failing mount.
221 table_size = sizeof(*table) + sizeof(*h) * num_entries;
222 table = kvzalloc(table_size, GFP_KERNEL);
226 spin_lock_init(&table->cache_lock);
227 INIT_LIST_HEAD(&table->stripe_cache);
231 for (i = 0; i < num_entries; i++) {
233 INIT_LIST_HEAD(&cur->hash_list);
234 spin_lock_init(&cur->lock);
237 x = cmpxchg(&info->stripe_hash_table, NULL, table);
 * caching an rbio means copying anything from the
 * bio_pages array into the stripe_pages array. We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data.
 *
 * once the caching is done, we set the cache ready bit.
 */
252 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
259 ret = alloc_rbio_pages(rbio);
263 for (i = 0; i < rbio->nr_pages; i++) {
264 if (!rbio->bio_pages[i])
267 s = kmap(rbio->bio_pages[i]);
268 d = kmap(rbio->stripe_pages[i]);
272 kunmap(rbio->bio_pages[i]);
273 kunmap(rbio->stripe_pages[i]);
274 SetPageUptodate(rbio->stripe_pages[i]);
276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
280 * we hash on the first logical address of the stripe
282 static int rbio_bucket(struct btrfs_raid_bio *rbio)
284 u64 num = rbio->bbio->raid_map[0];
287 * we shift down quite a bit. We're using byte
288 * addressing, and most of the lower bits are zeros.
289 * This tends to upset hash_64, and it consistently
290 * returns just one or two different values.
292 * shifting off the lower bits fixes things.
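 *
 * As a purely illustrative example (numbers assumed, not from the code):
 * a full stripe starting at logical byte 0x40000000 gives num >> 16 ==
 * 0x4000, which hash_64() then spreads across the
 * 1 << BTRFS_STRIPE_HASH_TABLE_BITS (2048) buckets.
 */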
294 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
298 * stealing an rbio means taking all the uptodate pages from the stripe
299 * array in the source rbio and putting them into the destination rbio
301 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
307 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
310 for (i = 0; i < dest->nr_pages; i++) {
311 s = src->stripe_pages[i];
312 if (!s || !PageUptodate(s)) {
316 d = dest->stripe_pages[i];
320 dest->stripe_pages[i] = s;
321 src->stripe_pages[i] = NULL;
326 * merging means we take the bio_list from the victim and
327 * splice it into the destination. The victim should
328 * be discarded afterwards.
 * must be called with dest->bio_list_lock held
 */
332 static void merge_rbio(struct btrfs_raid_bio *dest,
333 struct btrfs_raid_bio *victim)
335 bio_list_merge(&dest->bio_list, &victim->bio_list);
336 dest->bio_list_bytes += victim->bio_list_bytes;
337 dest->generic_bio_cnt += victim->generic_bio_cnt;
338 bio_list_init(&victim->bio_list);
342 * used to prune items that are in the cache. The caller
343 * must hold the hash table lock.
345 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
347 int bucket = rbio_bucket(rbio);
348 struct btrfs_stripe_hash_table *table;
349 struct btrfs_stripe_hash *h;
353 * check the bit again under the hash table lock.
355 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
358 table = rbio->fs_info->stripe_hash_table;
359 h = table->table + bucket;
361 /* hold the lock for the bucket because we may be
362 * removing it from the hash table
367 * hold the lock for the bio list because we need
368 * to make sure the bio list is empty
370 spin_lock(&rbio->bio_list_lock);
372 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
373 list_del_init(&rbio->stripe_cache);
374 table->cache_size -= 1;
377 /* if the bio list isn't empty, this rbio is
378 * still involved in an IO. We take it out
379 * of the cache list, and drop the ref that
380 * was held for the list.
382 * If the bio_list was empty, we also remove
383 * the rbio from the hash_table, and drop
384 * the corresponding ref
386 if (bio_list_empty(&rbio->bio_list)) {
387 if (!list_empty(&rbio->hash_list)) {
388 list_del_init(&rbio->hash_list);
389 refcount_dec(&rbio->refs);
390 BUG_ON(!list_empty(&rbio->plug_list));
395 spin_unlock(&rbio->bio_list_lock);
396 spin_unlock(&h->lock);
399 __free_raid_bio(rbio);
403 * prune a given rbio from the cache
405 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
407 struct btrfs_stripe_hash_table *table;
410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
413 table = rbio->fs_info->stripe_hash_table;
415 spin_lock_irqsave(&table->cache_lock, flags);
416 __remove_rbio_from_cache(rbio);
417 spin_unlock_irqrestore(&table->cache_lock, flags);
421 * remove everything in the cache
423 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
425 struct btrfs_stripe_hash_table *table;
427 struct btrfs_raid_bio *rbio;
429 table = info->stripe_hash_table;
431 spin_lock_irqsave(&table->cache_lock, flags);
432 while (!list_empty(&table->stripe_cache)) {
433 rbio = list_entry(table->stripe_cache.next,
434 struct btrfs_raid_bio,
436 __remove_rbio_from_cache(rbio);
438 spin_unlock_irqrestore(&table->cache_lock, flags);
442 * remove all cached entries and free the hash table
445 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
447 if (!info->stripe_hash_table)
449 btrfs_clear_rbio_cache(info);
450 kvfree(info->stripe_hash_table);
451 info->stripe_hash_table = NULL;
 * insert an rbio into the stripe cache. It
 * must have already been prepared by calling
 * cache_rbio_pages().
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
465 static void cache_rbio(struct btrfs_raid_bio *rbio)
467 struct btrfs_stripe_hash_table *table;
470 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
473 table = rbio->fs_info->stripe_hash_table;
475 spin_lock_irqsave(&table->cache_lock, flags);
476 spin_lock(&rbio->bio_list_lock);
478 /* bump our ref if we were not in the list before */
479 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
480 refcount_inc(&rbio->refs);
482 if (!list_empty(&rbio->stripe_cache)){
483 list_move(&rbio->stripe_cache, &table->stripe_cache);
485 list_add(&rbio->stripe_cache, &table->stripe_cache);
486 table->cache_size += 1;
489 spin_unlock(&rbio->bio_list_lock);
491 if (table->cache_size > RBIO_CACHE_SIZE) {
492 struct btrfs_raid_bio *found;
494 found = list_entry(table->stripe_cache.prev,
495 struct btrfs_raid_bio,
499 __remove_rbio_from_cache(found);
502 spin_unlock_irqrestore(&table->cache_lock, flags);
506 * helper function to run the xor_blocks api. It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to loop through in batches.
 */
510 static void run_xor(void **pages, int src_cnt, ssize_t len)
514 void *dest = pages[src_cnt];
517 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
518 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
520 src_cnt -= xor_src_cnt;
521 src_off += xor_src_cnt;
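/*
 * xor_blocks() accepts at most MAX_XOR_BLOCKS source pages per call,
 * so larger source counts are accumulated into the same dest page
 * over several iterations of the loop above.
 */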
 * Returns true if the bio list inside this rbio covers an entire stripe (no rmw required).
 */
529 static int rbio_is_full(struct btrfs_raid_bio *rbio)
532 unsigned long size = rbio->bio_list_bytes;
535 spin_lock_irqsave(&rbio->bio_list_lock, flags);
536 if (size != rbio->nr_data * rbio->stripe_len)
538 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
539 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
545 * returns 1 if it is safe to merge two rbios together.
546 * The merging is safe if the two rbios correspond to
547 * the same stripe and if they are both going in the same
548 * direction (read vs write), and if neither one is
549 * locked for final IO
551 * The caller is responsible for locking such that
552 * rmw_locked is safe to test
554 static int rbio_can_merge(struct btrfs_raid_bio *last,
555 struct btrfs_raid_bio *cur)
557 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
558 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
562 * we can't merge with cached rbios, since the
563 * idea is that when we merge the destination
564 * rbio is going to run our IO for us. We can
565 * steal from cached rbios though, other functions
568 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
569 test_bit(RBIO_CACHE_BIT, &cur->flags))
572 if (last->bbio->raid_map[0] !=
573 cur->bbio->raid_map[0])
576 /* we can't merge with different operations */
577 if (last->operation != cur->operation)
 * We need to read the full stripe from the drive, then
 * check and repair the parity and write the new results.
583 * We're not allowed to add any new bios to the
584 * bio list here, anyone else that wants to
585 * change this stripe needs to do their own rmw.
587 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
590 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
593 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
594 int fa = last->faila;
595 int fb = last->failb;
596 int cur_fa = cur->faila;
597 int cur_fb = cur->failb;
599 if (last->faila >= last->failb) {
604 if (cur->faila >= cur->failb) {
609 if (fa != cur_fa || fb != cur_fb)
615 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
618 return stripe * rbio->stripe_npages + index;
622 * these are just the pages from the rbio array, not from anything
623 * the FS sent down to us
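 *
 * The stripe_pages array is laid out stripe-major: page 'index' of
 * stripe N lives at N * rbio->stripe_npages + index (see
 * rbio_stripe_page_index() above).
 */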
625 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
628 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
632 * helper to index into the pstripe
634 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
636 return rbio_stripe_page(rbio, rbio->nr_data, index);
640 * helper to index into the qstripe, returns null
641 * if there is no qstripe
643 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
645 if (rbio->nr_data + 1 == rbio->real_stripes)
647 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
651 * The first stripe in the table for a logical address
652 * has the lock. rbios are added in one of three ways:
654 * 1) Nobody has the stripe locked yet. The rbio is given
655 * the lock and 0 is returned. The caller must start the IO
658 * 2) Someone has the stripe locked, but we're able to merge
659 * with the lock owner. The rbio is freed and the IO will
660 * start automatically along with the existing rbio. 1 is returned.
662 * 3) Someone has the stripe locked, but we're not able to merge.
663 * The rbio is added to the lock owner's plug list, or merged into
664 * an rbio already on the plug list. When the lock owner unlocks,
665 * the next rbio on the list is run and the IO is started automatically.
668 * If we return 0, the caller still owns the rbio and must continue with
669 * IO submission. If we return 1, the caller must assume the rbio has
670 * already been freed.
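 *
 * Typical caller pattern (see full_stripe_write()/partial_stripe_write()
 * below): if lock_stripe_add() returns 0 the caller goes on to issue the
 * IO itself, otherwise it just returns because the rbio was merged into,
 * or queued behind, the current lock owner.
 */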
672 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
674 int bucket = rbio_bucket(rbio);
675 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
676 struct btrfs_raid_bio *cur;
677 struct btrfs_raid_bio *pending;
679 struct btrfs_raid_bio *freeit = NULL;
680 struct btrfs_raid_bio *cache_drop = NULL;
683 spin_lock_irqsave(&h->lock, flags);
684 list_for_each_entry(cur, &h->hash_list, hash_list) {
685 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
686 spin_lock(&cur->bio_list_lock);
688 /* can we steal this cached rbio's pages? */
689 if (bio_list_empty(&cur->bio_list) &&
690 list_empty(&cur->plug_list) &&
691 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
692 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
693 list_del_init(&cur->hash_list);
694 refcount_dec(&cur->refs);
696 steal_rbio(cur, rbio);
698 spin_unlock(&cur->bio_list_lock);
703 /* can we merge into the lock owner? */
704 if (rbio_can_merge(cur, rbio)) {
705 merge_rbio(cur, rbio);
706 spin_unlock(&cur->bio_list_lock);
714 * we couldn't merge with the running
715 * rbio, see if we can merge with the
716 * pending ones. We don't have to
717 * check for rmw_locked because there
718 * is no way they are inside finish_rmw
721 list_for_each_entry(pending, &cur->plug_list,
723 if (rbio_can_merge(pending, rbio)) {
724 merge_rbio(pending, rbio);
725 spin_unlock(&cur->bio_list_lock);
732 /* no merging, put us on the tail of the plug list,
 * our rbio will be started when the currently
 * running rbio unlocks
 */
736 list_add_tail(&rbio->plug_list, &cur->plug_list);
737 spin_unlock(&cur->bio_list_lock);
743 refcount_inc(&rbio->refs);
744 list_add(&rbio->hash_list, &h->hash_list);
746 spin_unlock_irqrestore(&h->lock, flags);
748 remove_rbio_from_cache(cache_drop);
750 __free_raid_bio(freeit);
755 * called as rmw or parity rebuild is completed. If the plug list has more
756 * rbios waiting for this stripe, the next one on the list will be started
758 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
761 struct btrfs_stripe_hash *h;
765 bucket = rbio_bucket(rbio);
766 h = rbio->fs_info->stripe_hash_table->table + bucket;
768 if (list_empty(&rbio->plug_list))
771 spin_lock_irqsave(&h->lock, flags);
772 spin_lock(&rbio->bio_list_lock);
774 if (!list_empty(&rbio->hash_list)) {
776 * if we're still cached and there is no other IO
777 * to perform, just leave this rbio here for others
778 * to steal from later
780 if (list_empty(&rbio->plug_list) &&
781 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
783 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
784 BUG_ON(!bio_list_empty(&rbio->bio_list));
788 list_del_init(&rbio->hash_list);
789 refcount_dec(&rbio->refs);
792 * we use the plug list to hold all the rbios
793 * waiting for the chance to lock this stripe.
794 * hand the lock over to one of them.
796 if (!list_empty(&rbio->plug_list)) {
797 struct btrfs_raid_bio *next;
798 struct list_head *head = rbio->plug_list.next;
800 next = list_entry(head, struct btrfs_raid_bio,
803 list_del_init(&rbio->plug_list);
805 list_add(&next->hash_list, &h->hash_list);
806 refcount_inc(&next->refs);
807 spin_unlock(&rbio->bio_list_lock);
808 spin_unlock_irqrestore(&h->lock, flags);
810 if (next->operation == BTRFS_RBIO_READ_REBUILD)
811 start_async_work(next, read_rebuild_work);
812 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
813 steal_rbio(rbio, next);
814 start_async_work(next, read_rebuild_work);
815 } else if (next->operation == BTRFS_RBIO_WRITE) {
816 steal_rbio(rbio, next);
817 start_async_work(next, rmw_work);
818 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
819 steal_rbio(rbio, next);
820 start_async_work(next, scrub_parity_work);
827 spin_unlock(&rbio->bio_list_lock);
828 spin_unlock_irqrestore(&h->lock, flags);
832 remove_rbio_from_cache(rbio);
835 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
839 if (!refcount_dec_and_test(&rbio->refs))
842 WARN_ON(!list_empty(&rbio->stripe_cache));
843 WARN_ON(!list_empty(&rbio->hash_list));
844 WARN_ON(!bio_list_empty(&rbio->bio_list));
846 for (i = 0; i < rbio->nr_pages; i++) {
847 if (rbio->stripe_pages[i]) {
848 __free_page(rbio->stripe_pages[i]);
849 rbio->stripe_pages[i] = NULL;
853 btrfs_put_bbio(rbio->bbio);
857 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
864 cur->bi_status = err;
871 * this frees the rbio and runs through all the bios in the
872 * bio_list and calls end_io on them
874 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
876 struct bio *cur = bio_list_get(&rbio->bio_list);
879 if (rbio->generic_bio_cnt)
880 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
883 * At this moment, rbio->bio_list is empty, however since rbio does not
884 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
 * hash list, rbio may be merged with others so that rbio->bio_list becomes non-empty.
887 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
888 * more and we can call bio_endio() on all queued bios.
891 extra = bio_list_get(&rbio->bio_list);
892 __free_raid_bio(rbio);
894 rbio_endio_bio_list(cur, err);
896 rbio_endio_bio_list(extra, err);
900 * end io function used by finish_rmw. When we finally
901 * get here, we've written a full stripe
903 static void raid_write_end_io(struct bio *bio)
905 struct btrfs_raid_bio *rbio = bio->bi_private;
906 blk_status_t err = bio->bi_status;
910 fail_bio_stripe(rbio, bio);
914 if (!atomic_dec_and_test(&rbio->stripes_pending))
/* OK, all of the stripe writes we issued have completed. */
920 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
921 0 : rbio->bbio->max_errors;
922 if (atomic_read(&rbio->error) > max_errors)
925 rbio_orig_end_io(rbio, err);
929 * the read/modify/write code wants to use the original bio for
930 * any pages it included, and then use the rbio for everything
931 * else. This function decides if a given index (stripe number)
932 * and page number in that stripe fall inside the original bio
935 * if you set bio_list_only, you'll get a NULL back for any ranges
936 * that are outside the bio_list
938 * This doesn't take any refs on anything, you get a bare page pointer
939 * and the caller must bump refs as required.
941 * You must call index_rbio_pages once before you can trust
942 * the answers from this function.
944 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
945 int index, int pagenr, int bio_list_only)
948 struct page *p = NULL;
950 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
952 spin_lock_irq(&rbio->bio_list_lock);
953 p = rbio->bio_pages[chunk_page];
954 spin_unlock_irq(&rbio->bio_list_lock);
956 if (p || bio_list_only)
959 return rbio->stripe_pages[chunk_page];
 * number of pages we need for the entire stripe across all the drives
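 *
 * For example (sizes assumed for illustration): a 64KiB stripe_len with
 * 4KiB pages and 3 stripes (2 data + P) needs 16 * 3 = 48 pages.
 */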
966 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
968 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 * allocation and initial setup for the btrfs_raid_bio. Note that
 * this does not allocate any pages for rbio->pages.
 */
975 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
976 struct btrfs_bio *bbio,
979 struct btrfs_raid_bio *rbio;
981 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
982 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
983 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
986 rbio = kzalloc(sizeof(*rbio) +
987 sizeof(*rbio->stripe_pages) * num_pages +
988 sizeof(*rbio->bio_pages) * num_pages +
989 sizeof(*rbio->finish_pointers) * real_stripes +
990 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
991 sizeof(*rbio->finish_pbitmap) *
992 BITS_TO_LONGS(stripe_npages),
995 return ERR_PTR(-ENOMEM);
997 bio_list_init(&rbio->bio_list);
998 INIT_LIST_HEAD(&rbio->plug_list);
999 spin_lock_init(&rbio->bio_list_lock);
1000 INIT_LIST_HEAD(&rbio->stripe_cache);
1001 INIT_LIST_HEAD(&rbio->hash_list);
1003 rbio->fs_info = fs_info;
1004 rbio->stripe_len = stripe_len;
1005 rbio->nr_pages = num_pages;
1006 rbio->real_stripes = real_stripes;
1007 rbio->stripe_npages = stripe_npages;
1010 refcount_set(&rbio->refs, 1);
1011 atomic_set(&rbio->error, 0);
1012 atomic_set(&rbio->stripes_pending, 0);
1015 * the stripe_pages, bio_pages, etc arrays point to the extra
1016 * memory we allocated past the end of the rbio
#define CONSUME_ALLOC(ptr, count) do { \
		(ptr) = p; \
		p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
	} while (0)
1023 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1024 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1025 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1026 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1027 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1028 #undef CONSUME_ALLOC
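/*
 * After the CONSUME_ALLOC() calls above, the single allocation made at
 * the top of alloc_rbio() is carved up as:
 *
 *   [struct btrfs_raid_bio][stripe_pages][bio_pages][finish_pointers]
 *   [dbitmap][finish_pbitmap]
 */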
1030 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1031 nr_data = real_stripes - 1;
1032 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1033 nr_data = real_stripes - 2;
1037 rbio->nr_data = nr_data;
1041 /* allocate pages for all the stripes in the bio, including parity */
1042 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1047 for (i = 0; i < rbio->nr_pages; i++) {
1048 if (rbio->stripe_pages[i])
1050 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1053 rbio->stripe_pages[i] = page;
1058 /* only allocate pages for p/q stripes */
1059 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1064 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1066 for (; i < rbio->nr_pages; i++) {
1067 if (rbio->stripe_pages[i])
1069 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1072 rbio->stripe_pages[i] = page;
1078 * add a single page from a specific stripe into our list of bios for IO
1079 * this will try to merge into existing bios if possible, and returns
1080 * zero if all went well.
1082 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1083 struct bio_list *bio_list,
1086 unsigned long page_index,
1087 unsigned long bio_max_len)
1089 struct bio *last = bio_list->tail;
1093 struct btrfs_bio_stripe *stripe;
1096 stripe = &rbio->bbio->stripes[stripe_nr];
1097 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
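/*
 * disk_start is a byte offset on this stripe's device; bio sector
 * numbers are 512-byte units, hence the << 9 / >> 9 conversions when
 * comparing against and filling in bi_sector below.
 */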
1099 /* if the device is missing, just fail this stripe */
1100 if (!stripe->dev->bdev)
1101 return fail_rbio_index(rbio, stripe_nr);
1103 /* see if we can add this page onto our existing bio */
1105 last_end = (u64)last->bi_iter.bi_sector << 9;
1106 last_end += last->bi_iter.bi_size;
1109 * we can't merge these if they are from different
1110 * devices or if they are not contiguous
1112 if (last_end == disk_start && stripe->dev->bdev &&
1114 last->bi_disk == stripe->dev->bdev->bd_disk &&
1115 last->bi_partno == stripe->dev->bdev->bd_partno) {
1116 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1117 if (ret == PAGE_SIZE)
1122 /* put a new bio on the list */
1123 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1124 bio->bi_iter.bi_size = 0;
1125 bio_set_dev(bio, stripe->dev->bdev);
1126 bio->bi_iter.bi_sector = disk_start >> 9;
1128 bio_add_page(bio, page, PAGE_SIZE, 0);
1129 bio_list_add(bio_list, bio);
1134 * while we're doing the read/modify/write cycle, we could
1135 * have errors in reading pages off the disk. This checks
1136 * for errors and if we're not able to read the page it'll
1137 * trigger parity reconstruction. The rmw will be finished
1138 * after we've reconstructed the failed stripes
1140 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1142 if (rbio->faila >= 0 || rbio->failb >= 0) {
1143 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1144 __raid56_parity_recover(rbio);
1151 * helper function to walk our bio list and populate the bio_pages array with
1152 * the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we set up the IO in finish_rmw or stripe reconstruction.
1156 * This must be called before you trust the answers from page_in_rbio
1158 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1162 unsigned long stripe_offset;
1163 unsigned long page_index;
1165 spin_lock_irq(&rbio->bio_list_lock);
1166 bio_list_for_each(bio, &rbio->bio_list) {
1167 struct bio_vec bvec;
1168 struct bvec_iter iter;
1171 start = (u64)bio->bi_iter.bi_sector << 9;
1172 stripe_offset = start - rbio->bbio->raid_map[0];
1173 page_index = stripe_offset >> PAGE_SHIFT;
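/*
 * stripe_offset is the byte offset of this bio from the logical start
 * of the full stripe (raid_map[0]), so page_index is simply that
 * offset in pages; bio_pages indexes the data portion of the stripe
 * linearly.
 */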
1175 if (bio_flagged(bio, BIO_CLONED))
1176 bio->bi_iter = btrfs_io_bio(bio)->iter;
1178 bio_for_each_segment(bvec, bio, iter) {
1179 rbio->bio_pages[page_index + i] = bvec.bv_page;
1183 spin_unlock_irq(&rbio->bio_list_lock);
1187 * this is called from one of two situations. We either
1188 * have a full stripe from the higher layers, or we've read all
1189 * the missing bits off disk.
 * This will calculate the parity and then send down any writes that are required.
 */
1194 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1196 struct btrfs_bio *bbio = rbio->bbio;
1197 void **pointers = rbio->finish_pointers;
1198 int nr_data = rbio->nr_data;
1203 struct bio_list bio_list;
1207 bio_list_init(&bio_list);
1209 if (rbio->real_stripes - rbio->nr_data == 1) {
1210 p_stripe = rbio->real_stripes - 1;
1211 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1212 p_stripe = rbio->real_stripes - 2;
1213 q_stripe = rbio->real_stripes - 1;
1218 /* at this point we either have a full stripe,
1219 * or we've read the full stripe from the drive.
1220 * recalculate the parity and write the new results.
1222 * We're not allowed to add any new bios to the
1223 * bio list here, anyone else that wants to
1224 * change this stripe needs to do their own rmw.
1226 spin_lock_irq(&rbio->bio_list_lock);
1227 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1228 spin_unlock_irq(&rbio->bio_list_lock);
1230 atomic_set(&rbio->error, 0);
1233 * now that we've set rmw_locked, run through the
1234 * bio list one last time and map the page pointers
1236 * We don't cache full rbios because we're assuming
1237 * the higher layers are unlikely to use this area of
1238 * the disk again soon. If they do use it again,
1239 * hopefully they will send another full bio.
1241 index_rbio_pages(rbio);
1242 if (!rbio_is_full(rbio))
1243 cache_rbio_pages(rbio);
1245 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1247 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1249 /* first collect one page from each data stripe */
1250 for (stripe = 0; stripe < nr_data; stripe++) {
1251 p = page_in_rbio(rbio, stripe, pagenr, 0);
1252 pointers[stripe] = kmap(p);
1255 /* then add the parity stripe */
1256 p = rbio_pstripe_page(rbio, pagenr);
1258 pointers[stripe++] = kmap(p);
1260 if (q_stripe != -1) {
1263 * raid6, add the qstripe and call the
1264 * library function to fill in our p/q
1266 p = rbio_qstripe_page(rbio, pagenr);
1268 pointers[stripe++] = kmap(p);
1270 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1274 copy_page(pointers[nr_data], pointers[0]);
1275 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
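/*
 * raid5: P = D0 ^ D1 ^ ... The copy_page() above seeds the parity
 * page with the first data page and run_xor() folds the remaining
 * data pages into it.
 */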
1279 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1280 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1284 * time to start writing. Make bios for everything from the
 * higher layers (the bio_list in our rbio) and our p/q. Ignore everything else.
 */
1288 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1289 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1291 if (stripe < rbio->nr_data) {
1292 page = page_in_rbio(rbio, stripe, pagenr, 1);
1296 page = rbio_stripe_page(rbio, stripe, pagenr);
1299 ret = rbio_add_io_page(rbio, &bio_list,
1300 page, stripe, pagenr, rbio->stripe_len);
1306 if (likely(!bbio->num_tgtdevs))
1309 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1310 if (!bbio->tgtdev_map[stripe])
1313 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1315 if (stripe < rbio->nr_data) {
1316 page = page_in_rbio(rbio, stripe, pagenr, 1);
1320 page = rbio_stripe_page(rbio, stripe, pagenr);
1323 ret = rbio_add_io_page(rbio, &bio_list, page,
1324 rbio->bbio->tgtdev_map[stripe],
1325 pagenr, rbio->stripe_len);
1332 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1333 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1336 bio = bio_list_pop(&bio_list);
1340 bio->bi_private = rbio;
1341 bio->bi_end_io = raid_write_end_io;
1342 bio->bi_opf = REQ_OP_WRITE;
1349 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1351 while ((bio = bio_list_pop(&bio_list)))
1356 * helper to find the stripe number for a given bio. Used to figure out which
1357 * stripe has failed. This expects the bio to correspond to a physical disk,
1358 * so it looks up based on physical sector numbers.
1360 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1363 u64 physical = bio->bi_iter.bi_sector;
1366 struct btrfs_bio_stripe *stripe;
1370 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1371 stripe = &rbio->bbio->stripes[i];
1372 stripe_start = stripe->physical;
1373 if (physical >= stripe_start &&
1374 physical < stripe_start + rbio->stripe_len &&
1375 stripe->dev->bdev &&
1376 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1377 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1385 * helper to find the stripe number for a given
1386 * bio (before mapping). Used to figure out which stripe has
1387 * failed. This looks up based on logical block numbers.
1389 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1392 u64 logical = bio->bi_iter.bi_sector;
1398 for (i = 0; i < rbio->nr_data; i++) {
1399 stripe_start = rbio->bbio->raid_map[i];
1400 if (logical >= stripe_start &&
1401 logical < stripe_start + rbio->stripe_len) {
1409 * returns -EIO if we had too many failures
1411 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1413 unsigned long flags;
1416 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1418 /* we already know this stripe is bad, move on */
1419 if (rbio->faila == failed || rbio->failb == failed)
1422 if (rbio->faila == -1) {
1423 /* first failure on this rbio */
1424 rbio->faila = failed;
1425 atomic_inc(&rbio->error);
1426 } else if (rbio->failb == -1) {
1427 /* second failure on this rbio */
1428 rbio->failb = failed;
1429 atomic_inc(&rbio->error);
1434 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1440 * helper to fail a stripe based on a physical disk
1443 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1446 int failed = find_bio_stripe(rbio, bio);
1451 return fail_rbio_index(rbio, failed);
1455 * this sets each page in the bio uptodate. It should only be used on private
1456 * rbio pages, nothing that comes in from the higher layers
1458 static void set_bio_pages_uptodate(struct bio *bio)
1460 struct bio_vec *bvec;
1461 struct bvec_iter_all iter_all;
1463 ASSERT(!bio_flagged(bio, BIO_CLONED));
1465 bio_for_each_segment_all(bvec, bio, iter_all)
1466 SetPageUptodate(bvec->bv_page);
1470 * end io for the read phase of the rmw cycle. All the bios here are physical
1471 * stripe bios we've read from the disk so we can recalculate the parity of the
1474 * This will usually kick off finish_rmw once all the bios are read in, but it
1475 * may trigger parity reconstruction if we had any errors along the way
1477 static void raid_rmw_end_io(struct bio *bio)
1479 struct btrfs_raid_bio *rbio = bio->bi_private;
1482 fail_bio_stripe(rbio, bio);
1484 set_bio_pages_uptodate(bio);
1488 if (!atomic_dec_and_test(&rbio->stripes_pending))
1491 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1495 * this will normally call finish_rmw to start our write
1496 * but if there are any failed stripes we'll reconstruct
1499 validate_rbio_for_rmw(rbio);
1504 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1508 * the stripe must be locked by the caller. It will
1509 * unlock after all the writes are done
1511 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1513 int bios_to_read = 0;
1514 struct bio_list bio_list;
1520 bio_list_init(&bio_list);
1522 ret = alloc_rbio_pages(rbio);
1526 index_rbio_pages(rbio);
1528 atomic_set(&rbio->error, 0);
/* build a list of bios to read all the missing parts of this stripe */
1533 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1534 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1537 * we want to find all the pages missing from
1538 * the rbio and read them from the disk. If
1539 * page_in_rbio finds a page in the bio list
1540 * we don't need to read it off the stripe.
1542 page = page_in_rbio(rbio, stripe, pagenr, 1);
1546 page = rbio_stripe_page(rbio, stripe, pagenr);
1548 * the bio cache may have handed us an uptodate
1549 * page. If so, be happy and use it
1551 if (PageUptodate(page))
1554 ret = rbio_add_io_page(rbio, &bio_list, page,
1555 stripe, pagenr, rbio->stripe_len);
1561 bios_to_read = bio_list_size(&bio_list);
1562 if (!bios_to_read) {
1564 * this can happen if others have merged with
1565 * us, it means there is nothing left to read.
1566 * But if there are missing devices it may not be
1567 * safe to do the full stripe write yet.
1573 * the bbio may be freed once we submit the last bio. Make sure
1574 * not to touch it after that
1576 atomic_set(&rbio->stripes_pending, bios_to_read);
1578 bio = bio_list_pop(&bio_list);
1582 bio->bi_private = rbio;
1583 bio->bi_end_io = raid_rmw_end_io;
1584 bio->bi_opf = REQ_OP_READ;
1586 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1590 /* the actual write will happen once the reads are done */
1594 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1596 while ((bio = bio_list_pop(&bio_list)))
1602 validate_rbio_for_rmw(rbio);
1607 * if the upper layers pass in a full stripe, we thank them by only allocating
1608 * enough pages to hold the parity, and sending it all down quickly.
1610 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1614 ret = alloc_rbio_parity_pages(rbio);
1616 __free_raid_bio(rbio);
1620 ret = lock_stripe_add(rbio);
1627 * partial stripe writes get handed over to async helpers.
1628 * We're really hoping to merge a few more writes into this
1629 * rbio before calculating new parity
1631 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1635 ret = lock_stripe_add(rbio);
1637 start_async_work(rbio, rmw_work);
1642 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
1644 * a full stripe. So we do a check here to see if we can
1645 * go directly to finish_rmw
1647 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1649 /* head off into rmw land if we don't have a full stripe */
1650 if (!rbio_is_full(rbio))
1651 return partial_stripe_write(rbio);
1652 return full_stripe_write(rbio);
 * We use plugging callbacks to collect full stripes.
1657 * Any time we get a partial stripe write while plugged
1658 * we collect it into a list. When the unplug comes down,
1659 * we sort the list by logical block number and merge
1660 * everything we can into the same rbios
1662 struct btrfs_plug_cb {
1663 struct blk_plug_cb cb;
1664 struct btrfs_fs_info *info;
1665 struct list_head rbio_list;
1666 struct btrfs_work work;
1670 * rbios on the plug list are sorted for easier merging.
1672 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1674 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1676 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1678 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1679 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1681 if (a_sector < b_sector)
1683 if (a_sector > b_sector)
1688 static void run_plug(struct btrfs_plug_cb *plug)
1690 struct btrfs_raid_bio *cur;
1691 struct btrfs_raid_bio *last = NULL;
1694 * sort our plug list then try to merge
1695 * everything we can in hopes of creating full
1698 list_sort(NULL, &plug->rbio_list, plug_cmp);
1699 while (!list_empty(&plug->rbio_list)) {
1700 cur = list_entry(plug->rbio_list.next,
1701 struct btrfs_raid_bio, plug_list);
1702 list_del_init(&cur->plug_list);
1704 if (rbio_is_full(cur)) {
1707 /* we have a full stripe, send it down */
1708 ret = full_stripe_write(cur);
1713 if (rbio_can_merge(last, cur)) {
1714 merge_rbio(last, cur);
1715 __free_raid_bio(cur);
1719 __raid56_parity_write(last);
1724 __raid56_parity_write(last);
1730 * if the unplug comes from schedule, we have to push the
1731 * work off to a helper thread
1733 static void unplug_work(struct btrfs_work *work)
1735 struct btrfs_plug_cb *plug;
1736 plug = container_of(work, struct btrfs_plug_cb, work);
1740 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1742 struct btrfs_plug_cb *plug;
1743 plug = container_of(cb, struct btrfs_plug_cb, cb);
1745 if (from_schedule) {
1746 btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1747 btrfs_queue_work(plug->info->rmw_workers,
1755 * our main entry point for writes from the rest of the FS.
1757 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1758 struct btrfs_bio *bbio, u64 stripe_len)
1760 struct btrfs_raid_bio *rbio;
1761 struct btrfs_plug_cb *plug = NULL;
1762 struct blk_plug_cb *cb;
1765 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1767 btrfs_put_bbio(bbio);
1768 return PTR_ERR(rbio);
1770 bio_list_add(&rbio->bio_list, bio);
1771 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1772 rbio->operation = BTRFS_RBIO_WRITE;
1774 btrfs_bio_counter_inc_noblocked(fs_info);
1775 rbio->generic_bio_cnt = 1;
1778 * don't plug on full rbios, just get them out the door
1779 * as quickly as we can
1781 if (rbio_is_full(rbio)) {
1782 ret = full_stripe_write(rbio);
1784 btrfs_bio_counter_dec(fs_info);
1788 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1790 plug = container_of(cb, struct btrfs_plug_cb, cb);
1792 plug->info = fs_info;
1793 INIT_LIST_HEAD(&plug->rbio_list);
1795 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1798 ret = __raid56_parity_write(rbio);
1800 btrfs_bio_counter_dec(fs_info);
1806 * all parity reconstruction happens here. We've read in everything
1807 * we can find from the drives and this does the heavy lifting of
1808 * sorting the good from the bad.
1810 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1814 int faila = -1, failb = -1;
1819 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1821 err = BLK_STS_RESOURCE;
1825 faila = rbio->faila;
1826 failb = rbio->failb;
1828 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1829 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1830 spin_lock_irq(&rbio->bio_list_lock);
1831 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1832 spin_unlock_irq(&rbio->bio_list_lock);
1835 index_rbio_pages(rbio);
1837 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
 * Now we just use the bitmap to mark the horizontal stripes in
 * which we have data when doing parity scrub.
 */
1842 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1843 !test_bit(pagenr, rbio->dbitmap))
1846 /* setup our array of pointers with pages
1849 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1851 * if we're rebuilding a read, we have to use
1852 * pages from the bio list
1854 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1855 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1856 (stripe == faila || stripe == failb)) {
1857 page = page_in_rbio(rbio, stripe, pagenr, 0);
1859 page = rbio_stripe_page(rbio, stripe, pagenr);
1861 pointers[stripe] = kmap(page);
1864 /* all raid6 handling here */
1865 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1867 * single failure, rebuild from parity raid5
1871 if (faila == rbio->nr_data) {
1873 * Just the P stripe has failed, without
1874 * a bad data or Q stripe.
1875 * TODO, we should redo the xor here.
1877 err = BLK_STS_IOERR;
1881 * a single failure in raid6 is rebuilt
1882 * in the pstripe code below
1887 /* make sure our ps and qs are in order */
1888 if (faila > failb) {
/* if the q stripe failed, do a pstripe reconstruction from the xors.
 * If both the q stripe and the P stripe are failed, we're
 * here due to a crc mismatch and we can't give them the data they want.
 */
1900 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1901 if (rbio->bbio->raid_map[faila] ==
1903 err = BLK_STS_IOERR;
1907 * otherwise we have one bad data stripe and
1908 * a good P stripe. raid5!
1913 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1914 raid6_datap_recov(rbio->real_stripes,
1915 PAGE_SIZE, faila, pointers);
1917 raid6_2data_recov(rbio->real_stripes,
1918 PAGE_SIZE, faila, failb,
1924 /* rebuild from P stripe here (raid5 or raid6) */
1925 BUG_ON(failb != -1);
1927 /* Copy parity block into failed block to start with */
1928 copy_page(pointers[faila], pointers[rbio->nr_data]);
1930 /* rearrange the pointer array */
1931 p = pointers[faila];
1932 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1933 pointers[stripe] = pointers[stripe + 1];
1934 pointers[rbio->nr_data - 1] = p;
1936 /* xor in the rest */
1937 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
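/*
 * After this xor, the page that used to hold the failed stripe's data
 * (now last in the pointer array and pre-filled with parity) contains
 * P xor all the surviving data pages, i.e. the reconstructed data.
 */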
1939 /* if we're doing this rebuild as part of an rmw, go through
1940 * and set all of our private rbio pages in the
1941 * failed stripes as uptodate. This way finish_rmw will
1942 * know they can be trusted. If this was a read reconstruction,
1943 * other endio functions will fiddle the uptodate bits
1945 if (rbio->operation == BTRFS_RBIO_WRITE) {
1946 for (i = 0; i < rbio->stripe_npages; i++) {
1948 page = rbio_stripe_page(rbio, faila, i);
1949 SetPageUptodate(page);
1952 page = rbio_stripe_page(rbio, failb, i);
1953 SetPageUptodate(page);
1957 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1959 * if we're rebuilding a read, we have to use
1960 * pages from the bio list
1962 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1963 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1964 (stripe == faila || stripe == failb)) {
1965 page = page_in_rbio(rbio, stripe, pagenr, 0);
1967 page = rbio_stripe_page(rbio, stripe, pagenr);
1979 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1980 * valid rbio which is consistent with ondisk content, thus such a
1981 * valid rbio can be cached to avoid further disk reads.
1983 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1984 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1986 * - In case of two failures, where rbio->failb != -1:
1988 * Do not cache this rbio since the above read reconstruction
1989 * (raid6_datap_recov() or raid6_2data_recov()) may have
1990 * changed some content of stripes which are not identical to
1991 * on-disk content any more, otherwise, a later write/recover
1992 * may steal stripe_pages from this rbio and end up with
1993 * corruptions or rebuild failures.
1995 * - In case of single failure, where rbio->failb == -1:
1997 * Cache this rbio iff the above read reconstruction is
1998 * executed without problems.
2000 if (err == BLK_STS_OK && rbio->failb < 0)
2001 cache_rbio_pages(rbio);
2003 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2005 rbio_orig_end_io(rbio, err);
2006 } else if (err == BLK_STS_OK) {
2010 if (rbio->operation == BTRFS_RBIO_WRITE)
2012 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2013 finish_parity_scrub(rbio, 0);
2017 rbio_orig_end_io(rbio, err);
2022 * This is called only for stripes we've read from disk to
2023 * reconstruct the parity.
2025 static void raid_recover_end_io(struct bio *bio)
2027 struct btrfs_raid_bio *rbio = bio->bi_private;
2030 * we only read stripe pages off the disk, set them
2031 * up to date if there were no errors
2034 fail_bio_stripe(rbio, bio);
2036 set_bio_pages_uptodate(bio);
2039 if (!atomic_dec_and_test(&rbio->stripes_pending))
2042 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2043 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2045 __raid_recover_end_io(rbio);
2049 * reads everything we need off the disk to reconstruct
2050 * the parity. endio handlers trigger final reconstruction
2051 * when the IO is done.
2053 * This is used both for reads from the higher layers and for
2054 * parity construction required to finish a rmw cycle.
2056 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2058 int bios_to_read = 0;
2059 struct bio_list bio_list;
2065 bio_list_init(&bio_list);
2067 ret = alloc_rbio_pages(rbio);
2071 atomic_set(&rbio->error, 0);
2074 * read everything that hasn't failed. Thanks to the
2075 * stripe cache, it is possible that some or all of these
2076 * pages are going to be uptodate.
2078 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2079 if (rbio->faila == stripe || rbio->failb == stripe) {
2080 atomic_inc(&rbio->error);
2084 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2088 * the rmw code may have already read this
2091 p = rbio_stripe_page(rbio, stripe, pagenr);
2092 if (PageUptodate(p))
2095 ret = rbio_add_io_page(rbio, &bio_list,
2096 rbio_stripe_page(rbio, stripe, pagenr),
2097 stripe, pagenr, rbio->stripe_len);
2103 bios_to_read = bio_list_size(&bio_list);
2104 if (!bios_to_read) {
2106 * we might have no bios to read just because the pages
2107 * were up to date, or we might have no bios to read because
2108 * the devices were gone.
2110 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2111 __raid_recover_end_io(rbio);
2119 * the bbio may be freed once we submit the last bio. Make sure
2120 * not to touch it after that
2122 atomic_set(&rbio->stripes_pending, bios_to_read);
2124 bio = bio_list_pop(&bio_list);
2128 bio->bi_private = rbio;
2129 bio->bi_end_io = raid_recover_end_io;
2130 bio->bi_opf = REQ_OP_READ;
2132 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2140 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2141 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2142 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2144 while ((bio = bio_list_pop(&bio_list)))
2151 * the main entry point for reads from the higher layers. This
2152 * is really only called when the normal read path had a failure,
2153 * so we assume the bio they send down corresponds to a failed part
2156 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2157 struct btrfs_bio *bbio, u64 stripe_len,
2158 int mirror_num, int generic_io)
2160 struct btrfs_raid_bio *rbio;
2164 ASSERT(bbio->mirror_num == mirror_num);
2165 btrfs_io_bio(bio)->mirror_num = mirror_num;
2168 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2171 btrfs_put_bbio(bbio);
2172 return PTR_ERR(rbio);
2175 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2176 bio_list_add(&rbio->bio_list, bio);
2177 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2179 rbio->faila = find_logical_bio_stripe(rbio, bio);
2180 if (rbio->faila == -1) {
2182 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2183 __func__, (u64)bio->bi_iter.bi_sector << 9,
2184 (u64)bio->bi_iter.bi_size, bbio->map_type);
2186 btrfs_put_bbio(bbio);
2192 btrfs_bio_counter_inc_noblocked(fs_info);
2193 rbio->generic_bio_cnt = 1;
2195 btrfs_get_bbio(bbio);
2200 * for 'mirror == 2', reconstruct from all other stripes.
2201 * for 'mirror_num > 2', select a stripe to fail on every retry.
2203 if (mirror_num > 2) {
2205 * 'mirror == 3' is to fail the p stripe and
2206 * reconstruct from the q stripe. 'mirror > 3' is to
2207 * fail a data stripe and reconstruct from p+q stripe.
2209 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2210 ASSERT(rbio->failb > 0);
2211 if (rbio->failb <= rbio->faila)
2215 ret = lock_stripe_add(rbio);
2218 * __raid56_parity_recover will end the bio with
2219 * any errors it hits. We don't want to return
2220 * its error value up the stack because our caller
2221 * will end up calling bio_endio with any nonzero
2225 __raid56_parity_recover(rbio);
2227 * our rbio has been added to the list of
2228 * rbios that will be handled after the
 * current lock owner is done
 */
2235 static void rmw_work(struct btrfs_work *work)
2237 struct btrfs_raid_bio *rbio;
2239 rbio = container_of(work, struct btrfs_raid_bio, work);
2240 raid56_rmw_stripe(rbio);
2243 static void read_rebuild_work(struct btrfs_work *work)
2245 struct btrfs_raid_bio *rbio;
2247 rbio = container_of(work, struct btrfs_raid_bio, work);
2248 __raid56_parity_recover(rbio);
2252 * The following code is used to scrub/replace the parity stripe
2254 * Caller must have already increased bio_counter for getting @bbio.
 * Note: We need to make sure all the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace. That
 * is, those pages just hold metadata or file data with checksums.
 */
2261 struct btrfs_raid_bio *
2262 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2263 struct btrfs_bio *bbio, u64 stripe_len,
2264 struct btrfs_device *scrub_dev,
2265 unsigned long *dbitmap, int stripe_nsectors)
2267 struct btrfs_raid_bio *rbio;
2270 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2273 bio_list_add(&rbio->bio_list, bio);
2275 * This is a special bio which is used to hold the completion handler
 * and make the scrub rbio similar to the other types
 */
2278 ASSERT(!bio->bi_iter.bi_size);
2279 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2282 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2283 * to the end position, so this search can start from the first parity
2286 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2287 if (bbio->stripes[i].dev == scrub_dev) {
2292 ASSERT(i < rbio->real_stripes);
/* For now we only support a sectorsize equal to the page size */
2295 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2296 ASSERT(rbio->stripe_npages == stripe_nsectors);
2297 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2300 * We have already increased bio_counter when getting bbio, record it
2301 * so we can free it at rbio_orig_end_io().
2303 rbio->generic_bio_cnt = 1;
2308 /* Used for both parity scrub and missing. */
2309 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2315 ASSERT(logical >= rbio->bbio->raid_map[0]);
2316 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2317 rbio->stripe_len * rbio->nr_data);
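/*
 * 'logical' falls somewhere inside the full stripe, so the index into
 * bio_pages is simply its byte offset from raid_map[0] (the logical
 * start of the full stripe) converted to pages.
 */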
2318 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2319 index = stripe_offset >> PAGE_SHIFT;
2320 rbio->bio_pages[index] = page;
 * We only scrub the parity for the horizontal stripes where we have
 * correct data, so we needn't allocate all pages for all the stripes.
 */
2327 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2334 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2335 for (i = 0; i < rbio->real_stripes; i++) {
2336 index = i * rbio->stripe_npages + bit;
2337 if (rbio->stripe_pages[index])
2340 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2343 rbio->stripe_pages[index] = page;
2349 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2352 struct btrfs_bio *bbio = rbio->bbio;
2353 void **pointers = rbio->finish_pointers;
2354 unsigned long *pbitmap = rbio->finish_pbitmap;
2355 int nr_data = rbio->nr_data;
2360 struct page *p_page = NULL;
2361 struct page *q_page = NULL;
2362 struct bio_list bio_list;
2367 bio_list_init(&bio_list);
2369 if (rbio->real_stripes - rbio->nr_data == 1) {
2370 p_stripe = rbio->real_stripes - 1;
2371 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2372 p_stripe = rbio->real_stripes - 2;
2373 q_stripe = rbio->real_stripes - 1;
2378 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2380 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
 * Because the higher layers (the scrubber) are unlikely to
 * use this area of the disk again soon, don't cache it.
 */
2388 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2393 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2396 SetPageUptodate(p_page);
2398 if (q_stripe != -1) {
2399 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2401 __free_page(p_page);
2404 SetPageUptodate(q_page);
2407 atomic_set(&rbio->error, 0);
2409 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2412 /* first collect one page from each data stripe */
2413 for (stripe = 0; stripe < nr_data; stripe++) {
2414 p = page_in_rbio(rbio, stripe, pagenr, 0);
2415 pointers[stripe] = kmap(p);
2418 /* then add the parity stripe */
2419 pointers[stripe++] = kmap(p_page);
2421 if (q_stripe != -1) {
2424 * raid6, add the qstripe and call the
2425 * library function to fill in our p/q
2427 pointers[stripe++] = kmap(q_page);
2429 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2433 copy_page(pointers[nr_data], pointers[0]);
2434 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2437 /* Check scrubbing parity and repair it */
2438 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2440 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2441 copy_page(parity, pointers[rbio->scrubp]);
/* Parity is correct, no writeback needed */
2444 bitmap_clear(rbio->dbitmap, pagenr, 1);
2447 for (stripe = 0; stripe < nr_data; stripe++)
2448 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2452 __free_page(p_page);
2454 __free_page(q_page);
2458 * time to start writing. Make bios for everything from the
 * higher layers (the bio_list in our rbio) and our p/q. Ignore everything else.
 */
2462 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2465 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2466 ret = rbio_add_io_page(rbio, &bio_list,
2467 page, rbio->scrubp, pagenr, rbio->stripe_len);
2475 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2478 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2479 ret = rbio_add_io_page(rbio, &bio_list, page,
2480 bbio->tgtdev_map[rbio->scrubp],
2481 pagenr, rbio->stripe_len);
2487 nr_data = bio_list_size(&bio_list);
/* Every parity is correct, there is nothing to write back */
2490 rbio_orig_end_io(rbio, BLK_STS_OK);
2494 atomic_set(&rbio->stripes_pending, nr_data);
2497 bio = bio_list_pop(&bio_list);
2501 bio->bi_private = rbio;
2502 bio->bi_end_io = raid_write_end_io;
2503 bio->bi_opf = REQ_OP_WRITE;
2510 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2512 while ((bio = bio_list_pop(&bio_list)))
2516 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2518 if (stripe >= 0 && stripe < rbio->nr_data)
2524 * While we're doing the parity check and repair, we could have errors
2525 * in reading pages off the disk. This checks for errors and if we're
2526 * not able to read the page it'll trigger parity reconstruction. The
2527 * parity scrub will be finished after we've reconstructed the failed
2530 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2532 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2535 if (rbio->faila >= 0 || rbio->failb >= 0) {
2536 int dfail = 0, failp = -1;
2538 if (is_data_stripe(rbio, rbio->faila))
2540 else if (is_parity_stripe(rbio->faila))
2541 failp = rbio->faila;
2543 if (is_data_stripe(rbio, rbio->failb))
2545 else if (is_parity_stripe(rbio->failb))
2546 failp = rbio->failb;
 * Because we cannot use the parity being scrubbed to repair
 * the data, the repair capability is reduced by one.
 * (In the case of RAID5, we cannot repair anything.)
 */
2553 if (dfail > rbio->bbio->max_errors - 1)
 * If all the data is good and only the parity is bad, just
 * repair the parity.
 */
2561 finish_parity_scrub(rbio, 0);
 * At this point we have one corrupted data stripe and one
 * corrupted parity on RAID6. If the corrupted parity is the
 * one being scrubbed, we can luckily use the other parity to
 * repair the data; otherwise we cannot repair the data stripe.
 */
2571 if (failp != rbio->scrubp)
2574 __raid_recover_end_io(rbio);
2576 finish_parity_scrub(rbio, 1);
2581 rbio_orig_end_io(rbio, BLK_STS_IOERR);
 * end io for the read phase of the parity scrub cycle. All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way.
 */
2592 static void raid56_parity_scrub_end_io(struct bio *bio)
2594 struct btrfs_raid_bio *rbio = bio->bi_private;
2597 fail_bio_stripe(rbio, bio);
2599 set_bio_pages_uptodate(bio);
2603 if (!atomic_dec_and_test(&rbio->stripes_pending))
 * this will normally call finish_parity_scrub to start our write,
 * but if there are any failed stripes we'll reconstruct from parity first.
 */
2611 validate_rbio_for_parity_scrub(rbio);
2614 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2616 int bios_to_read = 0;
2617 struct bio_list bio_list;
2623 bio_list_init(&bio_list);
2625 ret = alloc_rbio_essential_pages(rbio);
2629 atomic_set(&rbio->error, 0);
/* build a list of bios to read all the missing parts of this stripe */
2634 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2635 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2638 * we want to find all the pages missing from
2639 * the rbio and read them from the disk. If
2640 * page_in_rbio finds a page in the bio list
2641 * we don't need to read it off the stripe.
2643 page = page_in_rbio(rbio, stripe, pagenr, 1);
2647 page = rbio_stripe_page(rbio, stripe, pagenr);
2649 * the bio cache may have handed us an uptodate
2650 * page. If so, be happy and use it
2652 if (PageUptodate(page))
2655 ret = rbio_add_io_page(rbio, &bio_list, page,
2656 stripe, pagenr, rbio->stripe_len);
2662 bios_to_read = bio_list_size(&bio_list);
2663 if (!bios_to_read) {
2665 * this can happen if others have merged with
2666 * us, it means there is nothing left to read.
2667 * But if there are missing devices it may not be
2668 * safe to do the full stripe write yet.
2674 * the bbio may be freed once we submit the last bio. Make sure
2675 * not to touch it after that
2677 atomic_set(&rbio->stripes_pending, bios_to_read);
2679 bio = bio_list_pop(&bio_list);
2683 bio->bi_private = rbio;
2684 bio->bi_end_io = raid56_parity_scrub_end_io;
2685 bio->bi_opf = REQ_OP_READ;
2687 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2691 /* the actual write will happen once the reads are done */
2695 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2697 while ((bio = bio_list_pop(&bio_list)))
2703 validate_rbio_for_parity_scrub(rbio);
2706 static void scrub_parity_work(struct btrfs_work *work)
2708 struct btrfs_raid_bio *rbio;
2710 rbio = container_of(work, struct btrfs_raid_bio, work);
2711 raid56_parity_scrub_stripe(rbio);
2714 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2716 if (!lock_stripe_add(rbio))
2717 start_async_work(rbio, scrub_parity_work);
2720 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2722 struct btrfs_raid_bio *
2723 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2724 struct btrfs_bio *bbio, u64 length)
2726 struct btrfs_raid_bio *rbio;
2728 rbio = alloc_rbio(fs_info, bbio, length);
2732 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2733 bio_list_add(&rbio->bio_list, bio);
2735 * This is a special bio which is used to hold the completion handler
 * and make the scrub rbio similar to the other types
 */
2738 ASSERT(!bio->bi_iter.bi_size);
2740 rbio->faila = find_logical_bio_stripe(rbio, bio);
2741 if (rbio->faila == -1) {
2748 * When we get bbio, we have already increased bio_counter, record it
2749 * so we can free it at rbio_orig_end_io()
2751 rbio->generic_bio_cnt = 1;
2756 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2758 if (!lock_stripe_add(rbio))
2759 start_async_work(rbio, read_rebuild_work);