/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE		1024
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/* total number of stripes, including p/q and any replace target */
	int real_stripes;

	/* number of pages per stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe number that we're scrubbing */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
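
/*
 * A rough sizing sketch (illustrative, not from the original source): with
 * BTRFS_STRIPE_HASH_TABLE_BITS == 11 there are 2048 hash buckets, so at
 * roughly 32 bytes per bucket the table is about 64KiB, i.e. an order 4
 * allocation with 4KiB pages.  Lock debugging inflates each spinlock, which
 * is how the allocation can grow toward order 7 and why we fall back to
 * vmalloc via kvzalloc() rather than fail the mount.
 */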
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
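
/*
 * Worked example (illustrative, not from the original source): full stripes
 * start on large aligned boundaries, so for a stripe starting at logical byte
 * 0x4000000 the low 16 bits are all zero.  Feeding such values straight to
 * hash_64() means the hash only ever sees a handful of distinct low-bit
 * patterns; hashing (0x4000000 >> 16) instead spreads consecutive stripes
 * across the 1 << BTRFS_STRIPE_HASH_TABLE_BITS buckets much more evenly.
 */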
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
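
/*
 * Worked example (illustrative, assuming MAX_XOR_BLOCKS == 4): a call like
 * run_xor(pages, 6, PAGE_SIZE) folds six source pages into pages[6] in two
 * passes.  xor_blocks() first xors pages[0..3] into the destination, then a
 * second call xors pages[4..5].  The destination accumulates, which is why
 * the raid5 paths below seed the parity page with a memcpy of the first
 * data page and only run the xor over the remaining ones.
 */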
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
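
/*
 * For example (illustrative numbers): on a RAID5 rbio with nr_data == 2 and
 * stripe_len == 64KiB, the rbio is "full" only when the queued bios cover
 * exactly 2 * 64KiB == 128KiB, i.e. every data byte of the stripe is being
 * rewritten and parity can be computed without reading anything back.
 */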
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * a parity scrub reads the full stripe from the drive, then
	 * checks, repairs and writes back the parity.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		/* reads can only be merged if they expect the same failures */
		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
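
/*
 * Indexing example (illustrative): with stripe_npages == 16 and nr_data == 2
 * (RAID5 on three devices), stripe_pages[] is laid out as data0 pages 0..15,
 * data1 pages 16..31, parity pages 32..47.  So rbio_stripe_page(rbio, 1, 5)
 * returns stripe_pages[21] and rbio_pstripe_page(rbio, 5) returns
 * stripe_pages[37]; rbio_qstripe_page() would return NULL since there is no
 * Q stripe in this layout.
 */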
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);

	kfree(rbio);
}
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
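
/*
 * Example of the index math (illustrative): with stripe_len == 64KiB and
 * 4KiB pages, each data stripe spans 16 slots of the flat bio_pages[]
 * array, so (index == 1, pagenr == 5) maps to chunk_page == 1 * 16 + 5 ==
 * 21.  A non-NULL bio_pages[21] means the caller's bio already holds that
 * page; otherwise we fall back to our privately allocated stripe_pages[21].
 */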
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
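
/*
 * For example (illustrative numbers): a 64KiB stripe_len with 4KiB pages on
 * a 3-device RAID5 (two data stripes plus parity) needs
 * DIV_ROUND_UP(65536, 4096) * 3 == 48 pages for the full rbio.
 */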
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
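
/*
 * The single kzalloc() above carves out this layout (illustrative sketch):
 *
 *   [struct btrfs_raid_bio]
 *   [num_pages stripe_pages pointers]
 *   [num_pages bio_pages pointers]
 *   [stripe_npages bits of dbitmap, rounded up to whole longs]
 *
 * With the 48-page example above, the tail holds 96 page pointers plus a
 * single long of bitmap for the 16 pages per stripe.
 */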
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
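
/*
 * Geometry example for finish_rmw (illustrative): on a 4-device RAID6,
 * real_stripes == 4 and nr_data == 2, so p_stripe == 2 and q_stripe == 3.
 * For each of the stripe_npages horizontal page rows, pointers[] ends up as
 * { D0, D1, P, Q } and raid6_call.gen_syndrome() fills P and Q in place; on
 * RAID5 the same row is { D0, D1, P } and P is built with the memcpy plus
 * run_xor() pair instead.
 */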
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * When doing parity scrub, the bitmap marks the horizontal
		 * stripes that actually contain data.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
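
/*
 * Pointer-rotation example for the pstripe path above (illustrative): with
 * nr_data == 3 and faila == 1, the parity block is first copied into the
 * failed slot's buffer, then pointers[] is rotated so that buffer sits at
 * pointers[nr_data - 1], making it the run_xor() destination.  XORing the
 * two surviving data pages into the parity copy reproduces the missing
 * data, since P ^ D0 ^ D2 == D1.
 */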
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56, so we cannot recover it (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 *
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
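
/*
 * Mirror-number example (illustrative): on a 4-device RAID6 rbio
 * (real_stripes == 4, nr_data == 2), mirror_num == 3 gives
 * failb = 4 - (3 - 1) = 2, the P stripe, forcing reconstruction from Q;
 * mirror_num == 4 gives failb = 1 (decremented further if it collides with
 * faila), failing an extra data stripe so recovery exercises the full
 * p+q path on every retry.
 */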
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure that the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace;
 * that is, the pages must hold only metadata or file data covered by a
 * checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio look similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* Now we just support the sectorsize equals to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
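
/*
 * Index example (illustrative): bio_pages[] is addressed linearly across
 * the data portion of the full stripe, so a page whose logical address is
 * raid_map[0] + 5 * PAGE_SIZE lands at bio_pages[5], regardless of which
 * device that offset actually maps to.
 */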
/*
 * We only scrub the parity for the horizontal stripes where we have
 * correct data, so we needn't allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this
	 * area of the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the parity being scrubbed to repair data,
		 * so the repair capability is reduced by one (in the case
		 * of RAID5 we cannot repair anything).
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, only the parity is wrong; just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6.  If the corrupted parity
		 * is the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise we cannot repair
		 * the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
/*
 * end io for the read phase of the scrub/parity check cycle.  All the bios
 * here are physical stripe bios we've read from the disk so we can
 * recalculate the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make this rbio look similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}