Merge commit 'c039c332f23e794deb6d6f37b9f07ff3b27fb2cf' into md
author    NeilBrown <neilb@suse.de>  Wed, 1 Aug 2012 10:40:02 +0000 (20:40 +1000)
committer NeilBrown <neilb@suse.de>  Wed, 1 Aug 2012 10:40:02 +0000 (20:40 +1000)
Pull in prerequisites for adding raid10 support to dm-raid.

drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid1.h
drivers/md/raid10.c
drivers/md/raid10.h
drivers/md/raid5.c
drivers/md/raid5.h

drivers/md/md.c
index d5ab4493c8be656ecd28227994d7f7bbe06b8e2d..f6c46109b071310fdc0a28b89a916cd67cf69b9d 100644
@@ -3942,17 +3942,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                break;
        case clear:
                /* stopping an active array */
-               if (atomic_read(&mddev->openers) > 0)
-                       return -EBUSY;
                err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
-               if (mddev->pers) {
-                       if (atomic_read(&mddev->openers) > 0)
-                               return -EBUSY;
+               if (mddev->pers)
                        err = do_md_stop(mddev, 2, NULL);
-               else
+               else
                        err = 0; /* already inactive */
                break;
        case suspended:
drivers/md/raid1.c
index cacd008d68644428914b39822417209cd00dddc8..197f62681db562bd38d643a115be92940c46b9a6 100644
  */
 #define        NR_RAID1_BIOS 256
 
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context.  So we record
+ * the success by setting bios[n] to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
 /* When there are this many requests queue to be written by
  * the raid1 thread, we become 'congested' to provide back-pressure
  * for writeback.
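
The two macros above reserve impossible pointer values as in-band markers: no real struct bio can live at address 1 or 2, so a 'bios[n]' slot holding one of these encodes per-device state instead of a request, and BIO_SPECIAL() screens them out before anything dereferences the slot. A minimal user-space sketch of the same tagged-pointer idiom (struct and function names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct bio;                               /* opaque stand-in for the kernel's struct bio */

    #define IO_BLOCKED   ((struct bio *)1)    /* slot blocked by a known bad block */
    #define IO_MADE_GOOD ((struct bio *)2)    /* write succeeded; clear bad-block record */
    #define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)

    static void handle_slot(struct bio *b)
    {
            if (BIO_SPECIAL(b)) {             /* NULL, IO_BLOCKED or IO_MADE_GOOD */
                    if (b == IO_BLOCKED)
                            printf("blocked: skip this device\n");
                    else if (b == IO_MADE_GOOD)
                            printf("made good: clear the bad-block record\n");
                    else
                            printf("empty slot\n");
                    return;
            }
            printf("real bio at %p: process normally\n", (void *)b);
    }

    int main(void)
    {
            struct bio *slots[3] = { NULL, IO_BLOCKED, IO_MADE_GOOD };
            for (int i = 0; i < 3; i++)
                    handle_slot(slots[i]);
            return 0;
    }
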
@@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
        const sector_t this_sector = r1_bio->sector;
        int sectors;
        int best_good_sectors;
-       int start_disk;
-       int best_disk;
-       int i;
+       int best_disk, best_dist_disk, best_pending_disk;
+       int has_nonrot_disk;
+       int disk;
        sector_t best_dist;
+       unsigned int min_pending;
        struct md_rdev *rdev;
        int choose_first;
+       int choose_next_idle;
 
        rcu_read_lock();
        /*
@@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
  retry:
        sectors = r1_bio->sectors;
        best_disk = -1;
+       best_dist_disk = -1;
        best_dist = MaxSector;
+       best_pending_disk = -1;
+       min_pending = UINT_MAX;
        best_good_sectors = 0;
+       has_nonrot_disk = 0;
+       choose_next_idle = 0;
 
        if (conf->mddev->recovery_cp < MaxSector &&
-           (this_sector + sectors >= conf->next_resync)) {
+           (this_sector + sectors >= conf->next_resync))
                choose_first = 1;
-               start_disk = 0;
-       } else {
+       else
                choose_first = 0;
-               start_disk = conf->last_used;
-       }
 
-       for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
+       for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
                sector_t first_bad;
                int bad_sectors;
-
-               int disk = start_disk + i;
-               if (disk >= conf->raid_disks * 2)
-                       disk -= conf->raid_disks * 2;
+               unsigned int pending;
+               bool nonrot;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                } else
                        best_good_sectors = sectors;
 
+               nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+               has_nonrot_disk |= nonrot;
+               pending = atomic_read(&rdev->nr_pending);
                dist = abs(this_sector - conf->mirrors[disk].head_position);
-               if (choose_first
-                   /* Don't change to another disk for sequential reads */
-                   || conf->next_seq_sect == this_sector
-                   || dist == 0
-                   /* If device is idle, use it */
-                   || atomic_read(&rdev->nr_pending) == 0) {
+               if (choose_first) {
+                       best_disk = disk;
+                       break;
+               }
+               /* Don't change to another disk for sequential reads */
+               if (conf->mirrors[disk].next_seq_sect == this_sector
+                   || dist == 0) {
+                       int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
+                       struct raid1_info *mirror = &conf->mirrors[disk];
+
+                       best_disk = disk;
+                       /*
+                        * If the buffered sequential IO size exceeds the
+                        * optimal iosize, check whether there is an idle
+                        * disk. If so, choose the idle disk. read_balance
+                        * could already have chosen an idle disk before
+                        * noticing that this is sequential IO on this disk.
+                        * That doesn't matter: this disk will go idle, and
+                        * will be used again once the first disk's IO size
+                        * exceeds the optimal iosize. This way the first
+                        * disk's iosize is at least the optimal iosize. The
+                        * second disk's iosize might be small, but that is
+                        * not a big deal since by the time the second disk
+                        * starts IO, the first disk is likely still busy.
+                        */
+                       if (nonrot && opt_iosize > 0 &&
+                           mirror->seq_start != MaxSector &&
+                           mirror->next_seq_sect > opt_iosize &&
+                           mirror->next_seq_sect - opt_iosize >=
+                           mirror->seq_start) {
+                               choose_next_idle = 1;
+                               continue;
+                       }
+                       break;
+               }
+               /* If device is idle, use it */
+               if (pending == 0) {
                        best_disk = disk;
                        break;
                }
+
+               if (choose_next_idle)
+                       continue;
+
+               if (min_pending > pending) {
+                       min_pending = pending;
+                       best_pending_disk = disk;
+               }
+
                if (dist < best_dist) {
                        best_dist = dist;
-                       best_disk = disk;
+                       best_dist_disk = disk;
                }
        }
 
+       /*
+        * If all disks are rotational, choose the closest disk. If any disk is
+        * non-rotational, choose the disk with the fewest pending requests
+        * even if that disk is rotational, which may or may not be optimal for
+        * raids with mixed rotational/non-rotational disks depending on
+        * workload.
+        */
+       if (best_disk == -1) {
+               if (has_nonrot_disk)
+                       best_disk = best_pending_disk;
+               else
+                       best_disk = best_dist_disk;
+       }
+
        if (best_disk >= 0) {
                rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
                if (!rdev)
@@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                        goto retry;
                }
                sectors = best_good_sectors;
-               conf->next_seq_sect = this_sector + sectors;
-               conf->last_used = best_disk;
+
+               if (conf->mirrors[best_disk].next_seq_sect != this_sector)
+                       conf->mirrors[best_disk].seq_start = this_sector;
+
+               conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
        }
        rcu_read_unlock();
        *max_sectors = sectors;
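
The rewritten loop above tracks two fallback candidates at once: the closest disk by head distance and the least-loaded disk by pending IO, choosing between them by whether any non-rotational device is present. A simplified user-space sketch of just that fallback policy, ignoring the sequential-read and bad-block handling (names and data are illustrative):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct mirror {
            long dist;          /* |read sector - head_position| */
            unsigned pending;   /* in-flight requests */
            bool nonrot;        /* non-rotational (SSD)? */
    };

    static int pick_disk(const struct mirror *m, int n)
    {
            int best_dist_disk = -1, best_pending_disk = -1;
            long best_dist = LONG_MAX;
            unsigned min_pending = UINT_MAX;
            bool has_nonrot = false;

            for (int i = 0; i < n; i++) {
                    has_nonrot |= m[i].nonrot;
                    if (m[i].pending == 0)
                            return i;               /* an idle disk wins outright */
                    if (m[i].pending < min_pending) {
                            min_pending = m[i].pending;
                            best_pending_disk = i;
                    }
                    if (m[i].dist < best_dist) {
                            best_dist = m[i].dist;
                            best_dist_disk = i;
                    }
            }
            /* any SSD present: load matters more than seek distance */
            return has_nonrot ? best_pending_disk : best_dist_disk;
    }

    int main(void)
    {
            struct mirror m[] = {
                    { .dist = 10,  .pending = 4, .nonrot = false },
                    { .dist = 500, .pending = 1, .nonrot = true  },
            };
            printf("chose disk %d\n", pick_disk(m, 2));   /* 1: fewest pending */
            return 0;
    }
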
@@ -873,7 +947,7 @@ do_sync_io:
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
        struct r1conf *conf = mddev->private;
-       struct mirror_info *mirror;
+       struct raid1_info *mirror;
        struct r1bio *r1_bio;
        struct bio *read_bio;
        int i, disks;
@@ -1364,7 +1438,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        struct r1conf *conf = mddev->private;
        int err = -EEXIST;
        int mirror = 0;
-       struct mirror_info *p;
+       struct raid1_info *p;
        int first = 0;
        int last = conf->raid_disks - 1;
        struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -1433,7 +1507,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
        struct r1conf *conf = mddev->private;
        int err = 0;
        int number = rdev->raid_disk;
-       struct mirror_info *p = conf->mirrors+ number;
+       struct raid1_info *p = conf->mirrors + number;
 
        if (rdev != p->rdev)
                p = conf->mirrors + conf->raid_disks + number;
@@ -2371,6 +2445,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                                bio->bi_rw = READ;
                                bio->bi_end_io = end_sync_read;
                                read_targets++;
+                       } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
+                               test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+                               !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+                               /*
+                                * The device is suitable for reading (InSync),
+                                * but has bad block(s) here. Let's try to correct them
+                                * if we are doing resync or repair. Otherwise, leave
+                                * this device alone for this sync request.
+                                */
+                               bio->bi_rw = WRITE;
+                               bio->bi_end_io = end_sync_write;
+                               write_targets++;
                        }
                }
                if (bio->bi_end_io) {
@@ -2428,7 +2514,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                /* There is nowhere to write, so all non-sync
                 * drives must be failed - so we are finished
                 */
-               sector_t rv = max_sector - sector_nr;
+               sector_t rv;
+               if (min_bad > 0)
+                       max_sector = sector_nr + min_bad;
+               rv = max_sector - sector_nr;
                *skipped = 1;
                put_buf(r1_bio);
                return rv;
@@ -2521,7 +2610,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 {
        struct r1conf *conf;
        int i;
-       struct mirror_info *disk;
+       struct raid1_info *disk;
        struct md_rdev *rdev;
        int err = -ENOMEM;
 
@@ -2529,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
        if (!conf)
                goto abort;
 
-       conf->mirrors = kzalloc(sizeof(struct mirror_info)
+       conf->mirrors = kzalloc(sizeof(struct raid1_info)
                                * mddev->raid_disks * 2,
                                 GFP_KERNEL);
        if (!conf->mirrors)
@@ -2572,6 +2661,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                        mddev->merge_check_needed = 1;
 
                disk->head_position = 0;
+               disk->seq_start = MaxSector;
        }
        conf->raid_disks = mddev->raid_disks;
        conf->mddev = mddev;
@@ -2585,7 +2675,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
        conf->recovery_disabled = mddev->recovery_disabled - 1;
 
        err = -EIO;
-       conf->last_used = -1;
        for (i = 0; i < conf->raid_disks * 2; i++) {
 
                disk = conf->mirrors + i;
@@ -2611,19 +2700,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                        if (disk->rdev &&
                            (disk->rdev->saved_raid_disk < 0))
                                conf->fullsync = 1;
-               } else if (conf->last_used < 0)
-                       /*
-                        * The first working device is used as a
-                        * starting point to read balancing.
-                        */
-                       conf->last_used = i;
+               }
        }
 
-       if (conf->last_used < 0) {
-               printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
-                      mdname(mddev));
-               goto abort;
-       }
        err = -ENOMEM;
        conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
@@ -2798,7 +2877,7 @@ static int raid1_reshape(struct mddev *mddev)
         */
        mempool_t *newpool, *oldpool;
        struct pool_info *newpoolinfo;
-       struct mirror_info *newmirrors;
+       struct raid1_info *newmirrors;
        struct r1conf *conf = mddev->private;
        int cnt, raid_disks;
        unsigned long flags;
@@ -2841,7 +2920,7 @@ static int raid1_reshape(struct mddev *mddev)
                kfree(newpoolinfo);
                return -ENOMEM;
        }
-       newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+       newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
                             GFP_KERNEL);
        if (!newmirrors) {
                kfree(newpoolinfo);
@@ -2880,7 +2959,6 @@ static int raid1_reshape(struct mddev *mddev)
        conf->raid_disks = mddev->raid_disks = raid_disks;
        mddev->delta_disks = 0;
 
-       conf->last_used = 0; /* just make sure it is in-range */
        lower_barrier(conf);
 
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
drivers/md/raid1.h
index 80ded139314cf8a649729ce8e22915ca11bbd17f..0ff3715fb7eba5ec4fed61a9922276b07b363aff 100644
@@ -1,9 +1,15 @@
 #ifndef _RAID1_H
 #define _RAID1_H
 
-struct mirror_info {
+struct raid1_info {
        struct md_rdev  *rdev;
        sector_t        head_position;
+
+       /* When choosing the best device for a read (read_balance())
+        * we try to keep sequential reads on the same device
+        */
+       sector_t        next_seq_sect;
+       sector_t        seq_start;
 };
 
 /*
@@ -24,17 +30,11 @@ struct pool_info {
 
 struct r1conf {
        struct mddev            *mddev;
-       struct mirror_info      *mirrors;       /* twice 'raid_disks' to
+       struct raid1_info       *mirrors;       /* twice 'raid_disks' to
                                                 * allow for replacements.
                                                 */
        int                     raid_disks;
 
-       /* When choose the best device for a read (read_balance())
-        * we try to keep sequential reads one the same device
-        * using 'last_used' and 'next_seq_sect'
-        */
-       int                     last_used;
-       sector_t                next_seq_sect;
        /* During resync, read_balancing is only allowed on the part
         * of the array that has been resynced.  'next_resync' tells us
         * where that is.
@@ -135,20 +135,6 @@ struct r1bio {
        /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
 };
 
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error.  To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio *)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context.  So we record
- * the success by setting bios[n] to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
 /* bits for r1bio.state */
 #define        R1BIO_Uptodate  0
 #define        R1BIO_IsSync    1
drivers/md/raid10.c
index 8da6282254c3e822a27702c469b9afce720bda43..e2549deab7c3a87c9f35c44499fa21e2f9f57b63 100644
  */
 #define        NR_RAID10_BIOS 256
 
-/* When there are this many requests queue to be written by
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context.  So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
+/* When there are this many requests queued to be written by
  * the raid10 thread, we become 'congested' to provide back-pressure
  * for writeback.
  */
@@ -717,7 +731,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
        int sectors = r10_bio->sectors;
        int best_good_sectors;
        sector_t new_distance, best_dist;
-       struct md_rdev *rdev, *best_rdev;
+       struct md_rdev *best_rdev, *rdev = NULL;
        int do_balance;
        int best_slot;
        struct geom *geo = &conf->geo;
@@ -839,9 +853,8 @@ retry:
        return rdev;
 }
 
-static int raid10_congested(void *data, int bits)
+int md_raid10_congested(struct mddev *mddev, int bits)
 {
-       struct mddev *mddev = data;
        struct r10conf *conf = mddev->private;
        int i, ret = 0;
 
@@ -849,8 +862,6 @@ static int raid10_congested(void *data, int bits)
            conf->pending_count >= max_queued_requests)
                return 1;
 
-       if (mddev_congested(mddev, bits))
-               return 1;
        rcu_read_lock();
        for (i = 0;
             (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
@@ -866,6 +877,15 @@ static int raid10_congested(void *data, int bits)
        rcu_read_unlock();
        return ret;
 }
+EXPORT_SYMBOL_GPL(md_raid10_congested);
+
+static int raid10_congested(void *data, int bits)
+{
+       struct mddev *mddev = data;
+
+       return mddev_congested(mddev, bits) ||
+               md_raid10_congested(mddev, bits);
+}
 
 static void flush_pending_writes(struct r10conf *conf)
 {
@@ -1546,7 +1566,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 static void print_conf(struct r10conf *conf)
 {
        int i;
-       struct mirror_info *tmp;
+       struct raid10_info *tmp;
 
        printk(KERN_DEBUG "RAID10 conf printout:\n");
        if (!conf) {
@@ -1580,7 +1600,7 @@ static int raid10_spare_active(struct mddev *mddev)
 {
        int i;
        struct r10conf *conf = mddev->private;
-       struct mirror_info *tmp;
+       struct raid10_info *tmp;
        int count = 0;
        unsigned long flags;
 
@@ -1655,7 +1675,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        else
                mirror = first;
        for ( ; mirror <= last ; mirror++) {
-               struct mirror_info *p = &conf->mirrors[mirror];
+               struct raid10_info *p = &conf->mirrors[mirror];
                if (p->recovery_disabled == mddev->recovery_disabled)
                        continue;
                if (p->rdev) {
@@ -1709,7 +1729,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
        int err = 0;
        int number = rdev->raid_disk;
        struct md_rdev **rdevp;
-       struct mirror_info *p = conf->mirrors + number;
+       struct raid10_info *p = conf->mirrors + number;
 
        print_conf(conf);
        if (rdev == p->rdev)
@@ -2876,7 +2896,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        sector_t sect;
                        int must_sync;
                        int any_working;
-                       struct mirror_info *mirror = &conf->mirrors[i];
+                       struct raid10_info *mirror = &conf->mirrors[i];
 
                        if ((mirror->rdev == NULL ||
                             test_bit(In_sync, &mirror->rdev->flags))
@@ -3388,7 +3408,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                goto out;
 
        /* FIXME calc properly */
-       conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
+       conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
                                                            max(0,mddev->delta_disks)),
                                GFP_KERNEL);
        if (!conf->mirrors)
@@ -3452,7 +3472,7 @@ static int run(struct mddev *mddev)
 {
        struct r10conf *conf;
        int i, disk_idx, chunk_size;
-       struct mirror_info *disk;
+       struct raid10_info *disk;
        struct md_rdev *rdev;
        sector_t size;
        sector_t min_offset_diff = 0;
@@ -3472,12 +3492,14 @@ static int run(struct mddev *mddev)
        conf->thread = NULL;
 
        chunk_size = mddev->chunk_sectors << 9;
-       blk_queue_io_min(mddev->queue, chunk_size);
-       if (conf->geo.raid_disks % conf->geo.near_copies)
-               blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-       else
-               blk_queue_io_opt(mddev->queue, chunk_size *
-                                (conf->geo.raid_disks / conf->geo.near_copies));
+       if (mddev->queue) {
+               blk_queue_io_min(mddev->queue, chunk_size);
+               if (conf->geo.raid_disks % conf->geo.near_copies)
+                       blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
+               else
+                       blk_queue_io_opt(mddev->queue, chunk_size *
+                                        (conf->geo.raid_disks / conf->geo.near_copies));
+       }
 
        rdev_for_each(rdev, mddev) {
                long long diff;
@@ -3511,8 +3533,9 @@ static int run(struct mddev *mddev)
                if (first || diff < min_offset_diff)
                        min_offset_diff = diff;
 
-               disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                 rdev->data_offset << 9);
+               if (mddev->gendisk)
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
 
                disk->head_position = 0;
        }
@@ -3575,22 +3598,22 @@ static int run(struct mddev *mddev)
        md_set_array_sectors(mddev, size);
        mddev->resync_max_sectors = size;
 
-       mddev->queue->backing_dev_info.congested_fn = raid10_congested;
-       mddev->queue->backing_dev_info.congested_data = mddev;
-
-       /* Calculate max read-ahead size.
-        * We need to readahead at least twice a whole stripe....
-        * maybe...
-        */
-       {
+       if (mddev->queue) {
                int stripe = conf->geo.raid_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
+               mddev->queue->backing_dev_info.congested_fn = raid10_congested;
+               mddev->queue->backing_dev_info.congested_data = mddev;
+
+               /* Calculate max read-ahead size.
+                * We need to readahead at least twice a whole stripe....
+                * maybe...
+                */
                stripe /= conf->geo.near_copies;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+               blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
        }
 
-       blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 
        if (md_integrity_register(mddev))
                goto out_free_conf;
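
The hunks in run() above all follow one pattern: when this raid10 instance is driven by dm-raid (the point of this merge), mddev->queue and mddev->gendisk are NULL, so every piece of block-queue tuning must be guarded rather than assumed. A toy illustration of the guard (the structures here are stand-ins, not kernel types):

    #include <stddef.h>
    #include <stdio.h>

    struct queue { unsigned long ra_pages; };
    struct mddev { struct queue *queue; };      /* NULL when stacked under dm-raid */

    static void tune_readahead(struct mddev *mddev, unsigned long stripe_pages)
    {
            if (!mddev->queue)                  /* no queue of our own: nothing to tune */
                    return;
            if (mddev->queue->ra_pages < 2 * stripe_pages)
                    mddev->queue->ra_pages = 2 * stripe_pages;
    }

    int main(void)
    {
            struct queue q = { .ra_pages = 32 };
            struct mddev native  = { .queue = &q };
            struct mddev stacked = { .queue = NULL };

            tune_readahead(&native, 64);        /* ra_pages becomes 128 */
            tune_readahead(&stacked, 64);       /* safely does nothing */
            printf("native ra_pages=%lu\n", q.ra_pages);
            return 0;
    }
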
@@ -3641,7 +3664,10 @@ static int stop(struct mddev *mddev)
        lower_barrier(conf);
 
        md_unregister_thread(&mddev->thread);
-       blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+       if (mddev->queue)
+               /* the unplug fn references 'conf'*/
+               blk_sync_queue(mddev->queue);
+
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
        kfree(conf->mirrors);
@@ -3805,7 +3831,7 @@ static int raid10_check_reshape(struct mddev *mddev)
        if (mddev->delta_disks > 0) {
                /* allocate new 'mirrors' list */
                conf->mirrors_new = kzalloc(
-                       sizeof(struct mirror_info)
+                       sizeof(struct raid10_info)
                        *(mddev->raid_disks +
                          mddev->delta_disks),
                        GFP_KERNEL);
@@ -3930,7 +3956,7 @@ static int raid10_start_reshape(struct mddev *mddev)
        spin_lock_irq(&conf->device_lock);
        if (conf->mirrors_new) {
                memcpy(conf->mirrors_new, conf->mirrors,
-                      sizeof(struct mirror_info)*conf->prev.raid_disks);
+                      sizeof(struct raid10_info)*conf->prev.raid_disks);
                smp_mb();
                kfree(conf->mirrors_old); /* FIXME and elsewhere */
                conf->mirrors_old = conf->mirrors;
drivers/md/raid10.h
index 135b1b0a155438624b56ebadf42d8971463adc6a..007c2c68dd8369f2acbcd9ae8d3c155399d13674 100644
@@ -1,7 +1,7 @@
 #ifndef _RAID10_H
 #define _RAID10_H
 
-struct mirror_info {
+struct raid10_info {
        struct md_rdev  *rdev, *replacement;
        sector_t        head_position;
        int             recovery_disabled;      /* matches
@@ -13,8 +13,8 @@ struct mirror_info {
 
 struct r10conf {
        struct mddev            *mddev;
-       struct mirror_info      *mirrors;
-       struct mirror_info      *mirrors_new, *mirrors_old;
+       struct raid10_info      *mirrors;
+       struct raid10_info      *mirrors_new, *mirrors_old;
        spinlock_t              device_lock;
 
        /* geometry */
@@ -123,20 +123,6 @@ struct r10bio {
        } devs[0];
 };
 
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error.  To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio*)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context.  So we record
- * the success by setting devs[n].bio to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
 /* bits for r10bio.state */
 enum r10bio_state {
        R10BIO_Uptodate,
@@ -159,4 +145,7 @@ enum r10bio_state {
  */
        R10BIO_Previous,
 };
+
+extern int md_raid10_congested(struct mddev *mddev, int bits);
+
 #endif
drivers/md/raid5.c
index 04348d76bb30fa8831964ea980ec2df912a45f92..259f519814ca0f083696d875dc16fdc4bd141659 100644
@@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
  * We maintain a biased count of active stripes in the bottom 16 bits of
  * bi_phys_segments, and a count of processed stripes in the upper 16 bits
  */
-static inline int raid5_bi_phys_segments(struct bio *bio)
+static inline int raid5_bi_processed_stripes(struct bio *bio)
 {
-       return bio->bi_phys_segments & 0xffff;
+       atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+       return (atomic_read(segments) >> 16) & 0xffff;
 }
 
-static inline int raid5_bi_hw_segments(struct bio *bio)
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
 {
-       return (bio->bi_phys_segments >> 16) & 0xffff;
+       atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+       return atomic_sub_return(1, segments) & 0xffff;
 }
 
-static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
 {
-       --bio->bi_phys_segments;
-       return raid5_bi_phys_segments(bio);
+       atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+       atomic_inc(segments);
 }
 
-static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+       unsigned int cnt)
 {
-       unsigned short val = raid5_bi_hw_segments(bio);
+       atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+       int old, new;
 
-       --val;
-       bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
-       return val;
+       do {
+               old = atomic_read(segments);
+               new = (old & 0xffff) | (cnt << 16);
+       } while (atomic_cmpxchg(segments, old, new) != old);
 }
 
-static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
 {
-       bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+       atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+       atomic_set(segments, cnt);
 }
 
 /* Find first data disk in a raid6 stripe */
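
These accessors pack two counters into one 32-bit field by treating bi_phys_segments as an atomic: the low 16 bits count stripes still holding the bio, the high 16 bits record stripes already processed, and the setter uses a compare-and-swap loop so only the high half changes. A user-space sketch of the same packing with C11 atomics in place of the kernel's atomic_t (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint segments;                /* stands in for bio->bi_phys_segments */

    static unsigned processed_stripes(void)
    {
            return (atomic_load(&segments) >> 16) & 0xffff;
    }

    static unsigned dec_active_stripes(void)    /* returns remaining active count */
    {
            return (atomic_fetch_sub(&segments, 1) - 1) & 0xffff;
    }

    static void inc_active_stripes(void)
    {
            atomic_fetch_add(&segments, 1);
    }

    static void set_processed_stripes(unsigned cnt)
    {
            unsigned old = atomic_load(&segments), new;
            do {                                /* CAS loop: replace only the high half */
                    new = (old & 0xffff) | (cnt << 16);
            } while (!atomic_compare_exchange_weak(&segments, &old, new));
    }

    int main(void)
    {
            atomic_store(&segments, 1);         /* biased count of active stripes */
            inc_active_stripes();
            set_processed_stripes(5);
            printf("processed=%u\n", processed_stripes());     /* 5 */
            printf("active left=%u\n", dec_active_stripes());  /* 1 */
            return 0;
    }
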
@@ -190,49 +196,56 @@ static int stripe_operations_active(struct stripe_head *sh)
               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 }
 
-static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
 {
-       if (atomic_dec_and_test(&sh->count)) {
-               BUG_ON(!list_empty(&sh->lru));
-               BUG_ON(atomic_read(&conf->active_stripes)==0);
-               if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
-                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
-                               list_add_tail(&sh->lru, &conf->delayed_list);
-                       else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-                                  sh->bm_seq - conf->seq_write > 0)
-                               list_add_tail(&sh->lru, &conf->bitmap_list);
-                       else {
-                               clear_bit(STRIPE_DELAYED, &sh->state);
-                               clear_bit(STRIPE_BIT_DELAY, &sh->state);
-                               list_add_tail(&sh->lru, &conf->handle_list);
-                       }
-                       md_wakeup_thread(conf->mddev->thread);
-               } else {
-                       BUG_ON(stripe_operations_active(sh));
-                       if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
-                               if (atomic_dec_return(&conf->preread_active_stripes)
-                                   < IO_THRESHOLD)
-                                       md_wakeup_thread(conf->mddev->thread);
-                       atomic_dec(&conf->active_stripes);
-                       if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
-                               list_add_tail(&sh->lru, &conf->inactive_list);
-                               wake_up(&conf->wait_for_stripe);
-                               if (conf->retry_read_aligned)
-                                       md_wakeup_thread(conf->mddev->thread);
-                       }
+       BUG_ON(!list_empty(&sh->lru));
+       BUG_ON(atomic_read(&conf->active_stripes)==0);
+       if (test_bit(STRIPE_HANDLE, &sh->state)) {
+               if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                   !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       list_add_tail(&sh->lru, &conf->delayed_list);
+               else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+                          sh->bm_seq - conf->seq_write > 0)
+                       list_add_tail(&sh->lru, &conf->bitmap_list);
+               else {
+                       clear_bit(STRIPE_DELAYED, &sh->state);
+                       clear_bit(STRIPE_BIT_DELAY, &sh->state);
+                       list_add_tail(&sh->lru, &conf->handle_list);
+               }
+               md_wakeup_thread(conf->mddev->thread);
+       } else {
+               BUG_ON(stripe_operations_active(sh));
+               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       if (atomic_dec_return(&conf->preread_active_stripes)
+                           < IO_THRESHOLD)
+                               md_wakeup_thread(conf->mddev->thread);
+               atomic_dec(&conf->active_stripes);
+               if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+                       list_add_tail(&sh->lru, &conf->inactive_list);
+                       wake_up(&conf->wait_for_stripe);
+                       if (conf->retry_read_aligned)
+                               md_wakeup_thread(conf->mddev->thread);
                }
        }
 }
 
+static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+       if (atomic_dec_and_test(&sh->count))
+               do_release_stripe(conf, sh);
+}
+
 static void release_stripe(struct stripe_head *sh)
 {
        struct r5conf *conf = sh->raid_conf;
        unsigned long flags;
 
-       spin_lock_irqsave(&conf->device_lock, flags);
-       __release_stripe(conf, sh);
-       spin_unlock_irqrestore(&conf->device_lock, flags);
+       local_irq_save(flags);
+       if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+               do_release_stripe(conf, sh);
+               spin_unlock(&conf->device_lock);
+       }
+       local_irq_restore(flags);
 }
 
 static inline void remove_hash(struct stripe_head *sh)
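
release_stripe() now decrements sh->count lock-free in the common case and takes conf->device_lock only for the final reference, when do_release_stripe() must actually move the stripe between lists. A user-space sketch of that fast-path idea with C11 atomics and a pthread mutex; note this is a simplification, since the kernel's atomic_dec_and_lock() acquires the lock before the final decrement so the count cannot be re-raised in between:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int count = 2;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void put_ref(void)
    {
            /* fast path: not the last reference, no lock taken */
            if (atomic_fetch_sub(&count, 1) != 1)
                    return;
            /* slow path: last reference, take the lock to update shared lists */
            pthread_mutex_lock(&list_lock);
            printf("last reference dropped: move object to a free list\n");
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            put_ref();      /* fast path */
            put_ref();      /* slow path */
            return 0;
    }
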
@@ -640,6 +653,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        else
                                bi->bi_sector = (sh->sector
                                                 + rdev->data_offset);
+                       if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+                               bi->bi_rw |= REQ_FLUSH;
+
                        bi->bi_flags = 1 << BIO_UPTODATE;
                        bi->bi_idx = 0;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -749,14 +765,12 @@ static void ops_complete_biofill(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
        struct bio *return_bi = NULL;
-       struct r5conf *conf = sh->raid_conf;
        int i;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        /* clear completed biofills */
-       spin_lock_irq(&conf->device_lock);
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
 
@@ -774,7 +788,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
-                               if (!raid5_dec_bi_phys_segments(rbi)) {
+                               if (!raid5_dec_bi_active_stripes(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
@@ -782,7 +796,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        }
                }
        }
-       spin_unlock_irq(&conf->device_lock);
        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 
        return_io(return_bi);
@@ -794,7 +807,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
 static void ops_run_biofill(struct stripe_head *sh)
 {
        struct dma_async_tx_descriptor *tx = NULL;
-       struct r5conf *conf = sh->raid_conf;
        struct async_submit_ctl submit;
        int i;
 
@@ -805,10 +817,10 @@ static void ops_run_biofill(struct stripe_head *sh)
                struct r5dev *dev = &sh->dev[i];
                if (test_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi;
-                       spin_lock_irq(&conf->device_lock);
+                       spin_lock_irq(&sh->stripe_lock);
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
-                       spin_unlock_irq(&conf->device_lock);
+                       spin_unlock_irq(&sh->stripe_lock);
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
@@ -1144,12 +1156,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
                if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
                        struct bio *wbi;
 
-                       spin_lock_irq(&sh->raid_conf->device_lock);
+                       spin_lock_irq(&sh->stripe_lock);
                        chosen = dev->towrite;
                        dev->towrite = NULL;
                        BUG_ON(dev->written);
                        wbi = dev->written = chosen;
-                       spin_unlock_irq(&sh->raid_conf->device_lock);
+                       spin_unlock_irq(&sh->stripe_lock);
 
                        while (wbi && wbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
@@ -1454,6 +1466,8 @@ static int grow_one_stripe(struct r5conf *conf)
        init_waitqueue_head(&sh->ops.wait_for_ops);
        #endif
 
+       spin_lock_init(&sh->stripe_lock);
+
        if (grow_buffers(sh)) {
                shrink_buffers(sh);
                kmem_cache_free(conf->slab_cache, sh);
@@ -1739,7 +1753,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                        atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-               }
+               } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+                       clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+
                if (atomic_read(&rdev->read_errors))
                        atomic_set(&rdev->read_errors, 0);
        } else {
@@ -1784,7 +1800,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else
                        retry = 1;
                if (retry)
-                       set_bit(R5_ReadError, &sh->dev[i].flags);
+                       if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+                               set_bit(R5_ReadError, &sh->dev[i].flags);
+                               clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+                       } else
+                               set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@ -2340,11 +2360,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                (unsigned long long)bi->bi_sector,
                (unsigned long long)sh->sector);
 
-
-       spin_lock_irq(&conf->device_lock);
+       /*
+        * If several bios share a stripe, the bio's bi_phys_segments acts as
+        * a reference count to avoid races. The reference count should
+        * already be increased before this function is called (for example,
+        * in make_request()), so other bios sharing this stripe will not free
+        * the stripe. If a stripe is touched by only a single bio, the stripe
+        * lock will protect it.
+        */
+       spin_lock_irq(&sh->stripe_lock);
        if (forwrite) {
                bip = &sh->dev[dd_idx].towrite;
-               if (*bip == NULL && sh->dev[dd_idx].written == NULL)
+               if (*bip == NULL)
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
@@ -2360,7 +2387,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        if (*bip)
                bi->bi_next = *bip;
        *bip = bi;
-       bi->bi_phys_segments++;
+       raid5_inc_bi_active_stripes(bi);
 
        if (forwrite) {
                /* check if page is covered */
@@ -2375,7 +2402,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
        }
-       spin_unlock_irq(&conf->device_lock);
+       spin_unlock_irq(&sh->stripe_lock);
 
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)(*bip)->bi_sector,
@@ -2391,7 +2418,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 
  overlap:
        set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
-       spin_unlock_irq(&conf->device_lock);
+       spin_unlock_irq(&sh->stripe_lock);
        return 0;
 }
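
The locking comment above relies on a lifecycle rule rather than a global lock: the submitter biases the bio's active-stripe count before attaching it to any stripe, each add_stripe_bio() adds a reference, each completing stripe drops one, and the bio is ended only when the count reaches zero. A simplified user-space sketch of that lifecycle (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct req {
            atomic_int active_stripes;          /* stands in for bi_phys_segments */
    };

    static void attach_to_stripe(struct req *r)
    {
            atomic_fetch_add(&r->active_stripes, 1);
    }

    static void stripe_done(struct req *r)
    {
            if (atomic_fetch_sub(&r->active_stripes, 1) == 1)
                    printf("all stripes done: end the request\n");
    }

    int main(void)
    {
            struct req r;
            atomic_init(&r.active_stripes, 1);  /* bias: submitter holds a reference */
            attach_to_stripe(&r);
            attach_to_stripe(&r);
            stripe_done(&r);                    /* stripe 1 completes */
            stripe_done(&r);                    /* stripe 2 completes */
            stripe_done(&r);                    /* submitter drops its bias: request ends */
            return 0;
    }
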
 
@@ -2441,10 +2468,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                                rdev_dec_pending(rdev, conf->mddev);
                        }
                }
-               spin_lock_irq(&conf->device_lock);
+               spin_lock_irq(&sh->stripe_lock);
                /* fail all writes first */
                bi = sh->dev[i].towrite;
                sh->dev[i].towrite = NULL;
+               spin_unlock_irq(&sh->stripe_lock);
                if (bi) {
                        s->to_write--;
                        bitmap_end = 1;
@@ -2457,13 +2485,17 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       if (!raid5_dec_bi_phys_segments(bi)) {
+                       if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
                        }
                        bi = nextbi;
                }
+               if (bitmap_end)
+                       bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+                               STRIPE_SECTORS, 0, 0);
+               bitmap_end = 0;
                /* and fail all 'written' */
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
@@ -2472,7 +2504,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       if (!raid5_dec_bi_phys_segments(bi)) {
+                       if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
@@ -2496,14 +2528,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                               if (!raid5_dec_bi_phys_segments(bi)) {
+                               if (!raid5_dec_bi_active_stripes(bi)) {
                                        bi->bi_next = *return_bi;
                                        *return_bi = bi;
                                }
                                bi = nextbi;
                        }
                }
-               spin_unlock_irq(&conf->device_lock);
                if (bitmap_end)
                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                                        STRIPE_SECTORS, 0, 0);
@@ -2707,30 +2738,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                test_bit(R5_UPTODATE, &dev->flags)) {
                                /* We can return any write requests */
                                struct bio *wbi, *wbi2;
-                               int bitmap_end = 0;
                                pr_debug("Return write for disc %d\n", i);
-                               spin_lock_irq(&conf->device_lock);
                                wbi = dev->written;
                                dev->written = NULL;
                                while (wbi && wbi->bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
-                                       if (!raid5_dec_bi_phys_segments(wbi)) {
+                                       if (!raid5_dec_bi_active_stripes(wbi)) {
                                                md_write_end(conf->mddev);
                                                wbi->bi_next = *return_bi;
                                                *return_bi = wbi;
                                        }
                                        wbi = wbi2;
                                }
-                               if (dev->towrite == NULL)
-                                       bitmap_end = 1;
-                               spin_unlock_irq(&conf->device_lock);
-                               if (bitmap_end)
-                                       bitmap_endwrite(conf->mddev->bitmap,
-                                                       sh->sector,
-                                                       STRIPE_SECTORS,
+                               bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+                                               STRIPE_SECTORS,
                                         !test_bit(STRIPE_DEGRADED, &sh->state),
-                                                       0);
+                                               0);
                        }
                }
 
@@ -3182,7 +3206,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
        /* Now to look around and see what can be done */
        rcu_read_lock();
-       spin_lock_irq(&conf->device_lock);
        for (i=disks; i--; ) {
                struct md_rdev *rdev;
                sector_t first_bad;
@@ -3328,7 +3351,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                                do_recovery = 1;
                }
        }
-       spin_unlock_irq(&conf->device_lock);
        if (test_bit(STRIPE_SYNCING, &sh->state)) {
                /* If there is a failed device being replaced,
                 *     we must be recovering.
@@ -3791,7 +3813,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
                 * this sets the active strip count to 1 and the processed
                 * strip count to zero (upper 8 bits)
                 */
-               bi->bi_phys_segments = 1; /* biased count of active stripes */
+               raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
        }
 
        return bi;
@@ -4113,7 +4135,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
-                       if ((bi->bi_rw & REQ_SYNC) &&
+                       if ((bi->bi_rw & REQ_NOIDLE) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
                        mddev_check_plugged(mddev);
@@ -4126,9 +4148,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                }
        }
 
-       spin_lock_irq(&conf->device_lock);
-       remaining = raid5_dec_bi_phys_segments(bi);
-       spin_unlock_irq(&conf->device_lock);
+       remaining = raid5_dec_bi_active_stripes(bi);
        if (remaining == 0) {
 
                if ( rw == WRITE )
@@ -4484,7 +4504,7 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
                     sector += STRIPE_SECTORS,
                     scnt++) {
 
-               if (scnt < raid5_bi_hw_segments(raid_bio))
+               if (scnt < raid5_bi_processed_stripes(raid_bio))
                        /* already done this stripe */
                        continue;
 
@@ -4492,25 +4512,24 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 
                if (!sh) {
                        /* failed to get a stripe - must wait */
-                       raid5_set_bi_hw_segments(raid_bio, scnt);
+                       raid5_set_bi_processed_stripes(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
 
                if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
                        release_stripe(sh);
-                       raid5_set_bi_hw_segments(raid_bio, scnt);
+                       raid5_set_bi_processed_stripes(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
 
+               set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
                handle_stripe(sh);
                release_stripe(sh);
                handled++;
        }
-       spin_lock_irq(&conf->device_lock);
-       remaining = raid5_dec_bi_phys_segments(raid_bio);
-       spin_unlock_irq(&conf->device_lock);
+       remaining = raid5_dec_bi_active_stripes(raid_bio);
        if (remaining == 0)
                bio_endio(raid_bio, 0);
        if (atomic_dec_and_test(&conf->active_aligned_reads))
drivers/md/raid5.h
index 2164021f3b5f6548b96ac067aac67bd7874dcd10..61dbb615c30b0f7174d834c01c316a910dda212a 100644
@@ -210,6 +210,7 @@ struct stripe_head {
        int                     disks;          /* disks in stripe */
        enum check_states       check_state;
        enum reconstruct_states reconstruct_state;
+       spinlock_t              stripe_lock;
        /**
         * struct stripe_operations
         * @target - STRIPE_OP_COMPUTE_BLK target
@@ -273,6 +274,7 @@ enum r5dev_flags {
        R5_Wantwrite,
        R5_Overlap,     /* There is a pending overlapping request
                         * on this block */
+       R5_ReadNoMerge, /* prevent bio from merging in block-layer */
        R5_ReadError,   /* seen a read error here recently */
        R5_ReWrite,     /* have tried to over-write the readerror */