md/raid5: introduce configuration option rmw_level
Author:     Markus Stockhausen <stockhausen@collogia.de>
AuthorDate: Mon, 15 Dec 2014 01:57:05 +0000 (12:57 +1100)
Committer:  NeilBrown <neilb@suse.de>
CommitDate: Tue, 21 Apr 2015 22:00:42 +0000 (08:00 +1000)
Depending on the available parity coding we allow optimized rmw logic for
write operations. To support easier testing, this patch allows manual control
of the rmw/rcw decision through the interface /sys/block/mdX/md/rmw_level.

The configuration can handle three levels of control.

rmw_level=0: Disable rmw for all RAID types. Hardware-assisted P/Q
calculation has no implementation path yet to factor chunks into/out of
a syndrome. Enforcing this level can be beneficial for slow CPUs with
hardware syndrome support and fast SSDs.

rmw_level=1: Estimate rmw IOs and rcw IOs. Execute rmw only if it saves
IOs. This matches the "old" unpatched behaviour and will be the
default.

rmw_level=2: Execute rmw even if the calculated IOs for rmw and rcw are
equal. We might see higher CPU consumption because the parity is calculated
twice, but it can be beneficial otherwise, e.g. RAID4 with a fast
dedicated parity disk/SSD. The option is implemented just to be
forward-looking and will ONLY work with this patch!

Signed-off-by: Markus Stockhausen <stockhausen@collogia.de>
Signed-off-by: NeilBrown <neilb@suse.de>
drivers/md/raid5.c
drivers/md/raid5.h

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c82ce1fd8723d070fdfee347ccee274928b021ab..f78b1964543b0d711cbb7464bea84d07535016f1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5879,6 +5879,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
                                raid5_show_stripe_cache_size,
                                raid5_store_stripe_cache_size);
 
+static ssize_t
+raid5_show_rmw_level(struct mddev  *mddev, char *page)
+{
+       struct r5conf *conf = mddev->private;
+       if (conf)
+               return sprintf(page, "%d\n", conf->rmw_level);
+       else
+               return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev  *mddev, const char *page, size_t len)
+{
+       struct r5conf *conf = mddev->private;
+       unsigned long new;
+
+       if (!conf)
+               return -ENODEV;
+
+       if (len >= PAGE_SIZE)
+               return -EINVAL;
+
+       if (kstrtoul(page, 10, &new))
+               return -EINVAL;
+
+       if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+               return -EINVAL;
+
+       if (new != PARITY_DISABLE_RMW &&
+           new != PARITY_ENABLE_RMW &&
+           new != PARITY_PREFER_RMW)
+               return -EINVAL;
+
+       conf->rmw_level = new;
+       return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+                        raid5_show_rmw_level,
+                        raid5_store_rmw_level);
+
+
 static ssize_t
 raid5_show_preread_threshold(struct mddev *mddev, char *page)
 {
@@ -6065,6 +6108,7 @@ static struct attribute *raid5_attrs[] =  {
        &raid5_preread_bypass_threshold.attr,
        &raid5_group_thread_cnt.attr,
        &raid5_skip_copy.attr,
+       &raid5_rmw_level.attr,
        NULL,
 };
 static struct attribute_group raid5_attrs_group = {
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57fef9ba36fa7087483c77f5366d8b53df5e2794..6614ac5ffc0ef08e3411f9bf60b174b2c0b9762a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -362,6 +362,7 @@ enum {
 enum {
        PARITY_DISABLE_RMW = 0,
        PARITY_ENABLE_RMW,
+       PARITY_PREFER_RMW,
 };
 
 /*