// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);

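/*
 * These mddev flags describe features of levels with redundancy
 * (raid4/5/6 write journal, partial parity log, failfast) that have no
 * meaning on a raid0 array; the takeover paths below clear them with
 * mddev_clear_unsupported_flags().
 */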
25#define UNSUPPORTED_MDDEV_FLAGS \
26 ((1L << MD_HAS_JOURNAL) | \
27 (1L << MD_JOURNAL_CLEAN) | \
ea0213e0 28 (1L << MD_FAILFAST_SUPPORTED) |\
ddc08823
PB
29 (1L << MD_HAS_PPL) | \
30 (1L << MD_HAS_MULTIPLE_PPLS))
394ed8e4 31
/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k ? "/" : "",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
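	/*
	 * At this point nr_strip_zones equals the number of distinct
	 * member sizes.  Illustrative example: two 1 TiB disks plus one
	 * 2 TiB disk give two zones - zone 0 stripes across all three
	 * disks, zone 1 covers only the tail of the 2 TiB disk.
	 */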

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
			        GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;
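	/*
	 * Zone 0 ends where the smallest member does: with the 2 x 1 TiB
	 * + 1 x 2 TiB example above, zone 0 spans 1 TiB * 3 devices of
	 * striped array address space (sizes kept in sectors here).
	 */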

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
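
	/*
	 * Multi-zone arrays historically used two incompatible chunk
	 * layouts: RAID0_ORIG_LAYOUT is the pre-Linux-3.14 mapping and
	 * RAID0_ALT_MULTIZONE_LAYOUT the one introduced in 3.14.  As far
	 * as I can tell from map_sector(), they differ only in which
	 * sector (array-absolute vs zone-relative) selects the member
	 * device in zones after the first, so arrays where the mappings
	 * coincide (a single zone, or a second zone with one device)
	 * can safely default to RAID0_ORIG_LAYOUT.
	 */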
	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
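/*
 * Illustrative example: with zone_ends of 1000 and 1600 sectors, a
 * request at array sector 1200 falls in zone 1 and *sectorp becomes
 * 1200 - 1000 = 200, the offset within that zone.
 */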

/*
 * remaps the bio to the target device. we separate two flows.
 * power-of-2 flow and a general flow for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
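/*
 * Worked example (illustrative numbers): one zone of 4 devices,
 * chunk_sects = 128.  For array sector 1000: sect_in_chunk =
 * 1000 % 128 = 104, chunk number = 1000 / 128 = 7, so the member
 * device is 7 % 4 = 3; the stripe within the zone is 1000 / (4 * 128)
 * = 1, giving *sector_offset = 1 * 128 + 104 = 232 on that device.
 */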

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void free_conf(struct mddev *mddev, struct r0conf *conf)
{
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	free_conf(mddev, conf);
	acct_bioset_exit(mddev);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (acct_bioset_init(mddev)) {
		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			goto exit_acct_set;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
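
		/*
		 * The hints above advertise one chunk as the minimum
		 * efficient I/O size and a full stripe (chunk size times
		 * the number of members) as the optimal one, so e.g. a
		 * 512-sector chunk on 4 disks reports io_opt of 1 MiB.
		 */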
		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
		}
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		goto free;

	return ret;

free:
	free_conf(mddev, conf);
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end is the offset in zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;
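
	/*
	 * Illustrative example: 2 devices, chunk = 128 sectors (so
	 * stripe_size = 256), discard of zone-relative [100, 600).
	 * first/last_stripe_index = 0 and 2, so disk 0 gets [100, 344)
	 * (its part of stripes 0-2) and disk 1 gets [0, 256), together
	 * covering the 500 discarded sectors.
	 */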

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));
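
	/*
	 * sectors now holds the distance to the next chunk boundary;
	 * e.g. with 128-sector chunks a bio starting at sector 1000 may
	 * carry at most 24 sectors before it must be split, since a
	 * single chunk never spans two member devices.
	 */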

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	if (bio->bi_pool != &mddev->bio_set)
		md_account_bio(mddev, &bio);

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
				      bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

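	/*
	 * Illustrative example: an array of 1,000,000 sectors is not a
	 * multiple of 128 (1,000,000 % 128 == 64) but is a multiple of
	 * 64, so the loop settles on a 64-sector (32 KiB) chunk, which
	 * passes the PAGE_SIZE check on 4 KiB-page systems.
	 */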
	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");