md/failfast: add failfast flag for md to be used by some personalities.
[linux-2.6-block.git] / drivers / md / raid10.c
1da177e4
LT
1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
25985edc 8 * Based on code in raid1.c. See raid1.c for further copyright information.
1da177e4
LT
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
5a0e3ad6 21#include <linux/slab.h>
25570727 22#include <linux/delay.h>
bff61975 23#include <linux/blkdev.h>
056075c7 24#include <linux/module.h>
bff61975 25#include <linux/seq_file.h>
8bda470e 26#include <linux/ratelimit.h>
3ea7daa5 27#include <linux/kthread.h>
109e3765 28#include <trace/events/block.h>
43b2e5d8 29#include "md.h"
ef740c37 30#include "raid10.h"
dab8b292 31#include "raid0.h"
ef740c37 32#include "bitmap.h"
1da177e4
LT
33
34/*
35 * RAID10 provides a combination of RAID0 and RAID1 functionality.
36 * The layout of data is defined by
37 * chunk_size
38 * raid_disks
39 * near_copies (stored in low byte of layout)
40 * far_copies (stored in second byte of layout)
c93983bf 41 * far_offset (stored in bit 16 of layout )
475901af 42 * use_far_sets (stored in bit 17 of layout )
8bce6d35 43 * use_far_sets_bugfixed (stored in bit 18 of layout )
1da177e4 44 *
475901af
JB
45 * The data to be stored is divided into chunks using chunksize. Each device
46 * is divided into far_copies sections. In each section, chunks are laid out
47 * in a style similar to raid0, but near_copies copies of each chunk are stored
48 * (each on a different drive). The starting device for each section is offset
49 * near_copies from the starting device of the previous section. Thus there
50 * are (near_copies * far_copies) of each chunk, and each is on a different
51 * drive. near_copies and far_copies must be at least one, and their product
52 * is at most raid_disks.
c93983bf
N
53 *
54 * If far_offset is true, then the far_copies are handled a bit differently.
475901af
JB
55 * The copies are still in different stripes, but instead of being very far
56 * apart on disk, they are in adjacent stripes.
57 *
58 * The far and offset algorithms are handled slightly differently if
59 * 'use_far_sets' is true. In this case, the array's devices are grouped into
60 * sets that are (near_copies * far_copies) in size. The far copied stripes
61 * are still shifted by 'near_copies' devices, but this shifting stays confined
62 * to the set rather than the entire array. This is done to improve the number
63 * of device combinations that can fail without causing the array to fail.
64 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
65 * on a device):
66 * A B C D A B C D E
67 * ... ...
68 * D A B C E A B C D
69 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
70 * [A B] [C D] [A B] [C D E]
71 * |...| |...| |...| | ... |
72 * [B A] [D C] [B A] [E C D]
1da177e4
LT
73 */
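/*
 * Illustrative sketch of the 'near' arithmetic described above; it is
 * not part of the driver (the real mapping, including far copies,
 * far_offset and far sets, is done by __raid10_find_phys() below).
 * Copy k of logical chunk c occupies raid0-style slot c*near_copies+k:
 */
static inline void raid10_near_layout_sketch(unsigned int chunk,
					     unsigned int copy,
					     unsigned int raid_disks,
					     unsigned int near_copies,
					     unsigned int *dev,
					     unsigned int *stripe)
{
	unsigned int slot = chunk * near_copies + copy;

	*dev = slot % raid_disks;	/* device holding this copy */
	*stripe = slot / raid_disks;	/* chunk row on that device */
}
/*
 * e.g. with raid_disks=4, near_copies=2: chunk 0 lands on devices 0
 * and 1, chunk 1 on devices 2 and 3, and chunk 2 wraps to devices 0
 * and 1 on the next stripe.
 */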
74
75/*
76 * Number of guaranteed r10bios in case of extreme VM load:
77 */
78#define NR_RAID10_BIOS 256
79
473e87ce
JB
80/* when we get a read error on a read-only array, we redirect to another
81 * device without failing the first device, or trying to over-write to
82 * correct the read error. To keep track of bad blocks on a per-bio
83 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
84 */
85#define IO_BLOCKED ((struct bio *)1)
86/* When we successfully write to a known bad-block, we need to remove the
87 * bad-block marking which must be done from process context. So we record
88 * the success by setting devs[n].bio to IO_MADE_GOOD
89 */
90#define IO_MADE_GOOD ((struct bio *)2)
91
92#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
93
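/*
 * Illustrative sketch (not used by the driver): cleanup paths must
 * treat the two markers above, and NULL, as "no real bio here" before
 * dropping a reference, as put_all_bios() below does via BIO_SPECIAL().
 */
static inline void r10_put_slot_bio_sketch(struct bio *bio)
{
	if (BIO_SPECIAL(bio))	/* NULL, IO_BLOCKED or IO_MADE_GOOD */
		return;
	bio_put(bio);		/* a real bio that we hold a reference on */
}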
94/* When there are this many requests queued to be written by
34db0cd6
N
95 * the raid10 thread, we become 'congested' to provide back-pressure
96 * for writeback.
97 */
98static int max_queued_requests = 1024;
99
e879a879
N
100static void allow_barrier(struct r10conf *conf);
101static void lower_barrier(struct r10conf *conf);
635f6416 102static int _enough(struct r10conf *conf, int previous, int ignore);
3ea7daa5
N
103static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
104 int *skipped);
105static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
4246a0b6 106static void end_reshape_write(struct bio *bio);
3ea7daa5 107static void end_reshape(struct r10conf *conf);
0a27ec96 108
578b54ad
N
109#define raid10_log(md, fmt, args...) \
110 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
111
dd0fc66f 112static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
1da177e4 113{
e879a879 114 struct r10conf *conf = data;
9f2c9d12 115 int size = offsetof(struct r10bio, devs[conf->copies]);
1da177e4 116
69335ef3
N
117 /* allocate a r10bio with room for raid_disks entries in the
118 * bios array */
7eaceacc 119 return kzalloc(size, gfp_flags);
1da177e4
LT
120}
121
122static void r10bio_pool_free(void *r10_bio, void *data)
123{
124 kfree(r10_bio);
125}
126
0310fa21 127/* Maximum size of each resync request */
1da177e4 128#define RESYNC_BLOCK_SIZE (64*1024)
1da177e4 129#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
0310fa21
N
130/* amount of memory to reserve for resync requests */
131#define RESYNC_WINDOW (1024*1024)
132/* maximum number of concurrent requests, memory permitting */
133#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
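/*
 * Worked numbers (assuming 4 KiB pages): RESYNC_BLOCK_SIZE is 64 KiB,
 * so RESYNC_PAGES comes to 16 pages per resync bio, the 1 MiB
 * RESYNC_WINDOW corresponds to 16 in-flight blocks, and RESYNC_DEPTH
 * allows at most 32 MiB / 64 KiB = 512 concurrent resync requests.
 */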
1da177e4
LT
134
135/*
136 * When performing a resync, we need to read and compare, so
137 * we need as many pages as there are copies.
138 * When performing a recovery, we need 2 bios, one for read,
139 * one for write (we recover only one drive per r10buf)
140 *
141 */
dd0fc66f 142static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
1da177e4 143{
e879a879 144 struct r10conf *conf = data;
1da177e4 145 struct page *page;
9f2c9d12 146 struct r10bio *r10_bio;
1da177e4
LT
147 struct bio *bio;
148 int i, j;
149 int nalloc;
150
151 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
7eaceacc 152 if (!r10_bio)
1da177e4 153 return NULL;
1da177e4 154
3ea7daa5
N
155 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
156 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
1da177e4
LT
157 nalloc = conf->copies; /* resync */
158 else
159 nalloc = 2; /* recovery */
160
161 /*
162 * Allocate bios.
163 */
164 for (j = nalloc ; j-- ; ) {
6746557f 165 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
1da177e4
LT
166 if (!bio)
167 goto out_free_bio;
168 r10_bio->devs[j].bio = bio;
69335ef3
N
169 if (!conf->have_replacement)
170 continue;
171 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
172 if (!bio)
173 goto out_free_bio;
174 r10_bio->devs[j].repl_bio = bio;
1da177e4
LT
175 }
176 /*
177 * Allocate RESYNC_PAGES data pages and attach them
178 * where needed.
179 */
180 for (j = 0 ; j < nalloc; j++) {
69335ef3 181 struct bio *rbio = r10_bio->devs[j].repl_bio;
1da177e4
LT
182 bio = r10_bio->devs[j].bio;
183 for (i = 0; i < RESYNC_PAGES; i++) {
3ea7daa5
N
184 if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
185 &conf->mddev->recovery)) {
186 /* we can share bv_page's during recovery
187 * and reshape */
c65060ad
NK
188 struct bio *rbio = r10_bio->devs[0].bio;
189 page = rbio->bi_io_vec[i].bv_page;
190 get_page(page);
191 } else
192 page = alloc_page(gfp_flags);
1da177e4
LT
193 if (unlikely(!page))
194 goto out_free_pages;
195
196 bio->bi_io_vec[i].bv_page = page;
69335ef3
N
197 if (rbio)
198 rbio->bi_io_vec[i].bv_page = page;
1da177e4
LT
199 }
200 }
201
202 return r10_bio;
203
204out_free_pages:
205 for ( ; i > 0 ; i--)
1345b1d8 206 safe_put_page(bio->bi_io_vec[i-1].bv_page);
1da177e4
LT
207 while (j--)
208 for (i = 0; i < RESYNC_PAGES ; i++)
1345b1d8 209 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
5fdd2cf8 210 j = 0;
1da177e4 211out_free_bio:
5fdd2cf8 212 for ( ; j < nalloc; j++) {
213 if (r10_bio->devs[j].bio)
214 bio_put(r10_bio->devs[j].bio);
69335ef3
N
215 if (r10_bio->devs[j].repl_bio)
216 bio_put(r10_bio->devs[j].repl_bio);
217 }
1da177e4
LT
218 r10bio_pool_free(r10_bio, conf);
219 return NULL;
220}
221
222static void r10buf_pool_free(void *__r10_bio, void *data)
223{
224 int i;
e879a879 225 struct r10conf *conf = data;
9f2c9d12 226 struct r10bio *r10bio = __r10_bio;
1da177e4
LT
227 int j;
228
229 for (j=0; j < conf->copies; j++) {
230 struct bio *bio = r10bio->devs[j].bio;
231 if (bio) {
232 for (i = 0; i < RESYNC_PAGES; i++) {
1345b1d8 233 safe_put_page(bio->bi_io_vec[i].bv_page);
1da177e4
LT
234 bio->bi_io_vec[i].bv_page = NULL;
235 }
236 bio_put(bio);
237 }
69335ef3
N
238 bio = r10bio->devs[j].repl_bio;
239 if (bio)
240 bio_put(bio);
1da177e4
LT
241 }
242 r10bio_pool_free(r10bio, conf);
243}
244
e879a879 245static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
1da177e4
LT
246{
247 int i;
248
249 for (i = 0; i < conf->copies; i++) {
250 struct bio **bio = & r10_bio->devs[i].bio;
749c55e9 251 if (!BIO_SPECIAL(*bio))
1da177e4
LT
252 bio_put(*bio);
253 *bio = NULL;
69335ef3
N
254 bio = &r10_bio->devs[i].repl_bio;
255 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
256 bio_put(*bio);
257 *bio = NULL;
1da177e4
LT
258 }
259}
260
9f2c9d12 261static void free_r10bio(struct r10bio *r10_bio)
1da177e4 262{
e879a879 263 struct r10conf *conf = r10_bio->mddev->private;
1da177e4 264
1da177e4
LT
265 put_all_bios(conf, r10_bio);
266 mempool_free(r10_bio, conf->r10bio_pool);
267}
268
9f2c9d12 269static void put_buf(struct r10bio *r10_bio)
1da177e4 270{
e879a879 271 struct r10conf *conf = r10_bio->mddev->private;
1da177e4
LT
272
273 mempool_free(r10_bio, conf->r10buf_pool);
274
0a27ec96 275 lower_barrier(conf);
1da177e4
LT
276}
277
9f2c9d12 278static void reschedule_retry(struct r10bio *r10_bio)
1da177e4
LT
279{
280 unsigned long flags;
fd01b88c 281 struct mddev *mddev = r10_bio->mddev;
e879a879 282 struct r10conf *conf = mddev->private;
1da177e4
LT
283
284 spin_lock_irqsave(&conf->device_lock, flags);
285 list_add(&r10_bio->retry_list, &conf->retry_list);
4443ae10 286 conf->nr_queued ++;
1da177e4
LT
287 spin_unlock_irqrestore(&conf->device_lock, flags);
288
388667be
AJ
289 /* wake up frozen array... */
290 wake_up(&conf->wait_barrier);
291
1da177e4
LT
292 md_wakeup_thread(mddev->thread);
293}
294
295/*
296 * raid_end_bio_io() is called when we have finished servicing a mirrored
297 * operation and are ready to return a success/failure code to the buffer
298 * cache layer.
299 */
9f2c9d12 300static void raid_end_bio_io(struct r10bio *r10_bio)
1da177e4
LT
301{
302 struct bio *bio = r10_bio->master_bio;
856e08e2 303 int done;
e879a879 304 struct r10conf *conf = r10_bio->mddev->private;
1da177e4 305
856e08e2
N
306 if (bio->bi_phys_segments) {
307 unsigned long flags;
308 spin_lock_irqsave(&conf->device_lock, flags);
309 bio->bi_phys_segments--;
310 done = (bio->bi_phys_segments == 0);
311 spin_unlock_irqrestore(&conf->device_lock, flags);
312 } else
313 done = 1;
314 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4246a0b6 315 bio->bi_error = -EIO;
856e08e2 316 if (done) {
4246a0b6 317 bio_endio(bio);
856e08e2
N
318 /*
319 * Wake up any possible resync thread that waits for the device
320 * to go idle.
321 */
322 allow_barrier(conf);
323 }
1da177e4
LT
324 free_r10bio(r10_bio);
325}
326
327/*
328 * Update disk head position estimator based on IRQ completion info.
329 */
9f2c9d12 330static inline void update_head_pos(int slot, struct r10bio *r10_bio)
1da177e4 331{
e879a879 332 struct r10conf *conf = r10_bio->mddev->private;
1da177e4
LT
333
334 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
335 r10_bio->devs[slot].addr + (r10_bio->sectors);
336}
337
778ca018
NK
338/*
339 * Find the disk number which triggered the given bio
340 */
e879a879 341static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
69335ef3 342 struct bio *bio, int *slotp, int *replp)
778ca018
NK
343{
344 int slot;
69335ef3 345 int repl = 0;
778ca018 346
69335ef3 347 for (slot = 0; slot < conf->copies; slot++) {
778ca018
NK
348 if (r10_bio->devs[slot].bio == bio)
349 break;
69335ef3
N
350 if (r10_bio->devs[slot].repl_bio == bio) {
351 repl = 1;
352 break;
353 }
354 }
778ca018
NK
355
356 BUG_ON(slot == conf->copies);
357 update_head_pos(slot, r10_bio);
358
749c55e9
N
359 if (slotp)
360 *slotp = slot;
69335ef3
N
361 if (replp)
362 *replp = repl;
778ca018
NK
363 return r10_bio->devs[slot].devnum;
364}
365
4246a0b6 366static void raid10_end_read_request(struct bio *bio)
1da177e4 367{
4246a0b6 368 int uptodate = !bio->bi_error;
9f2c9d12 369 struct r10bio *r10_bio = bio->bi_private;
1da177e4 370 int slot, dev;
abbf098e 371 struct md_rdev *rdev;
e879a879 372 struct r10conf *conf = r10_bio->mddev->private;
1da177e4 373
1da177e4
LT
374 slot = r10_bio->read_slot;
375 dev = r10_bio->devs[slot].devnum;
abbf098e 376 rdev = r10_bio->devs[slot].rdev;
1da177e4
LT
377 /*
378 * this branch is our 'one mirror IO has finished' event handler:
379 */
4443ae10
N
380 update_head_pos(slot, r10_bio);
381
382 if (uptodate) {
1da177e4
LT
383 /*
384 * Set R10BIO_Uptodate in our master bio, so that
385 * we will return a good error code to the higher
386 * levels even if IO on some other mirrored buffer fails.
387 *
388 * The 'master' represents the composite IO operation to
389 * user-side. So if something waits for IO, then it will
390 * wait for the 'master' bio.
391 */
392 set_bit(R10BIO_Uptodate, &r10_bio->state);
fae8cc5e
N
393 } else {
394 /* If all other devices that store this block have
395 * failed, we want to return the error upwards rather
396 * than fail the last device. Here we redefine
397 * "uptodate" to mean "Don't want to retry"
398 */
635f6416
N
399 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
400 rdev->raid_disk))
fae8cc5e 401 uptodate = 1;
fae8cc5e
N
402 }
403 if (uptodate) {
1da177e4 404 raid_end_bio_io(r10_bio);
abbf098e 405 rdev_dec_pending(rdev, conf->mddev);
4443ae10 406 } else {
1da177e4 407 /*
7c4e06ff 408 * oops, read error - keep the refcount on the rdev
1da177e4
LT
409 */
410 char b[BDEVNAME_SIZE];
08464e09 411 pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
8bda470e 412 mdname(conf->mddev),
abbf098e 413 bdevname(rdev->bdev, b),
8bda470e 414 (unsigned long long)r10_bio->sector);
856e08e2 415 set_bit(R10BIO_ReadError, &r10_bio->state);
1da177e4
LT
416 reschedule_retry(r10_bio);
417 }
1da177e4
LT
418}
419
9f2c9d12 420static void close_write(struct r10bio *r10_bio)
bd870a16
N
421{
422 /* clear the bitmap if all writes complete successfully */
423 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
424 r10_bio->sectors,
425 !test_bit(R10BIO_Degraded, &r10_bio->state),
426 0);
427 md_write_end(r10_bio->mddev);
428}
429
9f2c9d12 430static void one_write_done(struct r10bio *r10_bio)
19d5f834
N
431{
432 if (atomic_dec_and_test(&r10_bio->remaining)) {
433 if (test_bit(R10BIO_WriteError, &r10_bio->state))
434 reschedule_retry(r10_bio);
435 else {
436 close_write(r10_bio);
437 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
438 reschedule_retry(r10_bio);
439 else
440 raid_end_bio_io(r10_bio);
441 }
442 }
443}
444
4246a0b6 445static void raid10_end_write_request(struct bio *bio)
1da177e4 446{
9f2c9d12 447 struct r10bio *r10_bio = bio->bi_private;
778ca018 448 int dev;
749c55e9 449 int dec_rdev = 1;
e879a879 450 struct r10conf *conf = r10_bio->mddev->private;
475b0321 451 int slot, repl;
4ca40c2c 452 struct md_rdev *rdev = NULL;
579ed34f
SL
453 bool discard_error;
454
455 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
1da177e4 456
475b0321 457 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1da177e4 458
475b0321
N
459 if (repl)
460 rdev = conf->mirrors[dev].replacement;
4ca40c2c
N
461 if (!rdev) {
462 smp_rmb();
463 repl = 0;
475b0321 464 rdev = conf->mirrors[dev].rdev;
4ca40c2c 465 }
1da177e4
LT
466 /*
467 * this branch is our 'one mirror IO has finished' event handler:
468 */
579ed34f 469 if (bio->bi_error && !discard_error) {
475b0321
N
470 if (repl)
471 /* Never record new bad blocks to replacement,
472 * just fail it.
473 */
474 md_error(rdev->mddev, rdev);
475 else {
476 set_bit(WriteErrorSeen, &rdev->flags);
b7044d41
N
477 if (!test_and_set_bit(WantReplacement, &rdev->flags))
478 set_bit(MD_RECOVERY_NEEDED,
479 &rdev->mddev->recovery);
475b0321
N
480 set_bit(R10BIO_WriteError, &r10_bio->state);
481 dec_rdev = 0;
482 }
749c55e9 483 } else {
1da177e4
LT
484 /*
485 * Set R10BIO_Uptodate in our master bio, so that
486 * we will return a good error code to the higher
487 * levels even if IO on some other mirrored buffer fails.
488 *
489 * The 'master' represents the composite IO operation to
490 * user-side. So if something waits for IO, then it will
491 * wait for the 'master' bio.
492 */
749c55e9
N
493 sector_t first_bad;
494 int bad_sectors;
495
3056e3ae
AL
496 /*
497 * Do not set R10BIO_Uptodate if the current device is
498 * rebuilding or Faulty. This is because we cannot use
499 * such a device for properly reading the data back (we could
500 * potentially use it, if the current write fell entirely
501 * before rdev->recovery_offset, but for simplicity we don't
502 * check that here).
503 */
504 if (test_bit(In_sync, &rdev->flags) &&
505 !test_bit(Faulty, &rdev->flags))
506 set_bit(R10BIO_Uptodate, &r10_bio->state);
1da177e4 507
749c55e9 508 /* Maybe we can clear some bad blocks. */
475b0321 509 if (is_badblock(rdev,
749c55e9
N
510 r10_bio->devs[slot].addr,
511 r10_bio->sectors,
579ed34f 512 &first_bad, &bad_sectors) && !discard_error) {
749c55e9 513 bio_put(bio);
475b0321
N
514 if (repl)
515 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
516 else
517 r10_bio->devs[slot].bio = IO_MADE_GOOD;
749c55e9
N
518 dec_rdev = 0;
519 set_bit(R10BIO_MadeGood, &r10_bio->state);
520 }
521 }
522
1da177e4
LT
523 /*
524 *
525 * Let's see if all mirrored write operations have finished
526 * already.
527 */
19d5f834 528 one_write_done(r10_bio);
749c55e9 529 if (dec_rdev)
884162df 530 rdev_dec_pending(rdev, conf->mddev);
1da177e4
LT
531}
532
1da177e4
LT
533/*
534 * RAID10 layout manager
25985edc 535 * As well as the chunksize and raid_disks count, there are two
1da177e4
LT
536 * parameters: near_copies and far_copies.
537 * near_copies * far_copies must be <= raid_disks.
538 * Normally one of these will be 1.
539 * If both are 1, we get raid0.
540 * If near_copies == raid_disks, we get raid1.
541 *
25985edc 542 * Chunks are laid out in raid0 style with near_copies copies of the
1da177e4
LT
543 * first chunk, followed by near_copies copies of the next chunk and
544 * so on.
545 * If far_copies > 1, then after 1/far_copies of the array has been assigned
546 * as described above, we start again with a device offset of near_copies.
547 * So we effectively have another copy of the whole array further down all
548 * the drives, but with blocks on different drives.
549 * With this layout, a block is never stored twice on the same device.
550 *
551 * raid10_find_phys finds the sector offset of a given virtual sector
c93983bf 552 * on each device that it is on.
1da177e4
LT
553 *
554 * raid10_find_virt does the reverse mapping, from a device and a
555 * sector offset to a virtual address
556 */
557
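/*
 * Worked example of the mapping (assuming 64 KiB chunks = 128 sectors,
 * raid_disks=4, near_copies=2, far_copies=1): virtual sector 300 is
 * chunk 2, offset 44; chunk * near_copies = 4 gives dev = 4 % 4 = 0
 * and stripe = 4 / 4 = 1, so the copies live at device sector
 * 44 + 128 = 172 on devices 0 and 1.  raid10_find_virt(conf, 172, 0)
 * reverses this: vchunk = (172 >> 7) * 4 + 0 = 4, 4 / near_copies = 2,
 * and (2 << 7) + 44 = 300 again.
 */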
f8c9e74f 558static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
1da177e4
LT
559{
560 int n,f;
561 sector_t sector;
562 sector_t chunk;
563 sector_t stripe;
564 int dev;
1da177e4 565 int slot = 0;
9a3152ab
JB
566 int last_far_set_start, last_far_set_size;
567
568 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
569 last_far_set_start *= geo->far_set_size;
570
571 last_far_set_size = geo->far_set_size;
572 last_far_set_size += (geo->raid_disks % geo->far_set_size);
1da177e4
LT
573
574 /* now calculate first sector/dev */
5cf00fcd
N
575 chunk = r10bio->sector >> geo->chunk_shift;
576 sector = r10bio->sector & geo->chunk_mask;
1da177e4 577
5cf00fcd 578 chunk *= geo->near_copies;
1da177e4 579 stripe = chunk;
5cf00fcd
N
580 dev = sector_div(stripe, geo->raid_disks);
581 if (geo->far_offset)
582 stripe *= geo->far_copies;
1da177e4 583
5cf00fcd 584 sector += stripe << geo->chunk_shift;
1da177e4
LT
585
586 /* and calculate all the others */
5cf00fcd 587 for (n = 0; n < geo->near_copies; n++) {
1da177e4 588 int d = dev;
475901af 589 int set;
1da177e4 590 sector_t s = sector;
1da177e4 591 r10bio->devs[slot].devnum = d;
4c0ca26b 592 r10bio->devs[slot].addr = s;
1da177e4
LT
593 slot++;
594
5cf00fcd 595 for (f = 1; f < geo->far_copies; f++) {
475901af 596 set = d / geo->far_set_size;
5cf00fcd 597 d += geo->near_copies;
475901af 598
9a3152ab
JB
599 if ((geo->raid_disks % geo->far_set_size) &&
600 (d > last_far_set_start)) {
601 d -= last_far_set_start;
602 d %= last_far_set_size;
603 d += last_far_set_start;
604 } else {
605 d %= geo->far_set_size;
606 d += geo->far_set_size * set;
607 }
5cf00fcd 608 s += geo->stride;
1da177e4
LT
609 r10bio->devs[slot].devnum = d;
610 r10bio->devs[slot].addr = s;
611 slot++;
612 }
613 dev++;
5cf00fcd 614 if (dev >= geo->raid_disks) {
1da177e4 615 dev = 0;
5cf00fcd 616 sector += (geo->chunk_mask + 1);
1da177e4
LT
617 }
618 }
f8c9e74f
N
619}
620
621static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
622{
623 struct geom *geo = &conf->geo;
624
625 if (conf->reshape_progress != MaxSector &&
626 ((r10bio->sector >= conf->reshape_progress) !=
627 conf->mddev->reshape_backwards)) {
628 set_bit(R10BIO_Previous, &r10bio->state);
629 geo = &conf->prev;
630 } else
631 clear_bit(R10BIO_Previous, &r10bio->state);
632
633 __raid10_find_phys(geo, r10bio);
1da177e4
LT
634}
635
e879a879 636static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
1da177e4
LT
637{
638 sector_t offset, chunk, vchunk;
f8c9e74f
N
639 /* Never use conf->prev as this is only called during resync
640 * or recovery, so reshape isn't happening
641 */
5cf00fcd 642 struct geom *geo = &conf->geo;
475901af
JB
643 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
644 int far_set_size = geo->far_set_size;
9a3152ab
JB
645 int last_far_set_start;
646
647 if (geo->raid_disks % geo->far_set_size) {
648 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
649 last_far_set_start *= geo->far_set_size;
650
651 if (dev >= last_far_set_start) {
652 far_set_size = geo->far_set_size;
653 far_set_size += (geo->raid_disks % geo->far_set_size);
654 far_set_start = last_far_set_start;
655 }
656 }
1da177e4 657
5cf00fcd
N
658 offset = sector & geo->chunk_mask;
659 if (geo->far_offset) {
c93983bf 660 int fc;
5cf00fcd
N
661 chunk = sector >> geo->chunk_shift;
662 fc = sector_div(chunk, geo->far_copies);
663 dev -= fc * geo->near_copies;
475901af
JB
664 if (dev < far_set_start)
665 dev += far_set_size;
c93983bf 666 } else {
5cf00fcd
N
667 while (sector >= geo->stride) {
668 sector -= geo->stride;
475901af
JB
669 if (dev < (geo->near_copies + far_set_start))
670 dev += far_set_size - geo->near_copies;
c93983bf 671 else
5cf00fcd 672 dev -= geo->near_copies;
c93983bf 673 }
5cf00fcd 674 chunk = sector >> geo->chunk_shift;
c93983bf 675 }
5cf00fcd
N
676 vchunk = chunk * geo->raid_disks + dev;
677 sector_div(vchunk, geo->near_copies);
678 return (vchunk << geo->chunk_shift) + offset;
1da177e4
LT
679}
680
1da177e4
LT
681/*
682 * This routine returns the disk from which the requested read should
683 * be done. There is a per-array 'next expected sequential IO' sector
684 * number - if this matches on the next IO then we use the last disk.
685 * There is also a per-disk 'last known head position' sector that is
686 * maintained from IRQ contexts; both the normal and the resync IO
687 * completion handlers update this position correctly. If there is no
688 * perfect sequential match then we pick the disk whose head is closest.
689 *
690 * If there are 2 mirrors in the same 2 devices, performance degrades
691 * because position is mirror-based, not device-based.
692 *
693 * The rdev for the device selected will have nr_pending incremented.
694 */
695
696/*
697 * FIXME: possibly should rethink read balancing and do it differently
698 * depending on near_copies / far_copies geometry.
699 */
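/*
 * Sketch of the distance metric described above (illustration only;
 * read_balance() below is the real selection): 'near' layouts minimise
 * the seek distance from the last known head position, while far and
 * offset layouts simply prefer the lowest device address.
 */
static inline sector_t read_distance_sketch(struct geom *geo,
					    sector_t dev_addr,
					    sector_t head_pos)
{
	if (geo->far_copies > 1)
		return dev_addr;
	return dev_addr > head_pos ? dev_addr - head_pos
				   : head_pos - dev_addr;
}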
96c3fd1f
N
700static struct md_rdev *read_balance(struct r10conf *conf,
701 struct r10bio *r10_bio,
702 int *max_sectors)
1da177e4 703{
af3a2cd6 704 const sector_t this_sector = r10_bio->sector;
56d99121 705 int disk, slot;
856e08e2
N
706 int sectors = r10_bio->sectors;
707 int best_good_sectors;
56d99121 708 sector_t new_distance, best_dist;
3bbae04b 709 struct md_rdev *best_rdev, *rdev = NULL;
56d99121
N
710 int do_balance;
711 int best_slot;
5cf00fcd 712 struct geom *geo = &conf->geo;
1da177e4
LT
713
714 raid10_find_phys(conf, r10_bio);
715 rcu_read_lock();
856e08e2 716 sectors = r10_bio->sectors;
56d99121 717 best_slot = -1;
abbf098e 718 best_rdev = NULL;
56d99121 719 best_dist = MaxSector;
856e08e2 720 best_good_sectors = 0;
56d99121 721 do_balance = 1;
1da177e4
LT
722 /*
723 * Check if we can balance. We can balance on the whole
6cce3b23
N
724 * device if no resync is going on (recovery is ok), or below
725 * the resync window. We take the first readable disk when
726 * above the resync window.
1da177e4
LT
727 */
728 if (conf->mddev->recovery_cp < MaxSector
56d99121
N
729 && (this_sector + sectors >= conf->next_resync))
730 do_balance = 0;
1da177e4 731
56d99121 732 for (slot = 0; slot < conf->copies ; slot++) {
856e08e2
N
733 sector_t first_bad;
734 int bad_sectors;
735 sector_t dev_sector;
736
56d99121
N
737 if (r10_bio->devs[slot].bio == IO_BLOCKED)
738 continue;
1da177e4 739 disk = r10_bio->devs[slot].devnum;
abbf098e
N
740 rdev = rcu_dereference(conf->mirrors[disk].replacement);
741 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
742 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
743 rdev = rcu_dereference(conf->mirrors[disk].rdev);
050b6615 744 if (rdev == NULL ||
8ae12666 745 test_bit(Faulty, &rdev->flags))
abbf098e
N
746 continue;
747 if (!test_bit(In_sync, &rdev->flags) &&
748 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
56d99121
N
749 continue;
750
856e08e2
N
751 dev_sector = r10_bio->devs[slot].addr;
752 if (is_badblock(rdev, dev_sector, sectors,
753 &first_bad, &bad_sectors)) {
754 if (best_dist < MaxSector)
755 /* Already have a better slot */
756 continue;
757 if (first_bad <= dev_sector) {
758 /* Cannot read here. If this is the
759 * 'primary' device, then we must not read
760 * beyond 'bad_sectors' from another device.
761 */
762 bad_sectors -= (dev_sector - first_bad);
763 if (!do_balance && sectors > bad_sectors)
764 sectors = bad_sectors;
765 if (best_good_sectors > sectors)
766 best_good_sectors = sectors;
767 } else {
768 sector_t good_sectors =
769 first_bad - dev_sector;
770 if (good_sectors > best_good_sectors) {
771 best_good_sectors = good_sectors;
772 best_slot = slot;
abbf098e 773 best_rdev = rdev;
856e08e2
N
774 }
775 if (!do_balance)
776 /* Must read from here */
777 break;
778 }
779 continue;
780 } else
781 best_good_sectors = sectors;
782
56d99121
N
783 if (!do_balance)
784 break;
1da177e4 785
22dfdf52
N
786 /* This optimisation is debatable, and completely destroys
787 * sequential read speed for 'far copies' arrays. So only
788 * keep it for 'near' arrays, and review those later.
789 */
5cf00fcd 790 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
1da177e4 791 break;
8ed3a195
KS
792
793 /* for far > 1 always use the lowest address */
5cf00fcd 794 if (geo->far_copies > 1)
56d99121 795 new_distance = r10_bio->devs[slot].addr;
8ed3a195 796 else
56d99121
N
797 new_distance = abs(r10_bio->devs[slot].addr -
798 conf->mirrors[disk].head_position);
799 if (new_distance < best_dist) {
800 best_dist = new_distance;
801 best_slot = slot;
abbf098e 802 best_rdev = rdev;
1da177e4
LT
803 }
804 }
abbf098e 805 if (slot >= conf->copies) {
56d99121 806 slot = best_slot;
abbf098e
N
807 rdev = best_rdev;
808 }
1da177e4 809
56d99121 810 if (slot >= 0) {
56d99121 811 atomic_inc(&rdev->nr_pending);
56d99121
N
812 r10_bio->read_slot = slot;
813 } else
96c3fd1f 814 rdev = NULL;
1da177e4 815 rcu_read_unlock();
856e08e2 816 *max_sectors = best_good_sectors;
1da177e4 817
96c3fd1f 818 return rdev;
1da177e4
LT
819}
820
5c675f83 821static int raid10_congested(struct mddev *mddev, int bits)
0d129228 822{
e879a879 823 struct r10conf *conf = mddev->private;
0d129228
N
824 int i, ret = 0;
825
4452226e 826 if ((bits & (1 << WB_async_congested)) &&
34db0cd6
N
827 conf->pending_count >= max_queued_requests)
828 return 1;
829
0d129228 830 rcu_read_lock();
f8c9e74f
N
831 for (i = 0;
832 (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
833 && ret == 0;
834 i++) {
3cb03002 835 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
0d129228 836 if (rdev && !test_bit(Faulty, &rdev->flags)) {
165125e1 837 struct request_queue *q = bdev_get_queue(rdev->bdev);
0d129228
N
838
839 ret |= bdi_congested(&q->backing_dev_info, bits);
840 }
841 }
842 rcu_read_unlock();
843 return ret;
844}
845
e879a879 846static void flush_pending_writes(struct r10conf *conf)
a35e63ef
N
847{
848 /* Any writes that have been queued but are awaiting
849 * bitmap updates get flushed here.
a35e63ef 850 */
a35e63ef
N
851 spin_lock_irq(&conf->device_lock);
852
853 if (conf->pending_bio_list.head) {
854 struct bio *bio;
855 bio = bio_list_get(&conf->pending_bio_list);
34db0cd6 856 conf->pending_count = 0;
a35e63ef
N
857 spin_unlock_irq(&conf->device_lock);
858 /* flush any pending bitmap writes to disk
859 * before proceeding w/ I/O */
860 bitmap_unplug(conf->mddev->bitmap);
34db0cd6 861 wake_up(&conf->wait_barrier);
a35e63ef
N
862
863 while (bio) { /* submit pending writes */
864 struct bio *next = bio->bi_next;
a9ae93c8 865 struct md_rdev *rdev = (void*)bio->bi_bdev;
a35e63ef 866 bio->bi_next = NULL;
a9ae93c8
N
867 bio->bi_bdev = rdev->bdev;
868 if (test_bit(Faulty, &rdev->flags)) {
869 bio->bi_error = -EIO;
870 bio_endio(bio);
871 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
872 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
532a2a3f 873 /* Just ignore it */
4246a0b6 874 bio_endio(bio);
532a2a3f
SL
875 else
876 generic_make_request(bio);
a35e63ef
N
877 bio = next;
878 }
a35e63ef
N
879 } else
880 spin_unlock_irq(&conf->device_lock);
a35e63ef 881}
7eaceacc 882
0a27ec96
N
883/* Barriers....
884 * Sometimes we need to suspend IO while we do something else,
885 * either some resync/recovery, or reconfigure the array.
886 * To do this we raise a 'barrier'.
887 * The 'barrier' is a counter that can be raised multiple times
888 * to count how many activities are happening which preclude
889 * normal IO.
890 * We can only raise the barrier if there is no pending IO.
891 * i.e. if nr_pending == 0.
892 * We choose only to raise the barrier if no-one is waiting for the
893 * barrier to go down. This means that as soon as an IO request
894 * is ready, no other operations which require a barrier will start
895 * until the IO request has had a chance.
896 *
897 * So: regular IO calls 'wait_barrier'. When that returns there
898 * is no background IO happening.  It must arrange to call
899 * allow_barrier when it has finished its IO.
900 * Background IO calls must call raise_barrier. Once that returns
901 * there is no normal IO happening. It must arrange to call
902 * lower_barrier when the particular background IO completes.
1da177e4 903 */
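/*
 * Example interleaving of the rules above (a sketch, not literal code):
 *
 *	resync/recovery thread		regular IO
 *	----------------------		----------
 *	raise_barrier()
 *	  barrier++			wait_barrier()
 *	  wait for nr_pending == 0	  sees barrier != 0, sleeps
 *	  ...issue sync requests...
 *	lower_barrier()
 *	  barrier--, wake_up()		  wakes, nr_pending++, returns
 *					...submit the bio...
 *					allow_barrier()
 *					  nr_pending--, wake_up()
 */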
1da177e4 904
e879a879 905static void raise_barrier(struct r10conf *conf, int force)
1da177e4 906{
6cce3b23 907 BUG_ON(force && !conf->barrier);
1da177e4 908 spin_lock_irq(&conf->resync_lock);
0a27ec96 909
6cce3b23
N
910 /* Wait until no block IO is waiting (unless 'force') */
911 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
eed8c02e 912 conf->resync_lock);
0a27ec96
N
913
914 /* block any new IO from starting */
915 conf->barrier++;
916
c3b328ac 917 /* Now wait for all pending IO to complete */
0a27ec96 918 wait_event_lock_irq(conf->wait_barrier,
0e5313e2 919 !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
eed8c02e 920 conf->resync_lock);
0a27ec96
N
921
922 spin_unlock_irq(&conf->resync_lock);
923}
924
e879a879 925static void lower_barrier(struct r10conf *conf)
0a27ec96
N
926{
927 unsigned long flags;
928 spin_lock_irqsave(&conf->resync_lock, flags);
929 conf->barrier--;
930 spin_unlock_irqrestore(&conf->resync_lock, flags);
931 wake_up(&conf->wait_barrier);
932}
933
e879a879 934static void wait_barrier(struct r10conf *conf)
0a27ec96
N
935{
936 spin_lock_irq(&conf->resync_lock);
937 if (conf->barrier) {
938 conf->nr_waiting++;
d6b42dcb
N
939 /* Wait for the barrier to drop.
940 * However if there are already pending
941 * requests (preventing the barrier from
942 * rising completely), and the
943 * pre-process bio queue isn't empty,
944 * then don't wait, as we need to empty
945 * that queue to get the nr_pending
946 * count down.
947 */
578b54ad 948 raid10_log(conf->mddev, "wait barrier");
d6b42dcb
N
949 wait_event_lock_irq(conf->wait_barrier,
950 !conf->barrier ||
0e5313e2 951 (atomic_read(&conf->nr_pending) &&
d6b42dcb
N
952 current->bio_list &&
953 !bio_list_empty(current->bio_list)),
eed8c02e 954 conf->resync_lock);
0a27ec96 955 conf->nr_waiting--;
0e5313e2
TM
956 if (!conf->nr_waiting)
957 wake_up(&conf->wait_barrier);
1da177e4 958 }
0e5313e2 959 atomic_inc(&conf->nr_pending);
1da177e4
LT
960 spin_unlock_irq(&conf->resync_lock);
961}
962
e879a879 963static void allow_barrier(struct r10conf *conf)
0a27ec96 964{
0e5313e2
TM
965 if ((atomic_dec_and_test(&conf->nr_pending)) ||
966 (conf->array_freeze_pending))
967 wake_up(&conf->wait_barrier);
0a27ec96
N
968}
969
e2d59925 970static void freeze_array(struct r10conf *conf, int extra)
4443ae10
N
971{
972 /* stop sync IO and normal IO and wait for everything to
f188593e 973 * go quiet.
4443ae10 974 * We increment barrier and nr_waiting, and then
e2d59925 975 * wait until nr_pending matches nr_queued+extra
1c830532
N
976 * This is called in the context of one normal IO request
977 * that has failed. Thus any sync request that might be pending
978 * will be blocked by nr_pending, and we need to wait for
979 * pending IO requests to complete or be queued for re-try.
e2d59925 980 * Thus the number queued (nr_queued) plus this request (extra)
1c830532
N
981 * must match the number of pending IOs (nr_pending) before
982 * we continue.
4443ae10
N
983 */
984 spin_lock_irq(&conf->resync_lock);
0e5313e2 985 conf->array_freeze_pending++;
4443ae10
N
986 conf->barrier++;
987 conf->nr_waiting++;
eed8c02e 988 wait_event_lock_irq_cmd(conf->wait_barrier,
0e5313e2 989 atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
eed8c02e
LC
990 conf->resync_lock,
991 flush_pending_writes(conf));
c3b328ac 992
0e5313e2 993 conf->array_freeze_pending--;
4443ae10
N
994 spin_unlock_irq(&conf->resync_lock);
995}
996
e879a879 997static void unfreeze_array(struct r10conf *conf)
4443ae10
N
998{
999 /* reverse the effect of the freeze */
1000 spin_lock_irq(&conf->resync_lock);
1001 conf->barrier--;
1002 conf->nr_waiting--;
1003 wake_up(&conf->wait_barrier);
1004 spin_unlock_irq(&conf->resync_lock);
1005}
1006
f8c9e74f
N
1007static sector_t choose_data_offset(struct r10bio *r10_bio,
1008 struct md_rdev *rdev)
1009{
1010 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1011 test_bit(R10BIO_Previous, &r10_bio->state))
1012 return rdev->data_offset;
1013 else
1014 return rdev->new_data_offset;
1015}
1016
57c67df4
N
1017struct raid10_plug_cb {
1018 struct blk_plug_cb cb;
1019 struct bio_list pending;
1020 int pending_cnt;
1021};
1022
1023static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1024{
1025 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1026 cb);
1027 struct mddev *mddev = plug->cb.data;
1028 struct r10conf *conf = mddev->private;
1029 struct bio *bio;
1030
874807a8 1031 if (from_schedule || current->bio_list) {
57c67df4
N
1032 spin_lock_irq(&conf->device_lock);
1033 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1034 conf->pending_count += plug->pending_cnt;
1035 spin_unlock_irq(&conf->device_lock);
ee0b0244 1036 wake_up(&conf->wait_barrier);
57c67df4
N
1037 md_wakeup_thread(mddev->thread);
1038 kfree(plug);
1039 return;
1040 }
1041
1042 /* we aren't scheduling, so we can do the write-out directly. */
1043 bio = bio_list_get(&plug->pending);
1044 bitmap_unplug(mddev->bitmap);
1045 wake_up(&conf->wait_barrier);
1046
1047 while (bio) { /* submit pending writes */
1048 struct bio *next = bio->bi_next;
a9ae93c8 1049 struct md_rdev *rdev = (void*)bio->bi_bdev;
57c67df4 1050 bio->bi_next = NULL;
a9ae93c8
N
1051 bio->bi_bdev = rdev->bdev;
1052 if (test_bit(Faulty, &rdev->flags)) {
1053 bio->bi_error = -EIO;
1054 bio_endio(bio);
1055 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1056 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
32f9f570 1057 /* Just ignore it */
4246a0b6 1058 bio_endio(bio);
32f9f570
SL
1059 else
1060 generic_make_request(bio);
57c67df4
N
1061 bio = next;
1062 }
1063 kfree(plug);
1064}
1065
20d0189b 1066static void __make_request(struct mddev *mddev, struct bio *bio)
1da177e4 1067{
e879a879 1068 struct r10conf *conf = mddev->private;
9f2c9d12 1069 struct r10bio *r10_bio;
1da177e4
LT
1070 struct bio *read_bio;
1071 int i;
796a5cf0 1072 const int op = bio_op(bio);
a362357b 1073 const int rw = bio_data_dir(bio);
1eff9d32
JA
1074 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1075 const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
6cce3b23 1076 unsigned long flags;
3cb03002 1077 struct md_rdev *blocked_rdev;
57c67df4
N
1078 struct blk_plug_cb *cb;
1079 struct raid10_plug_cb *plug = NULL;
d4432c23
N
1080 int sectors_handled;
1081 int max_sectors;
3ea7daa5 1082 int sectors;
1da177e4 1083
9b622e2b
TM
1084 md_write_start(mddev, bio);
1085
cc13b1d1
N
1086 /*
1087 * Register the new request and wait if the reconstruction
1088 * thread has put up a bar for new requests.
1089 * Continue immediately if no resync is active currently.
1090 */
1091 wait_barrier(conf);
1092
aa8b57aa 1093 sectors = bio_sectors(bio);
3ea7daa5 1094 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
4f024f37
KO
1095 bio->bi_iter.bi_sector < conf->reshape_progress &&
1096 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
3ea7daa5
N
1097 /* IO spans the reshape position. Need to wait for
1098 * reshape to pass
1099 */
578b54ad 1100 raid10_log(conf->mddev, "wait reshape");
3ea7daa5
N
1101 allow_barrier(conf);
1102 wait_event(conf->wait_barrier,
4f024f37
KO
1103 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1104 conf->reshape_progress >= bio->bi_iter.bi_sector +
1105 sectors);
3ea7daa5
N
1106 wait_barrier(conf);
1107 }
1108 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1109 bio_data_dir(bio) == WRITE &&
1110 (mddev->reshape_backwards
4f024f37
KO
1111 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1112 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1113 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1114 bio->bi_iter.bi_sector < conf->reshape_progress))) {
3ea7daa5
N
1115 /* Need to update reshape_position in metadata */
1116 mddev->reshape_position = conf->reshape_progress;
85ad1d13
GJ
1117 set_mask_bits(&mddev->flags, 0,
1118 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
3ea7daa5 1119 md_wakeup_thread(mddev->thread);
578b54ad 1120 raid10_log(conf->mddev, "wait reshape metadata");
3ea7daa5
N
1121 wait_event(mddev->sb_wait,
1122 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1123
1124 conf->reshape_safe = mddev->reshape_position;
1125 }
1126
1da177e4
LT
1127 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1128
1129 r10_bio->master_bio = bio;
3ea7daa5 1130 r10_bio->sectors = sectors;
1da177e4
LT
1131
1132 r10_bio->mddev = mddev;
4f024f37 1133 r10_bio->sector = bio->bi_iter.bi_sector;
6cce3b23 1134 r10_bio->state = 0;
1da177e4 1135
856e08e2
N
1136 /* We might need to issue multiple reads to different
1137 * devices if there are bad blocks around, so we keep
1138 * track of the number of reads in bio->bi_phys_segments.
1139 * If this is 0, there is only one r10_bio and no locking
1140 * will be needed when the request completes. If it is
1141 * non-zero, then it is the number of not-completed requests.
1142 */
1143 bio->bi_phys_segments = 0;
b7c44ed9 1144 bio_clear_flag(bio, BIO_SEG_VALID);
856e08e2 1145
a362357b 1146 if (rw == READ) {
1da177e4
LT
1147 /*
1148 * read balancing logic:
1149 */
96c3fd1f 1150 struct md_rdev *rdev;
856e08e2
N
1151 int slot;
1152
1153read_again:
96c3fd1f
N
1154 rdev = read_balance(conf, r10_bio, &max_sectors);
1155 if (!rdev) {
1da177e4 1156 raid_end_bio_io(r10_bio);
5a7bbad2 1157 return;
1da177e4 1158 }
96c3fd1f 1159 slot = r10_bio->read_slot;
1da177e4 1160
a167f663 1161 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
4f024f37 1162 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
6678d83f 1163 max_sectors);
1da177e4
LT
1164
1165 r10_bio->devs[slot].bio = read_bio;
abbf098e 1166 r10_bio->devs[slot].rdev = rdev;
1da177e4 1167
4f024f37 1168 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
f8c9e74f 1169 choose_data_offset(r10_bio, rdev);
96c3fd1f 1170 read_bio->bi_bdev = rdev->bdev;
1da177e4 1171 read_bio->bi_end_io = raid10_end_read_request;
796a5cf0 1172 bio_set_op_attrs(read_bio, op, do_sync);
1da177e4
LT
1173 read_bio->bi_private = r10_bio;
1174
109e3765
N
1175 if (mddev->gendisk)
1176 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1177 read_bio, disk_devt(mddev->gendisk),
1178 r10_bio->sector);
856e08e2
N
1179 if (max_sectors < r10_bio->sectors) {
1180 /* Could not read all from this device, so we will
1181 * need another r10_bio.
1182 */
b50c259e 1183 sectors_handled = (r10_bio->sector + max_sectors
4f024f37 1184 - bio->bi_iter.bi_sector);
856e08e2
N
1185 r10_bio->sectors = max_sectors;
1186 spin_lock_irq(&conf->device_lock);
1187 if (bio->bi_phys_segments == 0)
1188 bio->bi_phys_segments = 2;
1189 else
1190 bio->bi_phys_segments++;
b50c259e 1191 spin_unlock_irq(&conf->device_lock);
856e08e2
N
1192 /* Cannot call generic_make_request directly
1193 * as that will be queued in __generic_make_request
1194 * and subsequent mempool_alloc might block
1195 * waiting for it, so hand the bio over to raid10d.
1196 */
1197 reschedule_retry(r10_bio);
1198
1199 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1200
1201 r10_bio->master_bio = bio;
aa8b57aa 1202 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
856e08e2
N
1203 r10_bio->state = 0;
1204 r10_bio->mddev = mddev;
4f024f37
KO
1205 r10_bio->sector = bio->bi_iter.bi_sector +
1206 sectors_handled;
856e08e2
N
1207 goto read_again;
1208 } else
1209 generic_make_request(read_bio);
5a7bbad2 1210 return;
1da177e4
LT
1211 }
1212
1213 /*
1214 * WRITE:
1215 */
34db0cd6
N
1216 if (conf->pending_count >= max_queued_requests) {
1217 md_wakeup_thread(mddev->thread);
578b54ad 1218 raid10_log(mddev, "wait queued");
34db0cd6
N
1219 wait_event(conf->wait_barrier,
1220 conf->pending_count < max_queued_requests);
1221 }
6bfe0b49 1222 /* first select target devices under rcu_lock and
1da177e4
LT
1223 * inc refcount on their rdev. Record them by setting
1224 * bios[x] to bio
d4432c23
N
1225 * If there are known/acknowledged bad blocks on any device
1226 * on which we have seen a write error, we want to avoid
1227 * writing to those blocks. This potentially requires several
1228 * writes to write around the bad blocks. Each set of writes
1229 * gets its own r10_bio with a set of bios attached. The number
1230 * of r10_bios is recorded in bio->bi_phys_segments just as with
1231 * the read case.
1da177e4 1232 */
c3b328ac 1233
69335ef3 1234 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1da177e4 1235 raid10_find_phys(conf, r10_bio);
d4432c23 1236retry_write:
cb6969e8 1237 blocked_rdev = NULL;
1da177e4 1238 rcu_read_lock();
d4432c23
N
1239 max_sectors = r10_bio->sectors;
1240
1da177e4
LT
1241 for (i = 0; i < conf->copies; i++) {
1242 int d = r10_bio->devs[i].devnum;
3cb03002 1243 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
475b0321
N
1244 struct md_rdev *rrdev = rcu_dereference(
1245 conf->mirrors[d].replacement);
4ca40c2c
N
1246 if (rdev == rrdev)
1247 rrdev = NULL;
6bfe0b49
DW
1248 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1249 atomic_inc(&rdev->nr_pending);
1250 blocked_rdev = rdev;
1251 break;
1252 }
475b0321
N
1253 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1254 atomic_inc(&rrdev->nr_pending);
1255 blocked_rdev = rrdev;
1256 break;
1257 }
8ae12666 1258 if (rdev && (test_bit(Faulty, &rdev->flags)))
e7c0c3fa 1259 rdev = NULL;
8ae12666 1260 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
475b0321
N
1261 rrdev = NULL;
1262
d4432c23 1263 r10_bio->devs[i].bio = NULL;
475b0321 1264 r10_bio->devs[i].repl_bio = NULL;
e7c0c3fa
N
1265
1266 if (!rdev && !rrdev) {
6cce3b23 1267 set_bit(R10BIO_Degraded, &r10_bio->state);
d4432c23
N
1268 continue;
1269 }
e7c0c3fa 1270 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
d4432c23
N
1271 sector_t first_bad;
1272 sector_t dev_sector = r10_bio->devs[i].addr;
1273 int bad_sectors;
1274 int is_bad;
1275
1276 is_bad = is_badblock(rdev, dev_sector,
1277 max_sectors,
1278 &first_bad, &bad_sectors);
1279 if (is_bad < 0) {
1280 /* Mustn't write here until the bad block
1281 * is acknowledged
1282 */
1283 atomic_inc(&rdev->nr_pending);
1284 set_bit(BlockedBadBlocks, &rdev->flags);
1285 blocked_rdev = rdev;
1286 break;
1287 }
1288 if (is_bad && first_bad <= dev_sector) {
1289 /* Cannot write here at all */
1290 bad_sectors -= (dev_sector - first_bad);
1291 if (bad_sectors < max_sectors)
1292 /* Mustn't write more than bad_sectors
1293 * to other devices yet
1294 */
1295 max_sectors = bad_sectors;
1296 /* We don't set R10BIO_Degraded as that
1297 * only applies if the disk is missing,
1298 * so it might be re-added, and we want to
1299 * know to recover this chunk.
1300 * In this case the device is here, and the
1301 * fact that this chunk is not in-sync is
1302 * recorded in the bad block log.
1303 */
1304 continue;
1305 }
1306 if (is_bad) {
1307 int good_sectors = first_bad - dev_sector;
1308 if (good_sectors < max_sectors)
1309 max_sectors = good_sectors;
1310 }
6cce3b23 1311 }
e7c0c3fa
N
1312 if (rdev) {
1313 r10_bio->devs[i].bio = bio;
1314 atomic_inc(&rdev->nr_pending);
1315 }
475b0321
N
1316 if (rrdev) {
1317 r10_bio->devs[i].repl_bio = bio;
1318 atomic_inc(&rrdev->nr_pending);
1319 }
1da177e4
LT
1320 }
1321 rcu_read_unlock();
1322
6bfe0b49
DW
1323 if (unlikely(blocked_rdev)) {
1324 /* Have to wait for this device to get unblocked, then retry */
1325 int j;
1326 int d;
1327
475b0321 1328 for (j = 0; j < i; j++) {
6bfe0b49
DW
1329 if (r10_bio->devs[j].bio) {
1330 d = r10_bio->devs[j].devnum;
1331 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1332 }
475b0321 1333 if (r10_bio->devs[j].repl_bio) {
4ca40c2c 1334 struct md_rdev *rdev;
475b0321 1335 d = r10_bio->devs[j].devnum;
4ca40c2c
N
1336 rdev = conf->mirrors[d].replacement;
1337 if (!rdev) {
1338 /* Race with remove_disk */
1339 smp_mb();
1340 rdev = conf->mirrors[d].rdev;
1341 }
1342 rdev_dec_pending(rdev, mddev);
475b0321
N
1343 }
1344 }
6bfe0b49 1345 allow_barrier(conf);
578b54ad 1346 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
6bfe0b49
DW
1347 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1348 wait_barrier(conf);
1349 goto retry_write;
1350 }
1351
d4432c23
N
1352 if (max_sectors < r10_bio->sectors) {
1353 /* We are splitting this into multiple parts, so
1354 * we need to prepare for allocating another r10_bio.
1355 */
1356 r10_bio->sectors = max_sectors;
1357 spin_lock_irq(&conf->device_lock);
1358 if (bio->bi_phys_segments == 0)
1359 bio->bi_phys_segments = 2;
1360 else
1361 bio->bi_phys_segments++;
1362 spin_unlock_irq(&conf->device_lock);
1363 }
4f024f37
KO
1364 sectors_handled = r10_bio->sector + max_sectors -
1365 bio->bi_iter.bi_sector;
d4432c23 1366
4e78064f 1367 atomic_set(&r10_bio->remaining, 1);
d4432c23 1368 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
06d91a5f 1369
1da177e4
LT
1370 for (i = 0; i < conf->copies; i++) {
1371 struct bio *mbio;
1372 int d = r10_bio->devs[i].devnum;
e7c0c3fa
N
1373 if (r10_bio->devs[i].bio) {
1374 struct md_rdev *rdev = conf->mirrors[d].rdev;
1375 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
4f024f37 1376 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
6678d83f 1377 max_sectors);
e7c0c3fa
N
1378 r10_bio->devs[i].bio = mbio;
1379
4f024f37 1380 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
e7c0c3fa
N
1381 choose_data_offset(r10_bio,
1382 rdev));
109e3765 1383 mbio->bi_bdev = rdev->bdev;
e7c0c3fa 1384 mbio->bi_end_io = raid10_end_write_request;
288dab8a 1385 bio_set_op_attrs(mbio, op, do_sync | do_fua);
e7c0c3fa
N
1386 mbio->bi_private = r10_bio;
1387
109e3765
N
1388 if (conf->mddev->gendisk)
1389 trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1390 mbio, disk_devt(conf->mddev->gendisk),
1391 r10_bio->sector);
1392 /* flush_pending_writes() needs access to the rdev so...*/
1393 mbio->bi_bdev = (void*)rdev;
1394
e7c0c3fa
N
1395 atomic_inc(&r10_bio->remaining);
1396
1397 cb = blk_check_plugged(raid10_unplug, mddev,
1398 sizeof(*plug));
1399 if (cb)
1400 plug = container_of(cb, struct raid10_plug_cb,
1401 cb);
1402 else
1403 plug = NULL;
1404 spin_lock_irqsave(&conf->device_lock, flags);
1405 if (plug) {
1406 bio_list_add(&plug->pending, mbio);
1407 plug->pending_cnt++;
1408 } else {
1409 bio_list_add(&conf->pending_bio_list, mbio);
1410 conf->pending_count++;
1411 }
1412 spin_unlock_irqrestore(&conf->device_lock, flags);
1413 if (!plug)
1414 md_wakeup_thread(mddev->thread);
1415 }
57c67df4 1416
e7c0c3fa
N
1417 if (r10_bio->devs[i].repl_bio) {
1418 struct md_rdev *rdev = conf->mirrors[d].replacement;
1419 if (rdev == NULL) {
1420 /* Replacement just got moved to main 'rdev' */
1421 smp_mb();
1422 rdev = conf->mirrors[d].rdev;
1423 }
1424 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
4f024f37 1425 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
6678d83f 1426 max_sectors);
e7c0c3fa
N
1427 r10_bio->devs[i].repl_bio = mbio;
1428
4f024f37 1429 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
e7c0c3fa
N
1430 choose_data_offset(
1431 r10_bio, rdev));
109e3765 1432 mbio->bi_bdev = rdev->bdev;
e7c0c3fa 1433 mbio->bi_end_io = raid10_end_write_request;
288dab8a 1434 bio_set_op_attrs(mbio, op, do_sync | do_fua);
e7c0c3fa
N
1435 mbio->bi_private = r10_bio;
1436
109e3765
N
1437 if (conf->mddev->gendisk)
1438 trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1439 mbio, disk_devt(conf->mddev->gendisk),
1440 r10_bio->sector);
1441 /* flush_pending_writes() needs access to the rdev so...*/
1442 mbio->bi_bdev = (void*)rdev;
1443
e7c0c3fa
N
1444 atomic_inc(&r10_bio->remaining);
1445 spin_lock_irqsave(&conf->device_lock, flags);
57c67df4
N
1446 bio_list_add(&conf->pending_bio_list, mbio);
1447 conf->pending_count++;
e7c0c3fa
N
1448 spin_unlock_irqrestore(&conf->device_lock, flags);
1449 if (!mddev_check_plugged(mddev))
1450 md_wakeup_thread(mddev->thread);
57c67df4 1451 }
1da177e4
LT
1452 }
1453
079fa166
N
1454 /* Don't remove the bias on 'remaining' (one_write_done) until
1455 * after checking if we need to go around again.
1456 */
a35e63ef 1457
aa8b57aa 1458 if (sectors_handled < bio_sectors(bio)) {
079fa166 1459 one_write_done(r10_bio);
5e570289 1460 /* We need another r10_bio. It has already been counted
d4432c23
N
1461 * in bio->bi_phys_segments.
1462 */
1463 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1464
1465 r10_bio->master_bio = bio;
aa8b57aa 1466 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
d4432c23
N
1467
1468 r10_bio->mddev = mddev;
4f024f37 1469 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
d4432c23
N
1470 r10_bio->state = 0;
1471 goto retry_write;
1472 }
079fa166 1473 one_write_done(r10_bio);
20d0189b
KO
1474}
1475
849674e4 1476static void raid10_make_request(struct mddev *mddev, struct bio *bio)
20d0189b
KO
1477{
1478 struct r10conf *conf = mddev->private;
1479 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1480 int chunk_sects = chunk_mask + 1;
1481
1482 struct bio *split;
1483
1eff9d32 1484 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
20d0189b
KO
1485 md_flush_request(mddev, bio);
1486 return;
1487 }
1488
20d0189b
KO
1489 do {
1490
1491 /*
1492 * If this request crosses a chunk boundary, we need to split
1493 * it.
1494 */
1495 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1496 bio_sectors(bio) > chunk_sects
1497 && (conf->geo.near_copies < conf->geo.raid_disks
1498 || conf->prev.near_copies <
1499 conf->prev.raid_disks))) {
1500 split = bio_split(bio, chunk_sects -
1501 (bio->bi_iter.bi_sector &
1502 (chunk_sects - 1)),
1503 GFP_NOIO, fs_bio_set);
1504 bio_chain(split, bio);
1505 } else {
1506 split = bio;
1507 }
1508
1509 __make_request(mddev, split);
1510 } while (split != bio);
079fa166
N
1511
1512 /* In case raid10d snuck in to freeze_array */
1513 wake_up(&conf->wait_barrier);
1da177e4
LT
1514}
1515
849674e4 1516static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1da177e4 1517{
e879a879 1518 struct r10conf *conf = mddev->private;
1da177e4
LT
1519 int i;
1520
5cf00fcd 1521 if (conf->geo.near_copies < conf->geo.raid_disks)
9d8f0363 1522 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
5cf00fcd
N
1523 if (conf->geo.near_copies > 1)
1524 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1525 if (conf->geo.far_copies > 1) {
1526 if (conf->geo.far_offset)
1527 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
c93983bf 1528 else
5cf00fcd 1529 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
8bce6d35
N
1530 if (conf->geo.far_set_size != conf->geo.raid_disks)
1531 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
c93983bf 1532 }
5cf00fcd
N
1533 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1534 conf->geo.raid_disks - mddev->degraded);
d44b0a92
N
1535 rcu_read_lock();
1536 for (i = 0; i < conf->geo.raid_disks; i++) {
1537 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1538 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1539 }
1540 rcu_read_unlock();
1da177e4
LT
1541 seq_printf(seq, "]");
1542}
1543
700c7213
N
1544/* check if there are enough drives for
1545 * every block to appear on at least one.
1546 * Don't consider the device numbered 'ignore'
1547 * as we might be about to remove it.
1548 */
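/*
 * Example (taking far_copies == 1 for simplicity): with raid_disks=4
 * and near_copies=2 the loop below walks the copy sets {0,1} and
 * {2,3}; the array counts as "enough" only while every set still has
 * an In_sync member other than 'ignore'.
 */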
635f6416 1549static int _enough(struct r10conf *conf, int previous, int ignore)
700c7213
N
1550{
1551 int first = 0;
725d6e57 1552 int has_enough = 0;
635f6416
N
1553 int disks, ncopies;
1554 if (previous) {
1555 disks = conf->prev.raid_disks;
1556 ncopies = conf->prev.near_copies;
1557 } else {
1558 disks = conf->geo.raid_disks;
1559 ncopies = conf->geo.near_copies;
1560 }
700c7213 1561
725d6e57 1562 rcu_read_lock();
700c7213
N
1563 do {
1564 int n = conf->copies;
1565 int cnt = 0;
80b48124 1566 int this = first;
700c7213 1567 while (n--) {
725d6e57
N
1568 struct md_rdev *rdev;
1569 if (this != ignore &&
1570 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1571 test_bit(In_sync, &rdev->flags))
700c7213 1572 cnt++;
635f6416 1573 this = (this+1) % disks;
700c7213
N
1574 }
1575 if (cnt == 0)
725d6e57 1576 goto out;
635f6416 1577 first = (first + ncopies) % disks;
700c7213 1578 } while (first != 0);
725d6e57
N
1579 has_enough = 1;
1580out:
1581 rcu_read_unlock();
1582 return has_enough;
700c7213
N
1583}
1584
f8c9e74f
N
1585static int enough(struct r10conf *conf, int ignore)
1586{
635f6416
N
1587 /* when calling 'enough', both 'prev' and 'geo' must
1588 * be stable.
1589 * This is ensured if ->reconfig_mutex or ->device_lock
1590 * is held.
1591 */
1592 return _enough(conf, 0, ignore) &&
1593 _enough(conf, 1, ignore);
f8c9e74f
N
1594}
1595
849674e4 1596static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1da177e4
LT
1597{
1598 char b[BDEVNAME_SIZE];
e879a879 1599 struct r10conf *conf = mddev->private;
635f6416 1600 unsigned long flags;
1da177e4
LT
1601
1602 /*
1603 * If it is not operational, then we have already marked it as dead
1604 * else if it is the last working disk, ignore the error, let the
1605 * next level up know.
1606 * else mark the drive as failed
1607 */
635f6416 1608 spin_lock_irqsave(&conf->device_lock, flags);
b2d444d7 1609 if (test_bit(In_sync, &rdev->flags)
635f6416 1610 && !enough(conf, rdev->raid_disk)) {
1da177e4
LT
1611 /*
1612 * Don't fail the drive, just return an IO error.
1da177e4 1613 */
635f6416 1614 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 1615 return;
635f6416 1616 }
2446dba0 1617 if (test_and_clear_bit(In_sync, &rdev->flags))
1da177e4 1618 mddev->degraded++;
2446dba0
N
1619 /*
1620 * If recovery is running, make sure it aborts.
1621 */
1622 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
de393cde 1623 set_bit(Blocked, &rdev->flags);
b2d444d7 1624 set_bit(Faulty, &rdev->flags);
85ad1d13
GJ
1625 set_mask_bits(&mddev->flags, 0,
1626 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
635f6416 1627 spin_unlock_irqrestore(&conf->device_lock, flags);
08464e09
N
1628 pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
1629 "md/raid10:%s: Operation continuing on %d devices.\n",
1630 mdname(mddev), bdevname(rdev->bdev, b),
1631 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1da177e4
LT
1632}
1633
e879a879 1634static void print_conf(struct r10conf *conf)
1da177e4
LT
1635{
1636 int i;
4056ca51 1637 struct md_rdev *rdev;
1da177e4 1638
08464e09 1639 pr_debug("RAID10 conf printout:\n");
1da177e4 1640 if (!conf) {
08464e09 1641 pr_debug("(!conf)\n");
1da177e4
LT
1642 return;
1643 }
08464e09
N
1644 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1645 conf->geo.raid_disks);
1da177e4 1646
4056ca51
N
 1647 /* This is only called with ->reconfig_mutex held, so
1648 * rcu protection of rdev is not needed */
5cf00fcd 1649 for (i = 0; i < conf->geo.raid_disks; i++) {
1da177e4 1650 char b[BDEVNAME_SIZE];
4056ca51
N
1651 rdev = conf->mirrors[i].rdev;
1652 if (rdev)
08464e09
N
1653 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1654 i, !test_bit(In_sync, &rdev->flags),
1655 !test_bit(Faulty, &rdev->flags),
1656 bdevname(rdev->bdev,b));
1da177e4
LT
1657 }
1658}
1659
e879a879 1660static void close_sync(struct r10conf *conf)
1da177e4 1661{
0a27ec96
N
1662 wait_barrier(conf);
1663 allow_barrier(conf);
1da177e4
LT
1664
1665 mempool_destroy(conf->r10buf_pool);
1666 conf->r10buf_pool = NULL;
1667}
1668
fd01b88c 1669static int raid10_spare_active(struct mddev *mddev)
1da177e4
LT
1670{
1671 int i;
e879a879 1672 struct r10conf *conf = mddev->private;
dc280d98 1673 struct raid10_info *tmp;
6b965620
N
1674 int count = 0;
1675 unsigned long flags;
1da177e4
LT
1676
1677 /*
1678 * Find all non-in_sync disks within the RAID10 configuration
1679 * and mark them in_sync
1680 */
5cf00fcd 1681 for (i = 0; i < conf->geo.raid_disks; i++) {
1da177e4 1682 tmp = conf->mirrors + i;
4ca40c2c
N
1683 if (tmp->replacement
1684 && tmp->replacement->recovery_offset == MaxSector
1685 && !test_bit(Faulty, &tmp->replacement->flags)
1686 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1687 /* Replacement has just become active */
1688 if (!tmp->rdev
1689 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1690 count++;
1691 if (tmp->rdev) {
1692 /* Replaced device not technically faulty,
1693 * but we need to be sure it gets removed
1694 * and never re-added.
1695 */
1696 set_bit(Faulty, &tmp->rdev->flags);
1697 sysfs_notify_dirent_safe(
1698 tmp->rdev->sysfs_state);
1699 }
1700 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1701 } else if (tmp->rdev
61e4947c 1702 && tmp->rdev->recovery_offset == MaxSector
4ca40c2c
N
1703 && !test_bit(Faulty, &tmp->rdev->flags)
1704 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
6b965620 1705 count++;
2863b9eb 1706 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1da177e4
LT
1707 }
1708 }
6b965620
N
1709 spin_lock_irqsave(&conf->device_lock, flags);
1710 mddev->degraded -= count;
1711 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4
LT
1712
1713 print_conf(conf);
6b965620 1714 return count;
1da177e4
LT
1715}
1716
fd01b88c 1717static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1da177e4 1718{
e879a879 1719 struct r10conf *conf = mddev->private;
199050ea 1720 int err = -EEXIST;
1da177e4 1721 int mirror;
6c2fce2e 1722 int first = 0;
5cf00fcd 1723 int last = conf->geo.raid_disks - 1;
1da177e4
LT
1724
1725 if (mddev->recovery_cp < MaxSector)
1726 /* only hot-add to in-sync arrays, as recovery is
1727 * very different from resync
1728 */
199050ea 1729 return -EBUSY;
635f6416 1730 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
199050ea 1731 return -EINVAL;
1da177e4 1732
1501efad
DW
1733 if (md_integrity_add_rdev(rdev, mddev))
1734 return -ENXIO;
1735
a53a6c85 1736 if (rdev->raid_disk >= 0)
6c2fce2e 1737 first = last = rdev->raid_disk;
1da177e4 1738
2c4193df 1739 if (rdev->saved_raid_disk >= first &&
6cce3b23
N
1740 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1741 mirror = rdev->saved_raid_disk;
1742 else
6c2fce2e 1743 mirror = first;
2bb77736 1744 for ( ; mirror <= last ; mirror++) {
dc280d98 1745 struct raid10_info *p = &conf->mirrors[mirror];
2bb77736
N
1746 if (p->recovery_disabled == mddev->recovery_disabled)
1747 continue;
b7044d41
N
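 /* Slot already has an active device; we can only join as its
  * replacement, and only if a replacement has been requested and
  * none is installed yet.
  */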
1748 if (p->rdev) {
1749 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1750 p->replacement != NULL)
1751 continue;
1752 clear_bit(In_sync, &rdev->flags);
1753 set_bit(Replacement, &rdev->flags);
1754 rdev->raid_disk = mirror;
1755 err = 0;
9092c02d
JB
1756 if (mddev->gendisk)
1757 disk_stack_limits(mddev->gendisk, rdev->bdev,
1758 rdev->data_offset << 9);
b7044d41
N
1759 conf->fullsync = 1;
1760 rcu_assign_pointer(p->replacement, rdev);
1761 break;
1762 }
1da177e4 1763
9092c02d
JB
1764 if (mddev->gendisk)
1765 disk_stack_limits(mddev->gendisk, rdev->bdev,
1766 rdev->data_offset << 9);
1da177e4 1767
2bb77736 1768 p->head_position = 0;
d890fa2b 1769 p->recovery_disabled = mddev->recovery_disabled - 1;
2bb77736
N
1770 rdev->raid_disk = mirror;
1771 err = 0;
1772 if (rdev->saved_raid_disk != mirror)
1773 conf->fullsync = 1;
1774 rcu_assign_pointer(p->rdev, rdev);
1775 break;
1776 }
ed30be07 1777 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
532a2a3f
SL
1778 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1779
1da177e4 1780 print_conf(conf);
199050ea 1781 return err;
1da177e4
LT
1782}
1783
b8321b68 1784static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1da177e4 1785{
e879a879 1786 struct r10conf *conf = mddev->private;
1da177e4 1787 int err = 0;
b8321b68 1788 int number = rdev->raid_disk;
c8ab903e 1789 struct md_rdev **rdevp;
dc280d98 1790 struct raid10_info *p = conf->mirrors + number;
1da177e4
LT
1791
1792 print_conf(conf);
c8ab903e
N
1793 if (rdev == p->rdev)
1794 rdevp = &p->rdev;
1795 else if (rdev == p->replacement)
1796 rdevp = &p->replacement;
1797 else
1798 return 0;
1799
1800 if (test_bit(In_sync, &rdev->flags) ||
1801 atomic_read(&rdev->nr_pending)) {
1802 err = -EBUSY;
1803 goto abort;
1804 }
d787be40 1805 /* Only remove non-faulty devices if recovery
c8ab903e
N
1806 * is not possible.
1807 */
1808 if (!test_bit(Faulty, &rdev->flags) &&
1809 mddev->recovery_disabled != p->recovery_disabled &&
4ca40c2c 1810 (!p->replacement || p->replacement == rdev) &&
63aced61 1811 number < conf->geo.raid_disks &&
c8ab903e
N
1812 enough(conf, -1)) {
1813 err = -EBUSY;
1814 goto abort;
1da177e4 1815 }
c8ab903e 1816 *rdevp = NULL;
d787be40
N
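 /* If RemoveSynchronized is not already set, do our own RCU grace
  * period and re-check nr_pending: a racing request means we lost,
  * so restore the pointer and return -EBUSY.
  */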
1817 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1818 synchronize_rcu();
1819 if (atomic_read(&rdev->nr_pending)) {
1820 /* lost the race, try later */
1821 err = -EBUSY;
1822 *rdevp = rdev;
1823 goto abort;
1824 }
1825 }
1826 if (p->replacement) {
4ca40c2c
N
1827 /* We must have just cleared 'rdev' */
1828 p->rdev = p->replacement;
1829 clear_bit(Replacement, &p->replacement->flags);
1830 smp_mb(); /* Make sure other CPUs may see both as identical
 1831 * but will never see neither of them -- if they are careful.
1832 */
1833 p->replacement = NULL;
1834 clear_bit(WantReplacement, &rdev->flags);
1835 } else
1836 /* We might have just remove the Replacement as faulty
1837 * Clear the flag just in case
1838 */
1839 clear_bit(WantReplacement, &rdev->flags);
1840
c8ab903e
N
1841 err = md_integrity_register(mddev);
1842
1da177e4
LT
1843abort:
1844
1845 print_conf(conf);
1846 return err;
1847}
1848
4246a0b6 1849static void end_sync_read(struct bio *bio)
1da177e4 1850{
9f2c9d12 1851 struct r10bio *r10_bio = bio->bi_private;
e879a879 1852 struct r10conf *conf = r10_bio->mddev->private;
778ca018 1853 int d;
1da177e4 1854
3ea7daa5
N
1855 if (bio == r10_bio->master_bio) {
1856 /* this is a reshape read */
1857 d = r10_bio->read_slot; /* really the read dev */
1858 } else
1859 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
0eb3ff12 1860
4246a0b6 1861 if (!bio->bi_error)
0eb3ff12 1862 set_bit(R10BIO_Uptodate, &r10_bio->state);
e684e41d
N
1863 else
1864 /* The write handler will notice the lack of
1865 * R10BIO_Uptodate and record any errors etc
1866 */
4dbcdc75
N
1867 atomic_add(r10_bio->sectors,
1868 &conf->mirrors[d].rdev->corrected_errors);
1da177e4
LT
1869
1870 /* for reconstruct, we always reschedule after a read.
1871 * for resync, only after all reads
1872 */
73d5c38a 1873 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1da177e4
LT
1874 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1875 atomic_dec_and_test(&r10_bio->remaining)) {
1876 /* we have read all the blocks,
1877 * do the comparison in process context in raid10d
1878 */
1879 reschedule_retry(r10_bio);
1880 }
1da177e4
LT
1881}
1882
9f2c9d12 1883static void end_sync_request(struct r10bio *r10_bio)
1da177e4 1884{
fd01b88c 1885 struct mddev *mddev = r10_bio->mddev;
dfc70645 1886
1da177e4
LT
1887 while (atomic_dec_and_test(&r10_bio->remaining)) {
1888 if (r10_bio->master_bio == NULL) {
1889 /* the primary of several recovery bios */
73d5c38a 1890 sector_t s = r10_bio->sectors;
1a0b7cd8
N
1891 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1892 test_bit(R10BIO_WriteError, &r10_bio->state))
749c55e9
N
1893 reschedule_retry(r10_bio);
1894 else
1895 put_buf(r10_bio);
73d5c38a 1896 md_done_sync(mddev, s, 1);
1da177e4
LT
1897 break;
1898 } else {
9f2c9d12 1899 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1a0b7cd8
N
1900 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1901 test_bit(R10BIO_WriteError, &r10_bio->state))
749c55e9
N
1902 reschedule_retry(r10_bio);
1903 else
1904 put_buf(r10_bio);
1da177e4
LT
1905 r10_bio = r10_bio2;
1906 }
1907 }
1da177e4
LT
1908}
1909
4246a0b6 1910static void end_sync_write(struct bio *bio)
5e570289 1911{
9f2c9d12 1912 struct r10bio *r10_bio = bio->bi_private;
fd01b88c 1913 struct mddev *mddev = r10_bio->mddev;
e879a879 1914 struct r10conf *conf = mddev->private;
5e570289
N
1915 int d;
1916 sector_t first_bad;
1917 int bad_sectors;
1918 int slot;
9ad1aefc 1919 int repl;
4ca40c2c 1920 struct md_rdev *rdev = NULL;
5e570289 1921
9ad1aefc
N
1922 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1923 if (repl)
1924 rdev = conf->mirrors[d].replacement;
547414d1 1925 else
9ad1aefc 1926 rdev = conf->mirrors[d].rdev;
5e570289 1927
4246a0b6 1928 if (bio->bi_error) {
9ad1aefc
N
1929 if (repl)
1930 md_error(mddev, rdev);
1931 else {
1932 set_bit(WriteErrorSeen, &rdev->flags);
b7044d41
N
1933 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1934 set_bit(MD_RECOVERY_NEEDED,
1935 &rdev->mddev->recovery);
9ad1aefc
N
1936 set_bit(R10BIO_WriteError, &r10_bio->state);
1937 }
1938 } else if (is_badblock(rdev,
5e570289
N
1939 r10_bio->devs[slot].addr,
1940 r10_bio->sectors,
1941 &first_bad, &bad_sectors))
1942 set_bit(R10BIO_MadeGood, &r10_bio->state);
1943
9ad1aefc 1944 rdev_dec_pending(rdev, mddev);
5e570289
N
1945
1946 end_sync_request(r10_bio);
1947}
1948
1da177e4
LT
1949/*
 1950 * Note: sync and recovery are handled very differently for raid10.
1951 * This code is for resync.
1952 * For resync, we read through virtual addresses and read all blocks.
1953 * If there is any error, we schedule a write. The lowest numbered
1954 * drive is authoritative.
 1955 * However requests come for physical addresses, so we need to map.
 1956 * For every physical address there are raid_disks/copies virtual addresses,
 1957 * which is always at least one, but is not necessarily an integer.
1958 * This means that a physical address can span multiple chunks, so we may
1959 * have to submit multiple io requests for a single sync request.
1960 */
1961/*
1962 * We check if all blocks are in-sync and only write to blocks that
1963 * aren't in sync
1964 */
9f2c9d12 1965static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1da177e4 1966{
e879a879 1967 struct r10conf *conf = mddev->private;
1da177e4
LT
1968 int i, first;
1969 struct bio *tbio, *fbio;
f4380a91 1970 int vcnt;
1da177e4
LT
1971
1972 atomic_set(&r10_bio->remaining, 1);
1973
1974 /* find the first device with a block */
1975 for (i=0; i<conf->copies; i++)
4246a0b6 1976 if (!r10_bio->devs[i].bio->bi_error)
1da177e4
LT
1977 break;
1978
1979 if (i == conf->copies)
1980 goto done;
1981
1982 first = i;
1983 fbio = r10_bio->devs[i].bio;
cc578588
AP
1984 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
1985 fbio->bi_iter.bi_idx = 0;
1da177e4 1986
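 /* Number of PAGE_SIZE vec entries needed to cover ->sectors, rounded up. */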
f4380a91 1987 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1da177e4 1988 /* now find blocks with errors */
0eb3ff12
N
1989 for (i=0 ; i < conf->copies ; i++) {
1990 int j, d;
1da177e4 1991
1da177e4 1992 tbio = r10_bio->devs[i].bio;
0eb3ff12
N
1993
1994 if (tbio->bi_end_io != end_sync_read)
1995 continue;
1996 if (i == first)
1da177e4 1997 continue;
4246a0b6 1998 if (!r10_bio->devs[i].bio->bi_error) {
0eb3ff12
N
1999 /* We know that the bi_io_vec layout is the same for
2000 * both 'first' and 'i', so we just compare them.
2001 * All vec entries are PAGE_SIZE;
2002 */
7bb23c49
N
2003 int sectors = r10_bio->sectors;
2004 for (j = 0; j < vcnt; j++) {
2005 int len = PAGE_SIZE;
2006 if (sectors < (len / 512))
2007 len = sectors * 512;
0eb3ff12
N
2008 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2009 page_address(tbio->bi_io_vec[j].bv_page),
7bb23c49 2010 len))
0eb3ff12 2011 break;
7bb23c49
N
2012 sectors -= len/512;
2013 }
0eb3ff12
N
2014 if (j == vcnt)
2015 continue;
7f7583d4 2016 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
f84ee364
N
2017 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2018 /* Don't fix anything. */
2019 continue;
0eb3ff12 2020 }
f84ee364
N
2021 /* Ok, we need to write this bio, either to correct an
2022 * inconsistency or to correct an unreadable block.
1da177e4
LT
2023 * First we need to fixup bv_offset, bv_len and
2024 * bi_vecs, as the read request might have corrupted these
2025 */
8be185f2
KO
2026 bio_reset(tbio);
2027
1da177e4 2028 tbio->bi_vcnt = vcnt;
cc578588 2029 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
1da177e4 2030 tbio->bi_private = r10_bio;
4f024f37 2031 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
1da177e4 2032 tbio->bi_end_io = end_sync_write;
796a5cf0 2033 bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
1da177e4 2034
c31df25f
KO
2035 bio_copy_data(tbio, fbio);
2036
1da177e4
LT
2037 d = r10_bio->devs[i].devnum;
2038 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2039 atomic_inc(&r10_bio->remaining);
aa8b57aa 2040 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
1da177e4 2041
4f024f37 2042 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
1da177e4
LT
2043 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2044 generic_make_request(tbio);
2045 }
2046
9ad1aefc
N
2047 /* Now write out to any replacement devices
2048 * that are active
2049 */
2050 for (i = 0; i < conf->copies; i++) {
c31df25f 2051 int d;
9ad1aefc
N
2052
2053 tbio = r10_bio->devs[i].repl_bio;
2054 if (!tbio || !tbio->bi_end_io)
2055 continue;
2056 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2057 && r10_bio->devs[i].bio != fbio)
c31df25f 2058 bio_copy_data(tbio, fbio);
9ad1aefc
N
2059 d = r10_bio->devs[i].devnum;
2060 atomic_inc(&r10_bio->remaining);
2061 md_sync_acct(conf->mirrors[d].replacement->bdev,
aa8b57aa 2062 bio_sectors(tbio));
9ad1aefc
N
2063 generic_make_request(tbio);
2064 }
2065
1da177e4
LT
2066done:
2067 if (atomic_dec_and_test(&r10_bio->remaining)) {
2068 md_done_sync(mddev, r10_bio->sectors, 1);
2069 put_buf(r10_bio);
2070 }
2071}
2072
2073/*
2074 * Now for the recovery code.
2075 * Recovery happens across physical sectors.
 2076 * We recover all non-in_sync drives by finding the virtual address of
2077 * each, and then choose a working drive that also has that virt address.
2078 * There is a separate r10_bio for each non-in_sync drive.
 2079 * Only the first two slots are in use: the first for reading,
 2080 * the second for writing.
2081 *
2082 */
9f2c9d12 2083static void fix_recovery_read_error(struct r10bio *r10_bio)
5e570289
N
2084{
2085 /* We got a read error during recovery.
2086 * We repeat the read in smaller page-sized sections.
2087 * If a read succeeds, write it to the new device or record
2088 * a bad block if we cannot.
2089 * If a read fails, record a bad block on both old and
2090 * new devices.
2091 */
fd01b88c 2092 struct mddev *mddev = r10_bio->mddev;
e879a879 2093 struct r10conf *conf = mddev->private;
5e570289
N
2094 struct bio *bio = r10_bio->devs[0].bio;
2095 sector_t sect = 0;
2096 int sectors = r10_bio->sectors;
2097 int idx = 0;
2098 int dr = r10_bio->devs[0].devnum;
2099 int dw = r10_bio->devs[1].devnum;
2100
2101 while (sectors) {
2102 int s = sectors;
3cb03002 2103 struct md_rdev *rdev;
5e570289
N
2104 sector_t addr;
2105 int ok;
2106
2107 if (s > (PAGE_SIZE>>9))
2108 s = PAGE_SIZE >> 9;
2109
2110 rdev = conf->mirrors[dr].rdev;
 2111 addr = r10_bio->devs[0].addr + sect;
2112 ok = sync_page_io(rdev,
2113 addr,
2114 s << 9,
2115 bio->bi_io_vec[idx].bv_page,
796a5cf0 2116 REQ_OP_READ, 0, false);
5e570289
N
2117 if (ok) {
2118 rdev = conf->mirrors[dw].rdev;
2119 addr = r10_bio->devs[1].addr + sect;
2120 ok = sync_page_io(rdev,
2121 addr,
2122 s << 9,
2123 bio->bi_io_vec[idx].bv_page,
796a5cf0 2124 REQ_OP_WRITE, 0, false);
b7044d41 2125 if (!ok) {
5e570289 2126 set_bit(WriteErrorSeen, &rdev->flags);
b7044d41
N
2127 if (!test_and_set_bit(WantReplacement,
2128 &rdev->flags))
2129 set_bit(MD_RECOVERY_NEEDED,
2130 &rdev->mddev->recovery);
2131 }
5e570289
N
2132 }
2133 if (!ok) {
2134 /* We don't worry if we cannot set a bad block -
2135 * it really is bad so there is no loss in not
2136 * recording it yet
2137 */
2138 rdev_set_badblocks(rdev, addr, s, 0);
2139
2140 if (rdev != conf->mirrors[dw].rdev) {
2141 /* need bad block on destination too */
3cb03002 2142 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
5e570289
N
2143 addr = r10_bio->devs[1].addr + sect;
2144 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2145 if (!ok) {
2146 /* just abort the recovery */
08464e09
N
2147 pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2148 mdname(mddev));
5e570289
N
2149
2150 conf->mirrors[dw].recovery_disabled
2151 = mddev->recovery_disabled;
2152 set_bit(MD_RECOVERY_INTR,
2153 &mddev->recovery);
2154 break;
2155 }
2156 }
2157 }
2158
2159 sectors -= s;
2160 sect += s;
2161 idx++;
2162 }
2163}
1da177e4 2164
9f2c9d12 2165static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1da177e4 2166{
e879a879 2167 struct r10conf *conf = mddev->private;
c65060ad 2168 int d;
24afd80d 2169 struct bio *wbio, *wbio2;
1da177e4 2170
5e570289
N
2171 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2172 fix_recovery_read_error(r10_bio);
2173 end_sync_request(r10_bio);
2174 return;
2175 }
2176
c65060ad
NK
2177 /*
2178 * share the pages with the first bio
1da177e4
LT
2179 * and submit the write request
2180 */
1da177e4 2181 d = r10_bio->devs[1].devnum;
24afd80d
N
2182 wbio = r10_bio->devs[1].bio;
2183 wbio2 = r10_bio->devs[1].repl_bio;
0eb25bb0
N
2184 /* Need to test wbio2->bi_end_io before we call
2185 * generic_make_request as if the former is NULL,
2186 * the latter is free to free wbio2.
2187 */
2188 if (wbio2 && !wbio2->bi_end_io)
2189 wbio2 = NULL;
24afd80d
N
2190 if (wbio->bi_end_io) {
2191 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
aa8b57aa 2192 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
24afd80d
N
2193 generic_make_request(wbio);
2194 }
0eb25bb0 2195 if (wbio2) {
24afd80d
N
2196 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2197 md_sync_acct(conf->mirrors[d].replacement->bdev,
aa8b57aa 2198 bio_sectors(wbio2));
24afd80d
N
2199 generic_make_request(wbio2);
2200 }
1da177e4
LT
2201}
2202
1e50915f
RB
2203/*
2204 * Used by fix_read_error() to decay the per rdev read_errors.
2205 * We halve the read error count for every hour that has elapsed
2206 * since the last recorded read error.
2207 *
2208 */
fd01b88c 2209static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
1e50915f 2210{
0e3ef49e 2211 long cur_time_mon;
1e50915f
RB
2212 unsigned long hours_since_last;
2213 unsigned int read_errors = atomic_read(&rdev->read_errors);
2214
0e3ef49e 2215 cur_time_mon = ktime_get_seconds();
1e50915f 2216
0e3ef49e 2217 if (rdev->last_read_error == 0) {
1e50915f
RB
2218 /* first time we've seen a read error */
2219 rdev->last_read_error = cur_time_mon;
2220 return;
2221 }
2222
0e3ef49e
AB
2223 hours_since_last = (long)(cur_time_mon -
2224 rdev->last_read_error) / 3600;
1e50915f
RB
2225
2226 rdev->last_read_error = cur_time_mon;
2227
2228 /*
2229 * if hours_since_last is > the number of bits in read_errors
2230 * just set read errors to 0. We do this to avoid
2231 * overflowing the shift of read_errors by hours_since_last.
2232 */
2233 if (hours_since_last >= 8 * sizeof(read_errors))
2234 atomic_set(&rdev->read_errors, 0);
2235 else
2236 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2237}
2238
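/* Returns 1 if the I/O succeeded, 0 if it failed (a bad block is recorded
 * or the device is failed), and -1 if the range overlaps a known bad block
 * and the I/O is not attempted (always for reads; for writes only when
 * WriteErrorSeen is already set).
 */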
3cb03002 2239static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
58c54fcc
N
2240 int sectors, struct page *page, int rw)
2241{
2242 sector_t first_bad;
2243 int bad_sectors;
2244
2245 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2246 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2247 return -1;
796a5cf0 2248 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
58c54fcc
N
2249 /* success */
2250 return 1;
b7044d41 2251 if (rw == WRITE) {
58c54fcc 2252 set_bit(WriteErrorSeen, &rdev->flags);
b7044d41
N
2253 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2254 set_bit(MD_RECOVERY_NEEDED,
2255 &rdev->mddev->recovery);
2256 }
58c54fcc
N
2257 /* need to record an error - either for the block or the device */
2258 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2259 md_error(rdev->mddev, rdev);
2260 return 0;
2261}
2262
1da177e4
LT
2263/*
2264 * This is a kernel thread which:
2265 *
2266 * 1. Retries failed read operations on working mirrors.
 2267 * 2. Updates the raid superblock when problems are encountered.
6814d536 2268 * 3. Performs writes following reads for array synchronising.
1da177e4
LT
2269 */
2270
e879a879 2271static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
6814d536
N
2272{
2273 int sect = 0; /* Offset from r10_bio->sector */
2274 int sectors = r10_bio->sectors;
3cb03002 2275 struct md_rdev*rdev;
1e50915f 2276 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
0544a21d 2277 int d = r10_bio->devs[r10_bio->read_slot].devnum;
1e50915f 2278
7c4e06ff
N
2279 /* still own a reference to this rdev, so it cannot
2280 * have been cleared recently.
2281 */
2282 rdev = conf->mirrors[d].rdev;
1e50915f 2283
7c4e06ff
N
2284 if (test_bit(Faulty, &rdev->flags))
2285 /* drive has already been failed, just ignore any
2286 more fix_read_error() attempts */
2287 return;
1e50915f 2288
7c4e06ff
N
2289 check_decay_read_errors(mddev, rdev);
2290 atomic_inc(&rdev->read_errors);
2291 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2292 char b[BDEVNAME_SIZE];
2293 bdevname(rdev->bdev, b);
1e50915f 2294
08464e09
N
2295 pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2296 mdname(mddev), b,
2297 atomic_read(&rdev->read_errors), max_read_errors);
2298 pr_notice("md/raid10:%s: %s: Failing raid device\n",
2299 mdname(mddev), b);
d683c8e0 2300 md_error(mddev, rdev);
fae8cc5e 2301 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
7c4e06ff 2302 return;
1e50915f 2303 }
1e50915f 2304
6814d536
N
2305 while(sectors) {
2306 int s = sectors;
2307 int sl = r10_bio->read_slot;
2308 int success = 0;
2309 int start;
2310
2311 if (s > (PAGE_SIZE>>9))
2312 s = PAGE_SIZE >> 9;
2313
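 /* Try each copy in turn, starting at the slot that failed,
  * until one read of this range succeeds.
  */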
2314 rcu_read_lock();
2315 do {
8dbed5ce
N
2316 sector_t first_bad;
2317 int bad_sectors;
2318
0544a21d 2319 d = r10_bio->devs[sl].devnum;
6814d536
N
2320 rdev = rcu_dereference(conf->mirrors[d].rdev);
2321 if (rdev &&
8dbed5ce 2322 test_bit(In_sync, &rdev->flags) &&
f5b67ae8 2323 !test_bit(Faulty, &rdev->flags) &&
8dbed5ce
N
2324 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2325 &first_bad, &bad_sectors) == 0) {
6814d536
N
2326 atomic_inc(&rdev->nr_pending);
2327 rcu_read_unlock();
2b193363 2328 success = sync_page_io(rdev,
6814d536 2329 r10_bio->devs[sl].addr +
ccebd4c4 2330 sect,
6814d536 2331 s<<9,
796a5cf0
MC
2332 conf->tmppage,
2333 REQ_OP_READ, 0, false);
6814d536
N
2334 rdev_dec_pending(rdev, mddev);
2335 rcu_read_lock();
2336 if (success)
2337 break;
2338 }
2339 sl++;
2340 if (sl == conf->copies)
2341 sl = 0;
2342 } while (!success && sl != r10_bio->read_slot);
2343 rcu_read_unlock();
2344
2345 if (!success) {
58c54fcc
N
2346 /* Cannot read from anywhere, just mark the block
2347 * as bad on the first device to discourage future
2348 * reads.
2349 */
6814d536 2350 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
58c54fcc
N
2351 rdev = conf->mirrors[dn].rdev;
2352
2353 if (!rdev_set_badblocks(
2354 rdev,
2355 r10_bio->devs[r10_bio->read_slot].addr
2356 + sect,
fae8cc5e 2357 s, 0)) {
58c54fcc 2358 md_error(mddev, rdev);
fae8cc5e
N
2359 r10_bio->devs[r10_bio->read_slot].bio
2360 = IO_BLOCKED;
2361 }
6814d536
N
2362 break;
2363 }
2364
2365 start = sl;
2366 /* write it back and re-read */
2367 rcu_read_lock();
2368 while (sl != r10_bio->read_slot) {
67b8dc4b 2369 char b[BDEVNAME_SIZE];
0544a21d 2370
6814d536
N
2371 if (sl==0)
2372 sl = conf->copies;
2373 sl--;
2374 d = r10_bio->devs[sl].devnum;
2375 rdev = rcu_dereference(conf->mirrors[d].rdev);
1294b9c9 2376 if (!rdev ||
f5b67ae8 2377 test_bit(Faulty, &rdev->flags) ||
1294b9c9
N
2378 !test_bit(In_sync, &rdev->flags))
2379 continue;
2380
2381 atomic_inc(&rdev->nr_pending);
2382 rcu_read_unlock();
58c54fcc
N
2383 if (r10_sync_page_io(rdev,
2384 r10_bio->devs[sl].addr +
2385 sect,
055d3747 2386 s, conf->tmppage, WRITE)
1294b9c9
N
2387 == 0) {
2388 /* Well, this device is dead */
08464e09
N
2389 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2390 mdname(mddev), s,
2391 (unsigned long long)(
2392 sect +
2393 choose_data_offset(r10_bio,
2394 rdev)),
2395 bdevname(rdev->bdev, b));
2396 pr_notice("md/raid10:%s: %s: failing drive\n",
2397 mdname(mddev),
2398 bdevname(rdev->bdev, b));
6814d536 2399 }
1294b9c9
N
2400 rdev_dec_pending(rdev, mddev);
2401 rcu_read_lock();
6814d536
N
2402 }
2403 sl = start;
2404 while (sl != r10_bio->read_slot) {
1294b9c9 2405 char b[BDEVNAME_SIZE];
0544a21d 2406
6814d536
N
2407 if (sl==0)
2408 sl = conf->copies;
2409 sl--;
2410 d = r10_bio->devs[sl].devnum;
2411 rdev = rcu_dereference(conf->mirrors[d].rdev);
1294b9c9 2412 if (!rdev ||
f5b67ae8 2413 test_bit(Faulty, &rdev->flags) ||
1294b9c9
N
2414 !test_bit(In_sync, &rdev->flags))
2415 continue;
6814d536 2416
1294b9c9
N
2417 atomic_inc(&rdev->nr_pending);
2418 rcu_read_unlock();
58c54fcc
N
2419 switch (r10_sync_page_io(rdev,
2420 r10_bio->devs[sl].addr +
2421 sect,
055d3747 2422 s, conf->tmppage,
58c54fcc
N
2423 READ)) {
2424 case 0:
1294b9c9 2425 /* Well, this device is dead */
08464e09 2426 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
1294b9c9
N
2427 mdname(mddev), s,
2428 (unsigned long long)(
f8c9e74f
N
2429 sect +
2430 choose_data_offset(r10_bio, rdev)),
1294b9c9 2431 bdevname(rdev->bdev, b));
08464e09 2432 pr_notice("md/raid10:%s: %s: failing drive\n",
1294b9c9
N
2433 mdname(mddev),
2434 bdevname(rdev->bdev, b));
58c54fcc
N
2435 break;
2436 case 1:
08464e09 2437 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
1294b9c9
N
2438 mdname(mddev), s,
2439 (unsigned long long)(
f8c9e74f
N
2440 sect +
2441 choose_data_offset(r10_bio, rdev)),
1294b9c9
N
2442 bdevname(rdev->bdev, b));
2443 atomic_add(s, &rdev->corrected_errors);
6814d536 2444 }
1294b9c9
N
2445
2446 rdev_dec_pending(rdev, mddev);
2447 rcu_read_lock();
6814d536
N
2448 }
2449 rcu_read_unlock();
2450
2451 sectors -= s;
2452 sect += s;
2453 }
2454}
2455
9f2c9d12 2456static int narrow_write_error(struct r10bio *r10_bio, int i)
bd870a16
N
2457{
2458 struct bio *bio = r10_bio->master_bio;
fd01b88c 2459 struct mddev *mddev = r10_bio->mddev;
e879a879 2460 struct r10conf *conf = mddev->private;
3cb03002 2461 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
bd870a16
N
2462 /* bio has the data to be written to slot 'i' where
2463 * we just recently had a write error.
2464 * We repeatedly clone the bio and trim down to one block,
2465 * then try the write. Where the write fails we record
2466 * a bad block.
2467 * It is conceivable that the bio doesn't exactly align with
2468 * blocks. We must handle this.
2469 *
2470 * We currently own a reference to the rdev.
2471 */
2472
2473 int block_sectors;
2474 sector_t sector;
2475 int sectors;
2476 int sect_to_write = r10_bio->sectors;
2477 int ok = 1;
2478
2479 if (rdev->badblocks.shift < 0)
2480 return 0;
2481
f04ebb0b
N
2482 block_sectors = roundup(1 << rdev->badblocks.shift,
2483 bdev_logical_block_size(rdev->bdev) >> 9);
bd870a16
N
2484 sector = r10_bio->sector;
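 /* First chunk runs up to the next block_sectors-aligned boundary;
  * subsequent chunks are whole blocks.
  */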
2485 sectors = ((r10_bio->sector + block_sectors)
2486 & ~(sector_t)(block_sectors - 1))
2487 - sector;
2488
2489 while (sect_to_write) {
2490 struct bio *wbio;
27028626 2491 sector_t wsector;
bd870a16
N
2492 if (sectors > sect_to_write)
2493 sectors = sect_to_write;
2494 /* Write at 'sector' for 'sectors' */
2495 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
4f024f37 2496 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
27028626
TM
2497 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2498 wbio->bi_iter.bi_sector = wsector +
2499 choose_data_offset(r10_bio, rdev);
bd870a16 2500 wbio->bi_bdev = rdev->bdev;
796a5cf0 2501 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
4e49ea4a
MC
2502
2503 if (submit_bio_wait(wbio) < 0)
bd870a16 2504 /* Failure! */
27028626 2505 ok = rdev_set_badblocks(rdev, wsector,
bd870a16
N
2506 sectors, 0)
2507 && ok;
2508
2509 bio_put(wbio);
2510 sect_to_write -= sectors;
2511 sector += sectors;
2512 sectors = block_sectors;
2513 }
2514 return ok;
2515}
2516
9f2c9d12 2517static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
560f8e55
N
2518{
2519 int slot = r10_bio->read_slot;
560f8e55 2520 struct bio *bio;
e879a879 2521 struct r10conf *conf = mddev->private;
abbf098e 2522 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
560f8e55
N
2523 char b[BDEVNAME_SIZE];
2524 unsigned long do_sync;
856e08e2 2525 int max_sectors;
109e3765
N
2526 dev_t bio_dev;
2527 sector_t bio_last_sector;
560f8e55
N
2528
2529 /* we got a read error. Maybe the drive is bad. Maybe just
2530 * the block and we can fix it.
2531 * We freeze all other IO, and try reading the block from
 2532 * other devices. When we find one, we re-write
 2533 * and check whether that fixes the read error.
2534 * This is all done synchronously while the array is
2535 * frozen.
2536 */
fae8cc5e
N
2537 bio = r10_bio->devs[slot].bio;
2538 bdevname(bio->bi_bdev, b);
109e3765
N
2539 bio_dev = bio->bi_bdev->bd_dev;
2540 bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
fae8cc5e
N
2541 bio_put(bio);
2542 r10_bio->devs[slot].bio = NULL;
2543
560f8e55 2544 if (mddev->ro == 0) {
e2d59925 2545 freeze_array(conf, 1);
560f8e55
N
2546 fix_read_error(conf, mddev, r10_bio);
2547 unfreeze_array(conf);
fae8cc5e
N
2548 } else
2549 r10_bio->devs[slot].bio = IO_BLOCKED;
2550
abbf098e 2551 rdev_dec_pending(rdev, mddev);
560f8e55 2552
7399c31b 2553read_more:
96c3fd1f
N
2554 rdev = read_balance(conf, r10_bio, &max_sectors);
2555 if (rdev == NULL) {
08464e09
N
2556 pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
2557 mdname(mddev), b,
2558 (unsigned long long)r10_bio->sector);
560f8e55 2559 raid_end_bio_io(r10_bio);
560f8e55
N
2560 return;
2561 }
2562
1eff9d32 2563 do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
560f8e55 2564 slot = r10_bio->read_slot;
08464e09
N
2565 pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
2566 mdname(mddev),
2567 bdevname(rdev->bdev, b),
2568 (unsigned long long)r10_bio->sector);
560f8e55
N
2569 bio = bio_clone_mddev(r10_bio->master_bio,
2570 GFP_NOIO, mddev);
4f024f37 2571 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
560f8e55 2572 r10_bio->devs[slot].bio = bio;
abbf098e 2573 r10_bio->devs[slot].rdev = rdev;
4f024f37 2574 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
f8c9e74f 2575 + choose_data_offset(r10_bio, rdev);
560f8e55 2576 bio->bi_bdev = rdev->bdev;
796a5cf0 2577 bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
560f8e55
N
2578 bio->bi_private = r10_bio;
2579 bio->bi_end_io = raid10_end_read_request;
109e3765
N
2580 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
2581 bio, bio_dev,
2582 bio_last_sector - r10_bio->sectors);
2583
7399c31b
N
2584 if (max_sectors < r10_bio->sectors) {
2585 /* Drat - have to split this up more */
2586 struct bio *mbio = r10_bio->master_bio;
2587 int sectors_handled =
2588 r10_bio->sector + max_sectors
4f024f37 2589 - mbio->bi_iter.bi_sector;
7399c31b
N
2590 r10_bio->sectors = max_sectors;
2591 spin_lock_irq(&conf->device_lock);
2592 if (mbio->bi_phys_segments == 0)
2593 mbio->bi_phys_segments = 2;
2594 else
2595 mbio->bi_phys_segments++;
2596 spin_unlock_irq(&conf->device_lock);
2597 generic_make_request(bio);
7399c31b
N
2598
2599 r10_bio = mempool_alloc(conf->r10bio_pool,
2600 GFP_NOIO);
2601 r10_bio->master_bio = mbio;
aa8b57aa 2602 r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
7399c31b
N
2603 r10_bio->state = 0;
2604 set_bit(R10BIO_ReadError,
2605 &r10_bio->state);
2606 r10_bio->mddev = mddev;
4f024f37 2607 r10_bio->sector = mbio->bi_iter.bi_sector
7399c31b
N
2608 + sectors_handled;
2609
2610 goto read_more;
2611 } else
2612 generic_make_request(bio);
560f8e55
N
2613}
2614
e879a879 2615static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
749c55e9
N
2616{
2617 /* Some sort of write request has finished and it
2618 * succeeded in writing where we thought there was a
2619 * bad block. So forget the bad block.
1a0b7cd8
N
 2620 * Or possibly it failed and we need to record
2621 * a bad block.
749c55e9
N
2622 */
2623 int m;
3cb03002 2624 struct md_rdev *rdev;
749c55e9
N
2625
2626 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2627 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1a0b7cd8
N
2628 for (m = 0; m < conf->copies; m++) {
2629 int dev = r10_bio->devs[m].devnum;
2630 rdev = conf->mirrors[dev].rdev;
2631 if (r10_bio->devs[m].bio == NULL)
2632 continue;
4246a0b6 2633 if (!r10_bio->devs[m].bio->bi_error) {
749c55e9
N
2634 rdev_clear_badblocks(
2635 rdev,
2636 r10_bio->devs[m].addr,
c6563a8c 2637 r10_bio->sectors, 0);
1a0b7cd8
N
2638 } else {
2639 if (!rdev_set_badblocks(
2640 rdev,
2641 r10_bio->devs[m].addr,
2642 r10_bio->sectors, 0))
2643 md_error(conf->mddev, rdev);
749c55e9 2644 }
9ad1aefc
N
2645 rdev = conf->mirrors[dev].replacement;
2646 if (r10_bio->devs[m].repl_bio == NULL)
2647 continue;
4246a0b6
CH
2648
2649 if (!r10_bio->devs[m].repl_bio->bi_error) {
9ad1aefc
N
2650 rdev_clear_badblocks(
2651 rdev,
2652 r10_bio->devs[m].addr,
c6563a8c 2653 r10_bio->sectors, 0);
9ad1aefc
N
2654 } else {
2655 if (!rdev_set_badblocks(
2656 rdev,
2657 r10_bio->devs[m].addr,
2658 r10_bio->sectors, 0))
2659 md_error(conf->mddev, rdev);
2660 }
1a0b7cd8 2661 }
749c55e9
N
2662 put_buf(r10_bio);
2663 } else {
95af587e 2664 bool fail = false;
bd870a16
N
2665 for (m = 0; m < conf->copies; m++) {
2666 int dev = r10_bio->devs[m].devnum;
2667 struct bio *bio = r10_bio->devs[m].bio;
2668 rdev = conf->mirrors[dev].rdev;
2669 if (bio == IO_MADE_GOOD) {
749c55e9
N
2670 rdev_clear_badblocks(
2671 rdev,
2672 r10_bio->devs[m].addr,
c6563a8c 2673 r10_bio->sectors, 0);
749c55e9 2674 rdev_dec_pending(rdev, conf->mddev);
4246a0b6 2675 } else if (bio != NULL && bio->bi_error) {
95af587e 2676 fail = true;
bd870a16
N
2677 if (!narrow_write_error(r10_bio, m)) {
2678 md_error(conf->mddev, rdev);
2679 set_bit(R10BIO_Degraded,
2680 &r10_bio->state);
2681 }
2682 rdev_dec_pending(rdev, conf->mddev);
749c55e9 2683 }
475b0321
N
2684 bio = r10_bio->devs[m].repl_bio;
2685 rdev = conf->mirrors[dev].replacement;
4ca40c2c 2686 if (rdev && bio == IO_MADE_GOOD) {
475b0321
N
2687 rdev_clear_badblocks(
2688 rdev,
2689 r10_bio->devs[m].addr,
c6563a8c 2690 r10_bio->sectors, 0);
475b0321
N
2691 rdev_dec_pending(rdev, conf->mddev);
2692 }
bd870a16 2693 }
95af587e
N
2694 if (fail) {
2695 spin_lock_irq(&conf->device_lock);
2696 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
23ddba80 2697 conf->nr_queued++;
95af587e
N
2698 spin_unlock_irq(&conf->device_lock);
2699 md_wakeup_thread(conf->mddev->thread);
c340702c
N
2700 } else {
2701 if (test_bit(R10BIO_WriteError,
2702 &r10_bio->state))
2703 close_write(r10_bio);
95af587e 2704 raid_end_bio_io(r10_bio);
c340702c 2705 }
749c55e9
N
2706 }
2707}
2708
4ed8731d 2709static void raid10d(struct md_thread *thread)
1da177e4 2710{
4ed8731d 2711 struct mddev *mddev = thread->mddev;
9f2c9d12 2712 struct r10bio *r10_bio;
1da177e4 2713 unsigned long flags;
e879a879 2714 struct r10conf *conf = mddev->private;
1da177e4 2715 struct list_head *head = &conf->retry_list;
e1dfa0a2 2716 struct blk_plug plug;
1da177e4
LT
2717
2718 md_check_recovery(mddev);
1da177e4 2719
95af587e
N
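 /* Finish off writes whose completion was deferred until the
  * superblock update (MD_CHANGE_PENDING) had been written out.
  */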
2720 if (!list_empty_careful(&conf->bio_end_io_list) &&
2721 !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2722 LIST_HEAD(tmp);
2723 spin_lock_irqsave(&conf->device_lock, flags);
2724 if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
23ddba80
SL
2725 while (!list_empty(&conf->bio_end_io_list)) {
2726 list_move(conf->bio_end_io_list.prev, &tmp);
2727 conf->nr_queued--;
2728 }
95af587e
N
2729 }
2730 spin_unlock_irqrestore(&conf->device_lock, flags);
2731 while (!list_empty(&tmp)) {
a452744b
MP
2732 r10_bio = list_first_entry(&tmp, struct r10bio,
2733 retry_list);
95af587e 2734 list_del(&r10_bio->retry_list);
c340702c
N
2735 if (mddev->degraded)
2736 set_bit(R10BIO_Degraded, &r10_bio->state);
2737
2738 if (test_bit(R10BIO_WriteError,
2739 &r10_bio->state))
2740 close_write(r10_bio);
95af587e
N
2741 raid_end_bio_io(r10_bio);
2742 }
2743 }
2744
e1dfa0a2 2745 blk_start_plug(&plug);
1da177e4 2746 for (;;) {
6cce3b23 2747
0021b7bc 2748 flush_pending_writes(conf);
6cce3b23 2749
a35e63ef
N
2750 spin_lock_irqsave(&conf->device_lock, flags);
2751 if (list_empty(head)) {
2752 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 2753 break;
a35e63ef 2754 }
9f2c9d12 2755 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
1da177e4 2756 list_del(head->prev);
4443ae10 2757 conf->nr_queued--;
1da177e4
LT
2758 spin_unlock_irqrestore(&conf->device_lock, flags);
2759
2760 mddev = r10_bio->mddev;
070ec55d 2761 conf = mddev->private;
bd870a16
N
2762 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2763 test_bit(R10BIO_WriteError, &r10_bio->state))
749c55e9 2764 handle_write_completed(conf, r10_bio);
3ea7daa5
N
2765 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2766 reshape_request_write(mddev, r10_bio);
749c55e9 2767 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
1da177e4 2768 sync_request_write(mddev, r10_bio);
7eaceacc 2769 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
1da177e4 2770 recovery_request_write(mddev, r10_bio);
856e08e2 2771 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
560f8e55 2772 handle_read_error(mddev, r10_bio);
856e08e2
N
2773 else {
2774 /* just a partial read to be scheduled from a
2775 * separate context
2776 */
2777 int slot = r10_bio->read_slot;
2778 generic_make_request(r10_bio->devs[slot].bio);
2779 }
560f8e55 2780
1d9d5241 2781 cond_resched();
de393cde
N
2782 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2783 md_check_recovery(mddev);
1da177e4 2784 }
e1dfa0a2 2785 blk_finish_plug(&plug);
1da177e4
LT
2786}
2787
e879a879 2788static int init_resync(struct r10conf *conf)
1da177e4
LT
2789{
2790 int buffs;
69335ef3 2791 int i;
1da177e4
LT
2792
2793 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
b6385483 2794 BUG_ON(conf->r10buf_pool);
69335ef3 2795 conf->have_replacement = 0;
5cf00fcd 2796 for (i = 0; i < conf->geo.raid_disks; i++)
69335ef3
N
2797 if (conf->mirrors[i].replacement)
2798 conf->have_replacement = 1;
1da177e4
LT
2799 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2800 if (!conf->r10buf_pool)
2801 return -ENOMEM;
2802 conf->next_resync = 0;
2803 return 0;
2804}
2805
2806/*
2807 * perform a "sync" on one "block"
2808 *
2809 * We need to make sure that no normal I/O request - particularly write
2810 * requests - conflict with active sync requests.
2811 *
2812 * This is achieved by tracking pending requests and a 'barrier' concept
2813 * that can be installed to exclude normal IO requests.
2814 *
2815 * Resync and recovery are handled very differently.
2816 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2817 *
2818 * For resync, we iterate over virtual addresses, read all copies,
2819 * and update if there are differences. If only one copy is live,
2820 * skip it.
2821 * For recovery, we iterate over physical addresses, read a good
2822 * value for each non-in_sync drive, and over-write.
2823 *
2824 * So, for recovery we may have several outstanding complex requests for a
2825 * given address, one for each out-of-sync device. We model this by allocating
2826 * a number of r10_bio structures, one for each out-of-sync device.
 2827 * As we set up these structures, we collect all bios together into a list
2828 * which we then process collectively to add pages, and then process again
2829 * to pass to generic_make_request.
2830 *
2831 * The r10_bio structures are linked using a borrowed master_bio pointer.
2832 * This link is counted in ->remaining. When the r10_bio that points to NULL
2833 * has its remaining count decremented to 0, the whole complex operation
2834 * is complete.
2835 *
2836 */
2837
849674e4 2838static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
09314799 2839 int *skipped)
1da177e4 2840{
e879a879 2841 struct r10conf *conf = mddev->private;
9f2c9d12 2842 struct r10bio *r10_bio;
1da177e4
LT
2843 struct bio *biolist = NULL, *bio;
2844 sector_t max_sector, nr_sectors;
1da177e4 2845 int i;
6cce3b23 2846 int max_sync;
57dab0bd 2847 sector_t sync_blocks;
1da177e4
LT
2848 sector_t sectors_skipped = 0;
2849 int chunks_skipped = 0;
5cf00fcd 2850 sector_t chunk_mask = conf->geo.chunk_mask;
1da177e4
LT
2851
2852 if (!conf->r10buf_pool)
2853 if (init_resync(conf))
57afd89f 2854 return 0;
1da177e4 2855
7e83ccbe
MW
2856 /*
2857 * Allow skipping a full rebuild for incremental assembly
2858 * of a clean array, like RAID1 does.
2859 */
2860 if (mddev->bitmap == NULL &&
2861 mddev->recovery_cp == MaxSector &&
13765120
N
2862 mddev->reshape_position == MaxSector &&
2863 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7e83ccbe 2864 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
13765120 2865 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7e83ccbe
MW
2866 conf->fullsync == 0) {
2867 *skipped = 1;
13765120 2868 return mddev->dev_sectors - sector_nr;
7e83ccbe
MW
2869 }
2870
1da177e4 2871 skipped:
58c0fed4 2872 max_sector = mddev->dev_sectors;
3ea7daa5
N
2873 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2874 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1da177e4
LT
2875 max_sector = mddev->resync_max_sectors;
2876 if (sector_nr >= max_sector) {
6cce3b23
N
2877 /* If we aborted, we need to abort the
 2879 * sync on the 'current' bitmap chunks (there can
 2880 * be several when recovering multiple devices),
 2881 * as we may have started syncing them but not finished.
2881 * We can find the current address in
2882 * mddev->curr_resync, but for recovery,
2883 * we need to convert that to several
2884 * virtual addresses.
2885 */
3ea7daa5
N
2886 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2887 end_reshape(conf);
b3968552 2888 close_sync(conf);
3ea7daa5
N
2889 return 0;
2890 }
2891
6cce3b23
N
2892 if (mddev->curr_resync < max_sector) { /* aborted */
2893 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2894 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2895 &sync_blocks, 1);
5cf00fcd 2896 else for (i = 0; i < conf->geo.raid_disks; i++) {
6cce3b23
N
2897 sector_t sect =
2898 raid10_find_virt(conf, mddev->curr_resync, i);
2899 bitmap_end_sync(mddev->bitmap, sect,
2900 &sync_blocks, 1);
2901 }
9ad1aefc
N
2902 } else {
2903 /* completed sync */
2904 if ((!mddev->bitmap || conf->fullsync)
2905 && conf->have_replacement
2906 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2907 /* Completed a full sync so the replacements
2908 * are now fully recovered.
2909 */
f90145f3
N
2910 rcu_read_lock();
2911 for (i = 0; i < conf->geo.raid_disks; i++) {
2912 struct md_rdev *rdev =
2913 rcu_dereference(conf->mirrors[i].replacement);
2914 if (rdev)
2915 rdev->recovery_offset = MaxSector;
2916 }
2917 rcu_read_unlock();
9ad1aefc 2918 }
6cce3b23 2919 conf->fullsync = 0;
9ad1aefc 2920 }
6cce3b23 2921 bitmap_close_sync(mddev->bitmap);
1da177e4 2922 close_sync(conf);
57afd89f 2923 *skipped = 1;
1da177e4
LT
2924 return sectors_skipped;
2925 }
3ea7daa5
N
2926
2927 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2928 return reshape_request(mddev, sector_nr, skipped);
2929
5cf00fcd 2930 if (chunks_skipped >= conf->geo.raid_disks) {
1da177e4
LT
2931 /* if there has been nothing to do on any drive,
2932 * then there is nothing to do at all..
2933 */
57afd89f
N
2934 *skipped = 1;
2935 return (max_sector - sector_nr) + sectors_skipped;
1da177e4
LT
2936 }
2937
c6207277
N
2938 if (max_sector > mddev->resync_max)
2939 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2940
1da177e4
LT
2941 /* make sure whole request will fit in a chunk - if chunks
2942 * are meaningful
2943 */
5cf00fcd
N
2944 if (conf->geo.near_copies < conf->geo.raid_disks &&
2945 max_sector > (sector_nr | chunk_mask))
2946 max_sector = (sector_nr | chunk_mask) + 1;
1da177e4 2947
7ac50447
TM
2948 /*
2949 * If there is non-resync activity waiting for a turn, then let it
 2950 * through before starting on this new sync request.
2951 */
2952 if (conf->nr_waiting)
2953 schedule_timeout_uninterruptible(1);
2954
1da177e4
LT
2955 /* Again, very different code for resync and recovery.
2956 * Both must result in an r10bio with a list of bios that
2957 * have bi_end_io, bi_sector, bi_bdev set,
2958 * and bi_private set to the r10bio.
2959 * For recovery, we may actually create several r10bios
2960 * with 2 bios in each, that correspond to the bios in the main one.
2961 * In this case, the subordinate r10bios link back through a
2962 * borrowed master_bio pointer, and the counter in the master
2963 * includes a ref from each subordinate.
2964 */
2965 /* First, we decide what to do and set ->bi_end_io
2966 * To end_sync_read if we want to read, and
2967 * end_sync_write if we will want to write.
2968 */
2969
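 /* Never attempt more than one r10buf's worth of data in a single pass. */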
6cce3b23 2970 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1da177e4
LT
2971 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2972 /* recovery... the complicated one */
e875ecea 2973 int j;
1da177e4
LT
2974 r10_bio = NULL;
2975
5cf00fcd 2976 for (i = 0 ; i < conf->geo.raid_disks; i++) {
ab9d47e9 2977 int still_degraded;
9f2c9d12 2978 struct r10bio *rb2;
ab9d47e9
N
2979 sector_t sect;
2980 int must_sync;
e875ecea 2981 int any_working;
dc280d98 2982 struct raid10_info *mirror = &conf->mirrors[i];
f90145f3 2983 struct md_rdev *mrdev, *mreplace;
24afd80d 2984
f90145f3
N
2985 rcu_read_lock();
2986 mrdev = rcu_dereference(mirror->rdev);
2987 mreplace = rcu_dereference(mirror->replacement);
2988
2989 if ((mrdev == NULL ||
f5b67ae8 2990 test_bit(Faulty, &mrdev->flags) ||
f90145f3
N
2991 test_bit(In_sync, &mrdev->flags)) &&
2992 (mreplace == NULL ||
2993 test_bit(Faulty, &mreplace->flags))) {
2994 rcu_read_unlock();
ab9d47e9 2995 continue;
f90145f3 2996 }
1da177e4 2997
ab9d47e9
N
2998 still_degraded = 0;
2999 /* want to reconstruct this device */
3000 rb2 = r10_bio;
3001 sect = raid10_find_virt(conf, sector_nr, i);
fc448a18
N
3002 if (sect >= mddev->resync_max_sectors) {
3003 /* last stripe is not complete - don't
3004 * try to recover this sector.
3005 */
f90145f3 3006 rcu_read_unlock();
fc448a18
N
3007 continue;
3008 }
f5b67ae8
N
3009 if (mreplace && test_bit(Faulty, &mreplace->flags))
3010 mreplace = NULL;
24afd80d
N
3011 /* Unless we are doing a full sync, or a replacement
3012 * we only need to recover the block if it is set in
3013 * the bitmap
ab9d47e9
N
3014 */
3015 must_sync = bitmap_start_sync(mddev->bitmap, sect,
3016 &sync_blocks, 1);
3017 if (sync_blocks < max_sync)
3018 max_sync = sync_blocks;
3019 if (!must_sync &&
f90145f3 3020 mreplace == NULL &&
ab9d47e9
N
3021 !conf->fullsync) {
3022 /* yep, skip the sync_blocks here, but don't assume
3023 * that there will never be anything to do here
3024 */
3025 chunks_skipped = -1;
f90145f3 3026 rcu_read_unlock();
ab9d47e9
N
3027 continue;
3028 }
f90145f3
N
3029 atomic_inc(&mrdev->nr_pending);
3030 if (mreplace)
3031 atomic_inc(&mreplace->nr_pending);
3032 rcu_read_unlock();
6cce3b23 3033
ab9d47e9 3034 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
cb8b12b5 3035 r10_bio->state = 0;
ab9d47e9
N
3036 raise_barrier(conf, rb2 != NULL);
3037 atomic_set(&r10_bio->remaining, 0);
18055569 3038
ab9d47e9
N
3039 r10_bio->master_bio = (struct bio*)rb2;
3040 if (rb2)
3041 atomic_inc(&rb2->remaining);
3042 r10_bio->mddev = mddev;
3043 set_bit(R10BIO_IsRecover, &r10_bio->state);
3044 r10_bio->sector = sect;
1da177e4 3045
ab9d47e9
N
3046 raid10_find_phys(conf, r10_bio);
3047
3048 /* Need to check if the array will still be
3049 * degraded
3050 */
f90145f3
N
3051 rcu_read_lock();
3052 for (j = 0; j < conf->geo.raid_disks; j++) {
3053 struct md_rdev *rdev = rcu_dereference(
3054 conf->mirrors[j].rdev);
3055 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
ab9d47e9 3056 still_degraded = 1;
87fc767b 3057 break;
1da177e4 3058 }
f90145f3 3059 }
ab9d47e9
N
3060
3061 must_sync = bitmap_start_sync(mddev->bitmap, sect,
3062 &sync_blocks, still_degraded);
3063
e875ecea 3064 any_working = 0;
ab9d47e9 3065 for (j=0; j<conf->copies;j++) {
e875ecea 3066 int k;
ab9d47e9 3067 int d = r10_bio->devs[j].devnum;
5e570289 3068 sector_t from_addr, to_addr;
f90145f3
N
3069 struct md_rdev *rdev =
3070 rcu_dereference(conf->mirrors[d].rdev);
40c356ce
N
3071 sector_t sector, first_bad;
3072 int bad_sectors;
f90145f3
N
3073 if (!rdev ||
3074 !test_bit(In_sync, &rdev->flags))
ab9d47e9
N
3075 continue;
3076 /* This is where we read from */
e875ecea 3077 any_working = 1;
40c356ce
N
3078 sector = r10_bio->devs[j].addr;
3079
3080 if (is_badblock(rdev, sector, max_sync,
3081 &first_bad, &bad_sectors)) {
3082 if (first_bad > sector)
3083 max_sync = first_bad - sector;
3084 else {
3085 bad_sectors -= (sector
3086 - first_bad);
3087 if (max_sync > bad_sectors)
3088 max_sync = bad_sectors;
3089 continue;
3090 }
3091 }
ab9d47e9 3092 bio = r10_bio->devs[0].bio;
8be185f2 3093 bio_reset(bio);
ab9d47e9
N
3094 bio->bi_next = biolist;
3095 biolist = bio;
3096 bio->bi_private = r10_bio;
3097 bio->bi_end_io = end_sync_read;
796a5cf0 3098 bio_set_op_attrs(bio, REQ_OP_READ, 0);
5e570289 3099 from_addr = r10_bio->devs[j].addr;
4f024f37
KO
3100 bio->bi_iter.bi_sector = from_addr +
3101 rdev->data_offset;
24afd80d
N
3102 bio->bi_bdev = rdev->bdev;
3103 atomic_inc(&rdev->nr_pending);
3104 /* and we write to 'i' (if not in_sync) */
ab9d47e9
N
3105
3106 for (k=0; k<conf->copies; k++)
3107 if (r10_bio->devs[k].devnum == i)
3108 break;
3109 BUG_ON(k == conf->copies);
5e570289 3110 to_addr = r10_bio->devs[k].addr;
ab9d47e9 3111 r10_bio->devs[0].devnum = d;
5e570289 3112 r10_bio->devs[0].addr = from_addr;
ab9d47e9 3113 r10_bio->devs[1].devnum = i;
5e570289 3114 r10_bio->devs[1].addr = to_addr;
ab9d47e9 3115
f90145f3 3116 if (!test_bit(In_sync, &mrdev->flags)) {
24afd80d 3117 bio = r10_bio->devs[1].bio;
8be185f2 3118 bio_reset(bio);
24afd80d
N
3119 bio->bi_next = biolist;
3120 biolist = bio;
3121 bio->bi_private = r10_bio;
3122 bio->bi_end_io = end_sync_write;
796a5cf0 3123 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
4f024f37 3124 bio->bi_iter.bi_sector = to_addr
f90145f3
N
3125 + mrdev->data_offset;
3126 bio->bi_bdev = mrdev->bdev;
24afd80d
N
3127 atomic_inc(&r10_bio->remaining);
3128 } else
3129 r10_bio->devs[1].bio->bi_end_io = NULL;
3130
3131 /* and maybe write to replacement */
3132 bio = r10_bio->devs[1].repl_bio;
3133 if (bio)
3134 bio->bi_end_io = NULL;
f90145f3 3135 /* Note: if mreplace != NULL, then bio
24afd80d
N
3136 * cannot be NULL as r10buf_pool_alloc will
3137 * have allocated it.
3138 * So the second test here is pointless.
3139 * But it keeps semantic-checkers happy, and
3140 * this comment keeps human reviewers
3141 * happy.
3142 */
f90145f3
N
3143 if (mreplace == NULL || bio == NULL ||
3144 test_bit(Faulty, &mreplace->flags))
24afd80d 3145 break;
8be185f2 3146 bio_reset(bio);
24afd80d
N
3147 bio->bi_next = biolist;
3148 biolist = bio;
3149 bio->bi_private = r10_bio;
3150 bio->bi_end_io = end_sync_write;
796a5cf0 3151 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
4f024f37 3152 bio->bi_iter.bi_sector = to_addr +
f90145f3
N
3153 mreplace->data_offset;
3154 bio->bi_bdev = mreplace->bdev;
24afd80d 3155 atomic_inc(&r10_bio->remaining);
ab9d47e9
N
3156 break;
3157 }
f90145f3 3158 rcu_read_unlock();
ab9d47e9 3159 if (j == conf->copies) {
e875ecea
N
3160 /* Cannot recover, so abort the recovery or
3161 * record a bad block */
e875ecea
N
3162 if (any_working) {
3163 /* problem is that there are bad blocks
3164 * on other device(s)
3165 */
3166 int k;
3167 for (k = 0; k < conf->copies; k++)
3168 if (r10_bio->devs[k].devnum == i)
3169 break;
24afd80d 3170 if (!test_bit(In_sync,
f90145f3 3171 &mrdev->flags)
24afd80d 3172 && !rdev_set_badblocks(
f90145f3 3173 mrdev,
24afd80d
N
3174 r10_bio->devs[k].addr,
3175 max_sync, 0))
3176 any_working = 0;
f90145f3 3177 if (mreplace &&
24afd80d 3178 !rdev_set_badblocks(
f90145f3 3179 mreplace,
e875ecea
N
3180 r10_bio->devs[k].addr,
3181 max_sync, 0))
3182 any_working = 0;
3183 }
3184 if (!any_working) {
3185 if (!test_and_set_bit(MD_RECOVERY_INTR,
3186 &mddev->recovery))
08464e09 3187 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
e875ecea 3188 mdname(mddev));
24afd80d 3189 mirror->recovery_disabled
e875ecea
N
3190 = mddev->recovery_disabled;
3191 }
e8b84915
N
3192 put_buf(r10_bio);
3193 if (rb2)
3194 atomic_dec(&rb2->remaining);
3195 r10_bio = rb2;
f90145f3
N
3196 rdev_dec_pending(mrdev, mddev);
3197 if (mreplace)
3198 rdev_dec_pending(mreplace, mddev);
ab9d47e9 3199 break;
1da177e4 3200 }
f90145f3
N
3201 rdev_dec_pending(mrdev, mddev);
3202 if (mreplace)
3203 rdev_dec_pending(mreplace, mddev);
ab9d47e9 3204 }
1da177e4
LT
3205 if (biolist == NULL) {
3206 while (r10_bio) {
9f2c9d12
N
3207 struct r10bio *rb2 = r10_bio;
3208 r10_bio = (struct r10bio*) rb2->master_bio;
1da177e4
LT
3209 rb2->master_bio = NULL;
3210 put_buf(rb2);
3211 }
3212 goto giveup;
3213 }
3214 } else {
3215 /* resync. Schedule a read for every block at this virt offset */
3216 int count = 0;
6cce3b23 3217
c40f341f 3218 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
78200d45 3219
6cce3b23
N
3220 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3221 &sync_blocks, mddev->degraded) &&
ab9d47e9
N
3222 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3223 &mddev->recovery)) {
6cce3b23
N
3224 /* We can skip this block */
3225 *skipped = 1;
3226 return sync_blocks + sectors_skipped;
3227 }
3228 if (sync_blocks < max_sync)
3229 max_sync = sync_blocks;
1da177e4 3230 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
cb8b12b5 3231 r10_bio->state = 0;
1da177e4 3232
1da177e4
LT
3233 r10_bio->mddev = mddev;
3234 atomic_set(&r10_bio->remaining, 0);
6cce3b23
N
3235 raise_barrier(conf, 0);
3236 conf->next_resync = sector_nr;
1da177e4
LT
3237
3238 r10_bio->master_bio = NULL;
3239 r10_bio->sector = sector_nr;
3240 set_bit(R10BIO_IsSync, &r10_bio->state);
3241 raid10_find_phys(conf, r10_bio);
5cf00fcd 3242 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
1da177e4 3243
5cf00fcd 3244 for (i = 0; i < conf->copies; i++) {
1da177e4 3245 int d = r10_bio->devs[i].devnum;
40c356ce
N
3246 sector_t first_bad, sector;
3247 int bad_sectors;
f90145f3 3248 struct md_rdev *rdev;
40c356ce 3249
9ad1aefc
N
3250 if (r10_bio->devs[i].repl_bio)
3251 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3252
1da177e4 3253 bio = r10_bio->devs[i].bio;
8be185f2 3254 bio_reset(bio);
4246a0b6 3255 bio->bi_error = -EIO;
f90145f3
N
3256 rcu_read_lock();
3257 rdev = rcu_dereference(conf->mirrors[d].rdev);
3258 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3259 rcu_read_unlock();
1da177e4 3260 continue;
f90145f3 3261 }
40c356ce 3262 sector = r10_bio->devs[i].addr;
f90145f3 3263 if (is_badblock(rdev, sector, max_sync,
40c356ce
N
3264 &first_bad, &bad_sectors)) {
3265 if (first_bad > sector)
3266 max_sync = first_bad - sector;
3267 else {
3268 bad_sectors -= (sector - first_bad);
3269 if (max_sync > bad_sectors)
91502f09 3270 max_sync = bad_sectors;
f90145f3 3271 rcu_read_unlock();
40c356ce
N
3272 continue;
3273 }
3274 }
f90145f3 3275 atomic_inc(&rdev->nr_pending);
1da177e4
LT
3276 atomic_inc(&r10_bio->remaining);
3277 bio->bi_next = biolist;
3278 biolist = bio;
3279 bio->bi_private = r10_bio;
3280 bio->bi_end_io = end_sync_read;
796a5cf0 3281 bio_set_op_attrs(bio, REQ_OP_READ, 0);
f90145f3
N
3282 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3283 bio->bi_bdev = rdev->bdev;
1da177e4 3284 count++;
9ad1aefc 3285
f90145f3
N
3286 rdev = rcu_dereference(conf->mirrors[d].replacement);
3287 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3288 rcu_read_unlock();
9ad1aefc 3289 continue;
f90145f3
N
3290 }
3291 atomic_inc(&rdev->nr_pending);
3292 rcu_read_unlock();
9ad1aefc
N
3293
3294 /* Need to set up for writing to the replacement */
3295 bio = r10_bio->devs[i].repl_bio;
8be185f2 3296 bio_reset(bio);
4246a0b6 3297 bio->bi_error = -EIO;
9ad1aefc
N
3298
3299 sector = r10_bio->devs[i].addr;
9ad1aefc
N
3300 bio->bi_next = biolist;
3301 biolist = bio;
3302 bio->bi_private = r10_bio;
3303 bio->bi_end_io = end_sync_write;
796a5cf0 3304 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
f90145f3
N
3305 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3306 bio->bi_bdev = rdev->bdev;
9ad1aefc 3307 count++;
1da177e4
LT
3308 }
3309
3310 if (count < 2) {
3311 for (i=0; i<conf->copies; i++) {
3312 int d = r10_bio->devs[i].devnum;
3313 if (r10_bio->devs[i].bio->bi_end_io)
ab9d47e9
N
3314 rdev_dec_pending(conf->mirrors[d].rdev,
3315 mddev);
9ad1aefc
N
3316 if (r10_bio->devs[i].repl_bio &&
3317 r10_bio->devs[i].repl_bio->bi_end_io)
3318 rdev_dec_pending(
3319 conf->mirrors[d].replacement,
3320 mddev);
1da177e4
LT
3321 }
3322 put_buf(r10_bio);
3323 biolist = NULL;
3324 goto giveup;
3325 }
3326 }
3327
1da177e4 3328 nr_sectors = 0;
6cce3b23
N
3329 if (sector_nr + max_sync < max_sector)
3330 max_sector = sector_nr + max_sync;
1da177e4
LT
3331 do {
3332 struct page *page;
3333 int len = PAGE_SIZE;
1da177e4
LT
3334 if (sector_nr + (len>>9) > max_sector)
3335 len = (max_sector - sector_nr) << 9;
3336 if (len == 0)
3337 break;
3338 for (bio= biolist ; bio ; bio=bio->bi_next) {
ab9d47e9 3339 struct bio *bio2;
1da177e4 3340 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
ab9d47e9
N
3341 if (bio_add_page(bio, page, len, 0))
3342 continue;
3343
3344 /* stop here */
3345 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3346 for (bio2 = biolist;
3347 bio2 && bio2 != bio;
3348 bio2 = bio2->bi_next) {
3349 /* remove last page from this bio */
3350 bio2->bi_vcnt--;
4f024f37 3351 bio2->bi_iter.bi_size -= len;
b7c44ed9 3352 bio_clear_flag(bio2, BIO_SEG_VALID);
1da177e4 3353 }
ab9d47e9 3354 goto bio_full;
1da177e4
LT
3355 }
3356 nr_sectors += len>>9;
3357 sector_nr += len>>9;
3358 } while (biolist->bi_vcnt < RESYNC_PAGES);
3359 bio_full:
3360 r10_bio->sectors = nr_sectors;
3361
3362 while (biolist) {
3363 bio = biolist;
3364 biolist = biolist->bi_next;
3365
3366 bio->bi_next = NULL;
3367 r10_bio = bio->bi_private;
3368 r10_bio->sectors = nr_sectors;
3369
3370 if (bio->bi_end_io == end_sync_read) {
3371 md_sync_acct(bio->bi_bdev, nr_sectors);
4246a0b6 3372 bio->bi_error = 0;
1da177e4
LT
3373 generic_make_request(bio);
3374 }
3375 }
3376
57afd89f
N
3377 if (sectors_skipped)
3378 /* pretend they weren't skipped; it makes
3379 * no important difference in this case
3380 */
3381 md_done_sync(mddev, sectors_skipped, 1);
3382
1da177e4
LT
3383 return sectors_skipped + nr_sectors;
3384 giveup:
3385 /* There is nowhere to write, so all non-sync
e875ecea
N
3386 * drives must be failed or in resync, or all drives
3387 * have a bad block, so try the next chunk...
1da177e4 3388 */
09b4068a
N
3389 if (sector_nr + max_sync < max_sector)
3390 max_sector = sector_nr + max_sync;
3391
3392 sectors_skipped += (max_sector - sector_nr);
1da177e4
LT
3393 chunks_skipped ++;
3394 sector_nr = max_sector;
1da177e4 3395 goto skipped;
1da177e4
LT
3396}
3397
80c3a6ce 3398static sector_t
fd01b88c 3399raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
80c3a6ce
DW
3400{
3401 sector_t size;
e879a879 3402 struct r10conf *conf = mddev->private;
80c3a6ce
DW
3403
3404 if (!raid_disks)
3ea7daa5
N
3405 raid_disks = min(conf->geo.raid_disks,
3406 conf->prev.raid_disks);
80c3a6ce 3407 if (!sectors)
dab8b292 3408 sectors = conf->dev_sectors;
80c3a6ce 3409
5cf00fcd
N
3410 size = sectors >> conf->geo.chunk_shift;
3411 sector_div(size, conf->geo.far_copies);
80c3a6ce 3412 size = size * raid_disks;
5cf00fcd 3413 sector_div(size, conf->geo.near_copies);
80c3a6ce 3414
5cf00fcd 3415 return size << conf->geo.chunk_shift;
80c3a6ce
DW
3416}
3417
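/*
 * Editor's note: a minimal standalone sketch (not part of this driver) of
 * the arithmetic raid10_size() performs above; all names below are
 * hypothetical illustrations, not md/raid10 APIs.
 */
#include <stdint.h>

/* Usable array size, in sectors, given the per-device size in sectors. */
static uint64_t example_raid10_size(uint64_t sectors_per_dev, int raid_disks,
				    int near_copies, int far_copies,
				    int chunk_shift)
{
	uint64_t size = sectors_per_dev >> chunk_shift; /* chunks per device */

	size /= far_copies;   /* each device holds far_copies sections */
	size *= raid_disks;   /* total chunks across the array */
	size /= near_copies;  /* near_copies devices mirror each chunk */
	return size << chunk_shift; /* back to sectors */
}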
6508fdbf
N
3418static void calc_sectors(struct r10conf *conf, sector_t size)
3419{
3420 /* Calculate the number of sectors-per-device that will
3421 * actually be used, and set conf->dev_sectors and
3422 * conf->stride
3423 */
3424
5cf00fcd
N
3425 size = size >> conf->geo.chunk_shift;
3426 sector_div(size, conf->geo.far_copies);
3427 size = size * conf->geo.raid_disks;
3428 sector_div(size, conf->geo.near_copies);
6508fdbf
N
3429 /* 'size' is now the number of chunks in the array */
3430 /* calculate "used chunks per device" */
3431 size = size * conf->copies;
3432
3433 /* We need to round up when dividing by raid_disks to
3434 * get the stride size.
3435 */
5cf00fcd 3436 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
6508fdbf 3437
5cf00fcd 3438 conf->dev_sectors = size << conf->geo.chunk_shift;
6508fdbf 3439
5cf00fcd
N
3440 if (conf->geo.far_offset)
3441 conf->geo.stride = 1 << conf->geo.chunk_shift;
6508fdbf 3442 else {
5cf00fcd
N
3443 sector_div(size, conf->geo.far_copies);
3444 conf->geo.stride = size << conf->geo.chunk_shift;
6508fdbf
N
3445 }
3446}
dab8b292 3447
deb200d0
N
3448enum geo_type {geo_new, geo_old, geo_start};
3449static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3450{
3451 int nc, fc, fo;
3452 int layout, chunk, disks;
3453 switch (new) {
3454 case geo_old:
3455 layout = mddev->layout;
3456 chunk = mddev->chunk_sectors;
3457 disks = mddev->raid_disks - mddev->delta_disks;
3458 break;
3459 case geo_new:
3460 layout = mddev->new_layout;
3461 chunk = mddev->new_chunk_sectors;
3462 disks = mddev->raid_disks;
3463 break;
3464 default: /* avoid 'may be unused' warnings */
3465 case geo_start: /* new when starting reshape - raid_disks not
3466 * updated yet. */
3467 layout = mddev->new_layout;
3468 chunk = mddev->new_chunk_sectors;
3469 disks = mddev->raid_disks + mddev->delta_disks;
3470 break;
3471 }
8bce6d35 3472 if (layout >> 19)
deb200d0
N
3473 return -1;
3474 if (chunk < (PAGE_SIZE >> 9) ||
3475 !is_power_of_2(chunk))
3476 return -2;
3477 nc = layout & 255;
3478 fc = (layout >> 8) & 255;
3479 fo = layout & (1<<16);
3480 geo->raid_disks = disks;
3481 geo->near_copies = nc;
3482 geo->far_copies = fc;
3483 geo->far_offset = fo;
8bce6d35
N
3484 switch (layout >> 17) {
3485 case 0: /* original layout. simple but not always optimal */
3486 geo->far_set_size = disks;
3487 break;
3488 case 1: /* "improved" layout which was buggy. Hopefully no-one is
3489 * actually using this, but leave code here just in case. */
3490 geo->far_set_size = disks/fc;
3491 WARN(geo->far_set_size < fc,
3492 "This RAID10 layout does not provide data safety - please backup and create new array\n");
3493 break;
3494 case 2: /* "improved" layout fixed to match documentation */
3495 geo->far_set_size = fc * nc;
3496 break;
3497 default: /* Not a valid layout */
3498 return -1;
3499 }
deb200d0
N
3500 geo->chunk_mask = chunk - 1;
3501 geo->chunk_shift = ffz(~chunk);
3502 return nc*fc;
3503}
3504
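/*
 * Editor's note: a standalone sketch (not part of this driver) of how the
 * raid10 layout word is decoded, mirroring setup_geo() above.  Struct and
 * function names are hypothetical illustrations.
 */
struct example_layout {
	int near_copies;     /* low byte of the layout word */
	int far_copies;      /* second byte */
	int far_offset;      /* bit 16 */
	int far_set_scheme;  /* bits 17+: 0 = original, 1 = buggy sets, 2 = fixed sets */
};

static void example_decode_layout(int layout, struct example_layout *l)
{
	l->near_copies = layout & 255;
	l->far_copies = (layout >> 8) & 255;
	l->far_offset = !!(layout & (1 << 16));
	l->far_set_scheme = layout >> 17;
}
/* e.g. the common 'n2' layout 0x102 decodes to near=2, far=1, offset=0. */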
e879a879 3505static struct r10conf *setup_conf(struct mddev *mddev)
1da177e4 3506{
e879a879 3507 struct r10conf *conf = NULL;
dab8b292 3508 int err = -EINVAL;
deb200d0
N
3509 struct geom geo;
3510 int copies;
3511
3512 copies = setup_geo(&geo, mddev, geo_new);
1da177e4 3513
deb200d0 3514 if (copies == -2) {
08464e09
N
3515 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3516 mdname(mddev), PAGE_SIZE);
dab8b292 3517 goto out;
1da177e4 3518 }
2604b703 3519
deb200d0 3520 if (copies < 2 || copies > mddev->raid_disks) {
08464e09
N
3521 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3522 mdname(mddev), mddev->new_layout);
1da177e4
LT
3523 goto out;
3524 }
dab8b292
TM
3525
3526 err = -ENOMEM;
e879a879 3527 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
dab8b292 3528 if (!conf)
1da177e4 3529 goto out;
dab8b292 3530
3ea7daa5 3531 /* FIXME calc properly */
dc280d98 3532 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
78eaa0d4 3533 max(0,-mddev->delta_disks)),
dab8b292
TM
3534 GFP_KERNEL);
3535 if (!conf->mirrors)
3536 goto out;
4443ae10
N
3537
3538 conf->tmppage = alloc_page(GFP_KERNEL);
3539 if (!conf->tmppage)
dab8b292
TM
3540 goto out;
3541
deb200d0
N
3542 conf->geo = geo;
3543 conf->copies = copies;
dab8b292
TM
3544 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3545 r10bio_pool_free, conf);
3546 if (!conf->r10bio_pool)
3547 goto out;
3548
6508fdbf 3549 calc_sectors(conf, mddev->dev_sectors);
3ea7daa5
N
3550 if (mddev->reshape_position == MaxSector) {
3551 conf->prev = conf->geo;
3552 conf->reshape_progress = MaxSector;
3553 } else {
3554 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3555 err = -EINVAL;
3556 goto out;
3557 }
3558 conf->reshape_progress = mddev->reshape_position;
3559 if (conf->prev.far_offset)
3560 conf->prev.stride = 1 << conf->prev.chunk_shift;
3561 else
3562 /* far_copies must be 1 */
3563 conf->prev.stride = conf->dev_sectors;
3564 }
299b0685 3565 conf->reshape_safe = conf->reshape_progress;
e7e72bf6 3566 spin_lock_init(&conf->device_lock);
dab8b292 3567 INIT_LIST_HEAD(&conf->retry_list);
95af587e 3568 INIT_LIST_HEAD(&conf->bio_end_io_list);
dab8b292
TM
3569
3570 spin_lock_init(&conf->resync_lock);
3571 init_waitqueue_head(&conf->wait_barrier);
0e5313e2 3572 atomic_set(&conf->nr_pending, 0);
dab8b292 3573
0232605d 3574 conf->thread = md_register_thread(raid10d, mddev, "raid10");
dab8b292
TM
3575 if (!conf->thread)
3576 goto out;
3577
dab8b292
TM
3578 conf->mddev = mddev;
3579 return conf;
3580
3581 out:
dab8b292 3582 if (conf) {
644df1a8 3583 mempool_destroy(conf->r10bio_pool);
dab8b292
TM
3584 kfree(conf->mirrors);
3585 safe_put_page(conf->tmppage);
3586 kfree(conf);
3587 }
3588 return ERR_PTR(err);
3589}
3590
849674e4 3591static int raid10_run(struct mddev *mddev)
dab8b292 3592{
e879a879 3593 struct r10conf *conf;
dab8b292 3594 int i, disk_idx, chunk_size;
dc280d98 3595 struct raid10_info *disk;
3cb03002 3596 struct md_rdev *rdev;
dab8b292 3597 sector_t size;
3ea7daa5
N
3598 sector_t min_offset_diff = 0;
3599 int first = 1;
532a2a3f 3600 bool discard_supported = false;
dab8b292
TM
3601
3602 if (mddev->private == NULL) {
3603 conf = setup_conf(mddev);
3604 if (IS_ERR(conf))
3605 return PTR_ERR(conf);
3606 mddev->private = conf;
3607 }
3608 conf = mddev->private;
3609 if (!conf)
3610 goto out;
3611
dab8b292
TM
3612 mddev->thread = conf->thread;
3613 conf->thread = NULL;
3614
8f6c2e4b 3615 chunk_size = mddev->chunk_sectors << 9;
cc4d1efd 3616 if (mddev->queue) {
532a2a3f
SL
3617 blk_queue_max_discard_sectors(mddev->queue,
3618 mddev->chunk_sectors);
5026d7a9 3619 blk_queue_max_write_same_sectors(mddev->queue, 0);
cc4d1efd
JB
3620 blk_queue_io_min(mddev->queue, chunk_size);
3621 if (conf->geo.raid_disks % conf->geo.near_copies)
3622 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3623 else
3624 blk_queue_io_opt(mddev->queue, chunk_size *
3625 (conf->geo.raid_disks / conf->geo.near_copies));
3626 }
8f6c2e4b 3627
dafb20fa 3628 rdev_for_each(rdev, mddev) {
3ea7daa5 3629 long long diff;
aba336bd 3630 struct request_queue *q;
34b343cf 3631
1da177e4 3632 disk_idx = rdev->raid_disk;
f8c9e74f
N
3633 if (disk_idx < 0)
3634 continue;
3635 if (disk_idx >= conf->geo.raid_disks &&
3636 disk_idx >= conf->prev.raid_disks)
1da177e4
LT
3637 continue;
3638 disk = conf->mirrors + disk_idx;
3639
56a2559b
N
3640 if (test_bit(Replacement, &rdev->flags)) {
3641 if (disk->replacement)
3642 goto out_free_conf;
3643 disk->replacement = rdev;
3644 } else {
3645 if (disk->rdev)
3646 goto out_free_conf;
3647 disk->rdev = rdev;
3648 }
aba336bd 3649 q = bdev_get_queue(rdev->bdev);
3ea7daa5
N
3650 diff = (rdev->new_data_offset - rdev->data_offset);
3651 if (!mddev->reshape_backwards)
3652 diff = -diff;
3653 if (diff < 0)
3654 diff = 0;
3655 if (first || diff < min_offset_diff)
3656 min_offset_diff = diff;
56a2559b 3657
cc4d1efd
JB
3658 if (mddev->gendisk)
3659 disk_stack_limits(mddev->gendisk, rdev->bdev,
3660 rdev->data_offset << 9);
1da177e4
LT
3661
3662 disk->head_position = 0;
532a2a3f
SL
3663
3664 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3665 discard_supported = true;
1da177e4 3666 }
3ea7daa5 3667
ed30be07
JB
3668 if (mddev->queue) {
3669 if (discard_supported)
3670 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3671 mddev->queue);
3672 else
3673 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3674 mddev->queue);
3675 }
6d508242 3676 /* need to check that every block has at least one working mirror */
700c7213 3677 if (!enough(conf, -1)) {
08464e09 3678 pr_err("md/raid10:%s: not enough operational mirrors.\n",
6d508242 3679 mdname(mddev));
1da177e4
LT
3680 goto out_free_conf;
3681 }
3682
3ea7daa5
N
3683 if (conf->reshape_progress != MaxSector) {
3684 /* must ensure that shape change is supported */
3685 if (conf->geo.far_copies != 1 &&
3686 conf->geo.far_offset == 0)
3687 goto out_free_conf;
3688 if (conf->prev.far_copies != 1 &&
78eaa0d4 3689 conf->prev.far_offset == 0)
3ea7daa5
N
3690 goto out_free_conf;
3691 }
3692
1da177e4 3693 mddev->degraded = 0;
f8c9e74f
N
3694 for (i = 0;
3695 i < conf->geo.raid_disks
3696 || i < conf->prev.raid_disks;
3697 i++) {
1da177e4
LT
3698
3699 disk = conf->mirrors + i;
3700
56a2559b
N
3701 if (!disk->rdev && disk->replacement) {
3702 /* The replacement is all we have - use it */
3703 disk->rdev = disk->replacement;
3704 disk->replacement = NULL;
3705 clear_bit(Replacement, &disk->rdev->flags);
3706 }
3707
5fd6c1dc 3708 if (!disk->rdev ||
2e333e89 3709 !test_bit(In_sync, &disk->rdev->flags)) {
1da177e4
LT
3710 disk->head_position = 0;
3711 mddev->degraded++;
0b59bb64
N
3712 if (disk->rdev &&
3713 disk->rdev->saved_raid_disk < 0)
8c2e870a 3714 conf->fullsync = 1;
1da177e4 3715 }
d890fa2b 3716 disk->recovery_disabled = mddev->recovery_disabled - 1;
1da177e4
LT
3717 }
3718
8c6ac868 3719 if (mddev->recovery_cp != MaxSector)
08464e09
N
3720 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
3721 mdname(mddev));
3722 pr_info("md/raid10:%s: active with %d out of %d devices\n",
5cf00fcd
N
3723 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3724 conf->geo.raid_disks);
1da177e4
LT
3725 /*
3726 * Ok, everything is just fine now
3727 */
dab8b292
TM
3728 mddev->dev_sectors = conf->dev_sectors;
3729 size = raid10_size(mddev, 0, 0);
3730 md_set_array_sectors(mddev, size);
3731 mddev->resync_max_sectors = size;
1da177e4 3732
cc4d1efd 3733 if (mddev->queue) {
5cf00fcd 3734 int stripe = conf->geo.raid_disks *
9d8f0363 3735 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
cc4d1efd
JB
3736
3737 /* Calculate max read-ahead size.
3738 * We need to readahead at least twice a whole stripe....
3739 * maybe...
3740 */
5cf00fcd 3741 stripe /= conf->geo.near_copies;
3ea7daa5
N
3742 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3743 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1da177e4
LT
3744 }
3745
a91a2785
MP
3746 if (md_integrity_register(mddev))
3747 goto out_free_conf;
3748
3ea7daa5
N
3749 if (conf->reshape_progress != MaxSector) {
3750 unsigned long before_length, after_length;
3751
3752 before_length = ((1 << conf->prev.chunk_shift) *
3753 conf->prev.far_copies);
3754 after_length = ((1 << conf->geo.chunk_shift) *
3755 conf->geo.far_copies);
3756
3757 if (max(before_length, after_length) > min_offset_diff) {
3758 /* This cannot work */
08464e09 3759 pr_warn("md/raid10: offset difference not enough to continue reshape\n");
3ea7daa5
N
3760 goto out_free_conf;
3761 }
3762 conf->offset_diff = min_offset_diff;
3763
3ea7daa5
N
3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3767 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3768 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3769 "reshape");
3770 }
3771
1da177e4
LT
3772 return 0;
3773
3774out_free_conf:
01f96c0a 3775 md_unregister_thread(&mddev->thread);
644df1a8 3776 mempool_destroy(conf->r10bio_pool);
1345b1d8 3777 safe_put_page(conf->tmppage);
990a8baf 3778 kfree(conf->mirrors);
1da177e4
LT
3779 kfree(conf);
3780 mddev->private = NULL;
3781out:
3782 return -EIO;
3783}
3784
afa0f557 3785static void raid10_free(struct mddev *mddev, void *priv)
1da177e4 3786{
afa0f557 3787 struct r10conf *conf = priv;
1da177e4 3788
644df1a8 3789 mempool_destroy(conf->r10bio_pool);
0fea7ed8 3790 safe_put_page(conf->tmppage);
990a8baf 3791 kfree(conf->mirrors);
c4796e21
N
3792 kfree(conf->mirrors_old);
3793 kfree(conf->mirrors_new);
1da177e4 3794 kfree(conf);
1da177e4
LT
3795}
3796
fd01b88c 3797static void raid10_quiesce(struct mddev *mddev, int state)
6cce3b23 3798{
e879a879 3799 struct r10conf *conf = mddev->private;
6cce3b23
N
3800
3801 switch(state) {
3802 case 1:
3803 raise_barrier(conf, 0);
3804 break;
3805 case 0:
3806 lower_barrier(conf);
3807 break;
3808 }
6cce3b23 3809}
1da177e4 3810
006a09a0
N
3811static int raid10_resize(struct mddev *mddev, sector_t sectors)
3812{
3813 /* Resize of 'far' arrays is not supported.
3814 * For 'near' and 'offset' arrays we can set the
3815 * number of sectors used to be an appropriate multiple
3816 * of the chunk size.
3817 * For 'offset', this is far_copies*chunksize.
3818 * For 'near' the multiplier is the LCM of
3819 * near_copies and raid_disks.
3820 * So if far_copies > 1 && !far_offset, fail.
3821 * Else find LCM(raid_disks, near_copies)*far_copies and
3822 * multiply by chunk_size. Then round to this number.
3823 * This is mostly done by raid10_size()
3824 */
3825 struct r10conf *conf = mddev->private;
3826 sector_t oldsize, size;
3827
f8c9e74f
N
3828 if (mddev->reshape_position != MaxSector)
3829 return -EBUSY;
3830
5cf00fcd 3831 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
006a09a0
N
3832 return -EINVAL;
3833
3834 oldsize = raid10_size(mddev, 0, 0);
3835 size = raid10_size(mddev, sectors, 0);
a4a6125a
N
3836 if (mddev->external_size &&
3837 mddev->array_sectors > size)
006a09a0 3838 return -EINVAL;
a4a6125a
N
3839 if (mddev->bitmap) {
3840 int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3841 if (ret)
3842 return ret;
3843 }
3844 md_set_array_sectors(mddev, size);
859644f0
HM
3845 if (mddev->queue) {
3846 set_capacity(mddev->gendisk, mddev->array_sectors);
3847 revalidate_disk(mddev->gendisk);
3848 }
006a09a0
N
3849 if (sectors > mddev->dev_sectors &&
3850 mddev->recovery_cp > oldsize) {
3851 mddev->recovery_cp = oldsize;
3852 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3853 }
6508fdbf
N
3854 calc_sectors(conf, sectors);
3855 mddev->dev_sectors = conf->dev_sectors;
006a09a0
N
3856 mddev->resync_max_sectors = size;
3857 return 0;
3858}
3859
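/*
 * Editor's note: a standalone sketch (not part of this driver) of the size
 * granularity rule stated in the comment at the top of raid10_resize()
 * above: LCM(raid_disks, near_copies) * far_copies chunks.  Helper names
 * are hypothetical; the driver itself relies on raid10_size() for this.
 */
#include <stdint.h>

static uint64_t example_gcd(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* Granularity, in sectors, to which a 'near'/'offset' array size is rounded. */
static uint64_t example_resize_granularity(int raid_disks, int near_copies,
					   int far_copies, uint64_t chunk_sectors)
{
	uint64_t lcm = (uint64_t)raid_disks * near_copies /
		       example_gcd(raid_disks, near_copies);

	return lcm * far_copies * chunk_sectors;
}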
53a6ab4d 3860static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
dab8b292 3861{
3cb03002 3862 struct md_rdev *rdev;
e879a879 3863 struct r10conf *conf;
dab8b292
TM
3864
3865 if (mddev->degraded > 0) {
08464e09
N
3866 pr_warn("md/raid10:%s: Error: degraded raid0!\n",
3867 mdname(mddev));
dab8b292
TM
3868 return ERR_PTR(-EINVAL);
3869 }
53a6ab4d 3870 sector_div(size, devs);
dab8b292 3871
dab8b292
TM
3872 /* Set new parameters */
3873 mddev->new_level = 10;
3874 /* new layout: far_copies = 1, near_copies = 2 */
3875 mddev->new_layout = (1<<8) + 2;
3876 mddev->new_chunk_sectors = mddev->chunk_sectors;
3877 mddev->delta_disks = mddev->raid_disks;
dab8b292
TM
3878 mddev->raid_disks *= 2;
3879 /* make sure it will be not marked as dirty */
3880 mddev->recovery_cp = MaxSector;
53a6ab4d 3881 mddev->dev_sectors = size;
dab8b292
TM
3882
3883 conf = setup_conf(mddev);
02214dc5 3884 if (!IS_ERR(conf)) {
dafb20fa 3885 rdev_for_each(rdev, mddev)
53a6ab4d 3886 if (rdev->raid_disk >= 0) {
e93f68a1 3887 rdev->new_raid_disk = rdev->raid_disk * 2;
53a6ab4d
N
3888 rdev->sectors = size;
3889 }
02214dc5
KW
3890 conf->barrier = 1;
3891 }
3892
dab8b292
TM
3893 return conf;
3894}
3895
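/*
 * Editor's note: a standalone sketch (not part of this driver) of the layout
 * word the raid0 takeover above installs: near_copies in the low byte,
 * far_copies in the second byte, far_offset in bit 16.  Hypothetical name.
 */
static int example_encode_layout(int near_copies, int far_copies, int far_offset)
{
	return (far_offset ? (1 << 16) : 0) | (far_copies << 8) | near_copies;
}
/* example_encode_layout(2, 1, 0) == (1 << 8) + 2, the 'n2' value set above. */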
fd01b88c 3896static void *raid10_takeover(struct mddev *mddev)
dab8b292 3897{
e373ab10 3898 struct r0conf *raid0_conf;
dab8b292
TM
3899
3900 /* raid10 can take over:
3901 * raid0 - providing it has only one zone
3902 */
3903 if (mddev->level == 0) {
3904 /* for raid0 takeover only one zone is supported */
e373ab10
N
3905 raid0_conf = mddev->private;
3906 if (raid0_conf->nr_strip_zones > 1) {
08464e09
N
3907 pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
3908 mdname(mddev));
dab8b292
TM
3909 return ERR_PTR(-EINVAL);
3910 }
53a6ab4d
N
3911 return raid10_takeover_raid0(mddev,
3912 raid0_conf->strip_zone->zone_end,
3913 raid0_conf->strip_zone->nb_dev);
dab8b292
TM
3914 }
3915 return ERR_PTR(-EINVAL);
3916}
3917
3ea7daa5
N
3918static int raid10_check_reshape(struct mddev *mddev)
3919{
3920 /* Called when there is a request to change
3921 * - layout (to ->new_layout)
3922 * - chunk size (to ->new_chunk_sectors)
3923 * - raid_disks (by delta_disks)
3924 * or when trying to restart a reshape that was ongoing.
3925 *
3926 * We need to validate the request and possibly allocate
3927 * space if that might be an issue later.
3928 *
3929 * Currently we reject any reshape of a 'far' mode array,
3930 * allow chunk size to change if new is generally acceptable,
3931 * allow raid_disks to increase, and allow
3932 * a switch between 'near' mode and 'offset' mode.
3933 */
3934 struct r10conf *conf = mddev->private;
3935 struct geom geo;
3936
3937 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3938 return -EINVAL;
3939
3940 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3941 /* mustn't change number of copies */
3942 return -EINVAL;
3943 if (geo.far_copies > 1 && !geo.far_offset)
3944 /* Cannot switch to 'far' mode */
3945 return -EINVAL;
3946
3947 if (mddev->array_sectors & geo.chunk_mask)
3948 /* not factor of array size */
3949 return -EINVAL;
3950
3ea7daa5
N
3951 if (!enough(conf, -1))
3952 return -EINVAL;
3953
3954 kfree(conf->mirrors_new);
3955 conf->mirrors_new = NULL;
3956 if (mddev->delta_disks > 0) {
3957 /* allocate new 'mirrors' list */
3958 conf->mirrors_new = kzalloc(
dc280d98 3959 sizeof(struct raid10_info)
3ea7daa5
N
3960 *(mddev->raid_disks +
3961 mddev->delta_disks),
3962 GFP_KERNEL);
3963 if (!conf->mirrors_new)
3964 return -ENOMEM;
3965 }
3966 return 0;
3967}
3968
3969/*
3970 * Need to check if array has failed when deciding whether to:
3971 * - start an array
3972 * - remove non-faulty devices
3973 * - add a spare
3974 * - allow a reshape
3975 * This determination is simple when no reshape is happening.
3976 * However if there is a reshape, we need to carefully check
3977 * both the before and after sections.
3978 * This is because some failed devices may only affect one
3979 * of the two sections, and some non-in_sync devices may
3980 * be insync in the section most affected by failed devices.
3981 */
3982static int calc_degraded(struct r10conf *conf)
3983{
3984 int degraded, degraded2;
3985 int i;
3986
3987 rcu_read_lock();
3988 degraded = 0;
3989 /* 'prev' section first */
3990 for (i = 0; i < conf->prev.raid_disks; i++) {
3991 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3992 if (!rdev || test_bit(Faulty, &rdev->flags))
3993 degraded++;
3994 else if (!test_bit(In_sync, &rdev->flags))
3995 /* When we can reduce the number of devices in
3996 * an array, this might not contribute to
3997 * 'degraded'. It does now.
3998 */
3999 degraded++;
4000 }
4001 rcu_read_unlock();
4002 if (conf->geo.raid_disks == conf->prev.raid_disks)
4003 return degraded;
4004 rcu_read_lock();
4005 degraded2 = 0;
4006 for (i = 0; i < conf->geo.raid_disks; i++) {
4007 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4008 if (!rdev || test_bit(Faulty, &rdev->flags))
4009 degraded2++;
4010 else if (!test_bit(In_sync, &rdev->flags)) {
4011 /* If reshape is increasing the number of devices,
4012 * this section has already been recovered, so
4013 * it doesn't contribute to degraded.
4014 * else it does.
4015 */
4016 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4017 degraded2++;
4018 }
4019 }
4020 rcu_read_unlock();
4021 if (degraded2 > degraded)
4022 return degraded2;
4023 return degraded;
4024}
4025
4026static int raid10_start_reshape(struct mddev *mddev)
4027{
4028 /* A 'reshape' has been requested. This commits
4029 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4030 * This also checks if there are enough spares and adds them
4031 * to the array.
4032 * We currently require enough spares to make the final
4033 * array non-degraded. We also require that the difference
4034 * between old and new data_offset - on each device - is
4035 * enough that we never risk over-writing.
4036 */
4037
4038 unsigned long before_length, after_length;
4039 sector_t min_offset_diff = 0;
4040 int first = 1;
4041 struct geom new;
4042 struct r10conf *conf = mddev->private;
4043 struct md_rdev *rdev;
4044 int spares = 0;
bb63a701 4045 int ret;
3ea7daa5
N
4046
4047 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4048 return -EBUSY;
4049
4050 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4051 return -EINVAL;
4052
4053 before_length = ((1 << conf->prev.chunk_shift) *
4054 conf->prev.far_copies);
4055 after_length = ((1 << conf->geo.chunk_shift) *
4056 conf->geo.far_copies);
4057
4058 rdev_for_each(rdev, mddev) {
4059 if (!test_bit(In_sync, &rdev->flags)
4060 && !test_bit(Faulty, &rdev->flags))
4061 spares++;
4062 if (rdev->raid_disk >= 0) {
4063 long long diff = (rdev->new_data_offset
4064 - rdev->data_offset);
4065 if (!mddev->reshape_backwards)
4066 diff = -diff;
4067 if (diff < 0)
4068 diff = 0;
4069 if (first || diff < min_offset_diff)
4070 min_offset_diff = diff;
4071 }
4072 }
4073
4074 if (max(before_length, after_length) > min_offset_diff)
4075 return -EINVAL;
4076
4077 if (spares < mddev->delta_disks)
4078 return -EINVAL;
4079
4080 conf->offset_diff = min_offset_diff;
4081 spin_lock_irq(&conf->device_lock);
4082 if (conf->mirrors_new) {
4083 memcpy(conf->mirrors_new, conf->mirrors,
dc280d98 4084 sizeof(struct raid10_info)*conf->prev.raid_disks);
3ea7daa5 4085 smp_mb();
c4796e21 4086 kfree(conf->mirrors_old);
3ea7daa5
N
4087 conf->mirrors_old = conf->mirrors;
4088 conf->mirrors = conf->mirrors_new;
4089 conf->mirrors_new = NULL;
4090 }
4091 setup_geo(&conf->geo, mddev, geo_start);
4092 smp_mb();
4093 if (mddev->reshape_backwards) {
4094 sector_t size = raid10_size(mddev, 0, 0);
4095 if (size < mddev->array_sectors) {
4096 spin_unlock_irq(&conf->device_lock);
08464e09
N
4097 pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
4098 mdname(mddev));
3ea7daa5
N
4099 return -EINVAL;
4100 }
4101 mddev->resync_max_sectors = size;
4102 conf->reshape_progress = size;
4103 } else
4104 conf->reshape_progress = 0;
299b0685 4105 conf->reshape_safe = conf->reshape_progress;
3ea7daa5
N
4106 spin_unlock_irq(&conf->device_lock);
4107
bb63a701
N
4108 if (mddev->delta_disks && mddev->bitmap) {
4109 ret = bitmap_resize(mddev->bitmap,
4110 raid10_size(mddev, 0,
4111 conf->geo.raid_disks),
4112 0, 0);
4113 if (ret)
4114 goto abort;
4115 }
3ea7daa5
N
4116 if (mddev->delta_disks > 0) {
4117 rdev_for_each(rdev, mddev)
4118 if (rdev->raid_disk < 0 &&
4119 !test_bit(Faulty, &rdev->flags)) {
4120 if (raid10_add_disk(mddev, rdev) == 0) {
4121 if (rdev->raid_disk >=
4122 conf->prev.raid_disks)
4123 set_bit(In_sync, &rdev->flags);
4124 else
4125 rdev->recovery_offset = 0;
4126
4127 if (sysfs_link_rdev(mddev, rdev))
4128 /* Failure here is OK */;
4129 }
4130 } else if (rdev->raid_disk >= conf->prev.raid_disks
4131 && !test_bit(Faulty, &rdev->flags)) {
4132 /* This is a spare that was manually added */
4133 set_bit(In_sync, &rdev->flags);
4134 }
4135 }
4136 /* When a reshape changes the number of devices,
4137 * ->degraded is measured against the larger of the
4138 * pre and post numbers.
4139 */
4140 spin_lock_irq(&conf->device_lock);
4141 mddev->degraded = calc_degraded(conf);
4142 spin_unlock_irq(&conf->device_lock);
4143 mddev->raid_disks = conf->geo.raid_disks;
4144 mddev->reshape_position = conf->reshape_progress;
4145 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4146
4147 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4148 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
ea358cd0 4149 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
3ea7daa5
N
4150 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4151 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4152
4153 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4154 "reshape");
4155 if (!mddev->sync_thread) {
bb63a701
N
4156 ret = -EAGAIN;
4157 goto abort;
3ea7daa5
N
4158 }
4159 conf->reshape_checkpoint = jiffies;
4160 md_wakeup_thread(mddev->sync_thread);
4161 md_new_event(mddev);
4162 return 0;
bb63a701
N
4163
4164abort:
4165 mddev->recovery = 0;
4166 spin_lock_irq(&conf->device_lock);
4167 conf->geo = conf->prev;
4168 mddev->raid_disks = conf->geo.raid_disks;
4169 rdev_for_each(rdev, mddev)
4170 rdev->new_data_offset = rdev->data_offset;
4171 smp_wmb();
4172 conf->reshape_progress = MaxSector;
299b0685 4173 conf->reshape_safe = MaxSector;
bb63a701
N
4174 mddev->reshape_position = MaxSector;
4175 spin_unlock_irq(&conf->device_lock);
4176 return ret;
3ea7daa5
N
4177}
4178
4179/* Calculate the last device-address that could contain
4180 * any block from the chunk that includes the array-address 's'
4181 * and report the next address.
4182 * i.e. the address returned will be chunk-aligned and after
4183 * any data that is in the chunk containing 's'.
4184 */
4185static sector_t last_dev_address(sector_t s, struct geom *geo)
4186{
4187 s = (s | geo->chunk_mask) + 1;
4188 s >>= geo->chunk_shift;
4189 s *= geo->near_copies;
4190 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4191 s *= geo->far_copies;
4192 s <<= geo->chunk_shift;
4193 return s;
4194}
4195
4196/* Calculate the first device-address that could contain
4197 * any block from the chunk that includes the array-address 's'.
4198 * This too will be the start of a chunk
4199 */
4200static sector_t first_dev_address(sector_t s, struct geom *geo)
4201{
4202 s >>= geo->chunk_shift;
4203 s *= geo->near_copies;
4204 sector_div(s, geo->raid_disks);
4205 s *= geo->far_copies;
4206 s <<= geo->chunk_shift;
4207 return s;
4208}
4209
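/*
 * Editor's note: a standalone sketch (not part of this driver) of the
 * array-address to device-address rounding performed by last_dev_address()
 * above, written with plain C division.  Hypothetical names throughout.
 */
#include <stdint.h>

static uint64_t example_last_dev_address(uint64_t s, int raid_disks,
					 int near_copies, int far_copies,
					 int chunk_shift)
{
	uint64_t chunk_mask = (1ULL << chunk_shift) - 1;

	s = (s | chunk_mask) + 1;               /* next chunk boundary (array) */
	s >>= chunk_shift;                      /* array address in chunks */
	s *= near_copies;                       /* copies spread over devices */
	s = (s + raid_disks - 1) / raid_disks;  /* round up to per-device chunks */
	s *= far_copies;                        /* each device repeats far_copies times */
	return s << chunk_shift;                /* device address in sectors */
}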
4210static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4211 int *skipped)
4212{
4213 /* We simply copy at most one chunk (smallest of old and new)
4214 * at a time, possibly less if that exceeds RESYNC_PAGES,
4215 * or we hit a bad block or something.
4216 * This might mean we pause for normal IO in the middle of
02ec5026 4217 * a chunk, but that is not a problem as mddev->reshape_position
3ea7daa5
N
4218 * can record any location.
4219 *
4220 * If we will want to write to a location that isn't
4221 * yet recorded as 'safe' (i.e. in metadata on disk) then
4222 * we need to flush all reshape requests and update the metadata.
4223 *
4224 * When reshaping forwards (e.g. to more devices), we interpret
4225 * 'safe' as the earliest block which might not have been copied
4226 * down yet. We divide this by previous stripe size and multiply
4227 * by previous stripe length to get lowest device offset that we
4228 * cannot write to yet.
4229 * We interpret 'sector_nr' as an address that we want to write to.
4230 * From this we use last_dev_address() to find where we might
4231 * write to, and first_dev_address() on the 'safe' position.
4232 * If this 'next' write position is after the 'safe' position,
4233 * we must update the metadata to increase the 'safe' position.
4234 *
4235 * When reshaping backwards, we round in the opposite direction
4236 * and perform the reverse test: next write position must not be
4237 * less than current safe position.
4238 *
4239 * In all this the minimum difference in data offsets
4240 * (conf->offset_diff - always positive) allows a bit of slack,
02ec5026 4241 * so next can be after 'safe', but not by more than offset_diff
3ea7daa5
N
4242 *
4243 * We need to prepare all the bios here before we start any IO
4244 * to ensure the size we choose is acceptable to all devices.
4245 * That means one for each copy for write-out and an extra one for
4246 * read-in.
4247 * We store the read-in bio in ->master_bio and the others in
4248 * ->devs[x].bio and ->devs[x].repl_bio.
4249 */
4250 struct r10conf *conf = mddev->private;
4251 struct r10bio *r10_bio;
4252 sector_t next, safe, last;
4253 int max_sectors;
4254 int nr_sectors;
4255 int s;
4256 struct md_rdev *rdev;
4257 int need_flush = 0;
4258 struct bio *blist;
4259 struct bio *bio, *read_bio;
4260 int sectors_done = 0;
4261
4262 if (sector_nr == 0) {
4263 /* If restarting in the middle, skip the initial sectors */
4264 if (mddev->reshape_backwards &&
4265 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4266 sector_nr = (raid10_size(mddev, 0, 0)
4267 - conf->reshape_progress);
4268 } else if (!mddev->reshape_backwards &&
4269 conf->reshape_progress > 0)
4270 sector_nr = conf->reshape_progress;
4271 if (sector_nr) {
4272 mddev->curr_resync_completed = sector_nr;
4273 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4274 *skipped = 1;
4275 return sector_nr;
4276 }
4277 }
4278
4279 /* We don't use sector_nr to track where we are up to
4280 * as that doesn't work well for ->reshape_backwards.
4281 * So just use ->reshape_progress.
4282 */
4283 if (mddev->reshape_backwards) {
4284 /* 'next' is the earliest device address that we might
4285 * write to for this chunk in the new layout
4286 */
4287 next = first_dev_address(conf->reshape_progress - 1,
4288 &conf->geo);
4289
4290 /* 'safe' is the last device address that we might read from
4291 * in the old layout after a restart
4292 */
4293 safe = last_dev_address(conf->reshape_safe - 1,
4294 &conf->prev);
4295
4296 if (next + conf->offset_diff < safe)
4297 need_flush = 1;
4298
4299 last = conf->reshape_progress - 1;
4300 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4301 & conf->prev.chunk_mask);
4302 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4303 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4304 } else {
4305 /* 'next' is after the last device address that we
4306 * might write to for this chunk in the new layout
4307 */
4308 next = last_dev_address(conf->reshape_progress, &conf->geo);
4309
4310 /* 'safe' is the earliest device address that we might
4311 * read from in the old layout after a restart
4312 */
4313 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4314
4315 /* Need to update metadata if 'next' might be beyond 'safe'
4316 * as that would possibly corrupt data
4317 */
4318 if (next > safe + conf->offset_diff)
4319 need_flush = 1;
4320
4321 sector_nr = conf->reshape_progress;
4322 last = sector_nr | (conf->geo.chunk_mask
4323 & conf->prev.chunk_mask);
4324
4325 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4326 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4327 }
4328
4329 if (need_flush ||
4330 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4331 /* Need to update reshape_position in metadata */
4332 wait_barrier(conf);
4333 mddev->reshape_position = conf->reshape_progress;
4334 if (mddev->reshape_backwards)
4335 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4336 - conf->reshape_progress;
4337 else
4338 mddev->curr_resync_completed = conf->reshape_progress;
4339 conf->reshape_checkpoint = jiffies;
4340 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4341 md_wakeup_thread(mddev->thread);
4342 wait_event(mddev->sb_wait, mddev->flags == 0 ||
c91abf5a
N
4343 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4344 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4345 allow_barrier(conf);
4346 return sectors_done;
4347 }
3ea7daa5
N
4348 conf->reshape_safe = mddev->reshape_position;
4349 allow_barrier(conf);
4350 }
4351
4352read_more:
4353 /* Now schedule reads for blocks from sector_nr to last */
4354 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
cb8b12b5 4355 r10_bio->state = 0;
3ea7daa5
N
4356 raise_barrier(conf, sectors_done != 0);
4357 atomic_set(&r10_bio->remaining, 0);
4358 r10_bio->mddev = mddev;
4359 r10_bio->sector = sector_nr;
4360 set_bit(R10BIO_IsReshape, &r10_bio->state);
4361 r10_bio->sectors = last - sector_nr + 1;
4362 rdev = read_balance(conf, r10_bio, &max_sectors);
4363 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4364
4365 if (!rdev) {
4366 /* Cannot read from here, so need to record bad blocks
4367 * on all the target devices.
4368 */
4369 // FIXME
e337aead 4370 mempool_free(r10_bio, conf->r10buf_pool);
3ea7daa5
N
4371 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4372 return sectors_done;
4373 }
4374
4375 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4376
4377 read_bio->bi_bdev = rdev->bdev;
4f024f37 4378 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
3ea7daa5
N
4379 + rdev->data_offset);
4380 read_bio->bi_private = r10_bio;
4381 read_bio->bi_end_io = end_sync_read;
796a5cf0 4382 bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
ce0b0a46 4383 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4246a0b6 4384 read_bio->bi_error = 0;
3ea7daa5 4385 read_bio->bi_vcnt = 0;
4f024f37 4386 read_bio->bi_iter.bi_size = 0;
3ea7daa5
N
4387 r10_bio->master_bio = read_bio;
4388 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4389
4390 /* Now find the locations in the new layout */
4391 __raid10_find_phys(&conf->geo, r10_bio);
4392
4393 blist = read_bio;
4394 read_bio->bi_next = NULL;
4395
d094d686 4396 rcu_read_lock();
3ea7daa5
N
4397 for (s = 0; s < conf->copies*2; s++) {
4398 struct bio *b;
4399 int d = r10_bio->devs[s/2].devnum;
4400 struct md_rdev *rdev2;
4401 if (s&1) {
d094d686 4402 rdev2 = rcu_dereference(conf->mirrors[d].replacement);
3ea7daa5
N
4403 b = r10_bio->devs[s/2].repl_bio;
4404 } else {
d094d686 4405 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
3ea7daa5
N
4406 b = r10_bio->devs[s/2].bio;
4407 }
4408 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4409 continue;
8be185f2
KO
4410
4411 bio_reset(b);
3ea7daa5 4412 b->bi_bdev = rdev2->bdev;
4f024f37
KO
4413 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4414 rdev2->new_data_offset;
3ea7daa5
N
4415 b->bi_private = r10_bio;
4416 b->bi_end_io = end_reshape_write;
796a5cf0 4417 bio_set_op_attrs(b, REQ_OP_WRITE, 0);
3ea7daa5 4418 b->bi_next = blist;
3ea7daa5
N
4419 blist = b;
4420 }
4421
4422 /* Now add as many pages as possible to all of these bios. */
4423
4424 nr_sectors = 0;
4425 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4426 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4427 int len = (max_sectors - s) << 9;
4428 if (len > PAGE_SIZE)
4429 len = PAGE_SIZE;
4430 for (bio = blist; bio ; bio = bio->bi_next) {
4431 struct bio *bio2;
4432 if (bio_add_page(bio, page, len, 0))
4433 continue;
4434
4435 /* Didn't fit, must stop */
4436 for (bio2 = blist;
4437 bio2 && bio2 != bio;
4438 bio2 = bio2->bi_next) {
4439 /* Remove last page from this bio */
4440 bio2->bi_vcnt--;
4f024f37 4441 bio2->bi_iter.bi_size -= len;
b7c44ed9 4442 bio_clear_flag(bio2, BIO_SEG_VALID);
3ea7daa5
N
4443 }
4444 goto bio_full;
4445 }
4446 sector_nr += len >> 9;
4447 nr_sectors += len >> 9;
4448 }
4449bio_full:
d094d686 4450 rcu_read_unlock();
3ea7daa5
N
4451 r10_bio->sectors = nr_sectors;
4452
4453 /* Now submit the read */
4454 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4455 atomic_inc(&r10_bio->remaining);
4456 read_bio->bi_next = NULL;
4457 generic_make_request(read_bio);
4458 sector_nr += nr_sectors;
4459 sectors_done += nr_sectors;
4460 if (sector_nr <= last)
4461 goto read_more;
4462
4463 /* Now that we have done the whole section we can
4464 * update reshape_progress
4465 */
4466 if (mddev->reshape_backwards)
4467 conf->reshape_progress -= sectors_done;
4468 else
4469 conf->reshape_progress += sectors_done;
4470
4471 return sectors_done;
4472}
4473
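/*
 * Editor's note: a standalone sketch (not part of this driver) of the
 * metadata-flush test described in the long comment at the top of
 * reshape_request() above.  'next' and 'safe' are device addresses and
 * offset_diff is the data_offset slack; the helper name is hypothetical.
 */
#include <stdint.h>

static int example_need_metadata_flush(uint64_t next, uint64_t safe,
				       uint64_t offset_diff,
				       int reshape_backwards)
{
	if (reshape_backwards)
		return next + offset_diff < safe; /* must not drop below 'safe' */
	return next > safe + offset_diff;         /* must not pass 'safe' */
}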
4474static void end_reshape_request(struct r10bio *r10_bio);
4475static int handle_reshape_read_error(struct mddev *mddev,
4476 struct r10bio *r10_bio);
4477static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4478{
4479 /* Reshape read completed. Hopefully we have a block
4480 * to write out.
4481 * If we got a read error then we do sync 1-page reads from
4482 * elsewhere until we find the data - or give up.
4483 */
4484 struct r10conf *conf = mddev->private;
4485 int s;
4486
4487 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4488 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4489 /* Reshape has been aborted */
4490 md_done_sync(mddev, r10_bio->sectors, 0);
4491 return;
4492 }
4493
4494 /* We definitely have the data in the pages, schedule the
4495 * writes.
4496 */
4497 atomic_set(&r10_bio->remaining, 1);
4498 for (s = 0; s < conf->copies*2; s++) {
4499 struct bio *b;
4500 int d = r10_bio->devs[s/2].devnum;
4501 struct md_rdev *rdev;
d094d686 4502 rcu_read_lock();
3ea7daa5 4503 if (s&1) {
d094d686 4504 rdev = rcu_dereference(conf->mirrors[d].replacement);
3ea7daa5
N
4505 b = r10_bio->devs[s/2].repl_bio;
4506 } else {
d094d686 4507 rdev = rcu_dereference(conf->mirrors[d].rdev);
3ea7daa5
N
4508 b = r10_bio->devs[s/2].bio;
4509 }
d094d686
N
4510 if (!rdev || test_bit(Faulty, &rdev->flags)) {
4511 rcu_read_unlock();
3ea7daa5 4512 continue;
d094d686 4513 }
3ea7daa5 4514 atomic_inc(&rdev->nr_pending);
d094d686 4515 rcu_read_unlock();
3ea7daa5
N
4516 md_sync_acct(b->bi_bdev, r10_bio->sectors);
4517 atomic_inc(&r10_bio->remaining);
4518 b->bi_next = NULL;
4519 generic_make_request(b);
4520 }
4521 end_reshape_request(r10_bio);
4522}
4523
4524static void end_reshape(struct r10conf *conf)
4525{
4526 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4527 return;
4528
4529 spin_lock_irq(&conf->device_lock);
4530 conf->prev = conf->geo;
4531 md_finish_reshape(conf->mddev);
4532 smp_wmb();
4533 conf->reshape_progress = MaxSector;
299b0685 4534 conf->reshape_safe = MaxSector;
3ea7daa5
N
4535 spin_unlock_irq(&conf->device_lock);
4536
4537 /* read-ahead size must cover two whole stripes, which is
4538 * 2 * (number of data disks) * chunksize
4539 */
4540 if (conf->mddev->queue) {
4541 int stripe = conf->geo.raid_disks *
4542 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4543 stripe /= conf->geo.near_copies;
4544 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4545 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4546 }
4547 conf->fullsync = 0;
4548}
4549
3ea7daa5
N
4550static int handle_reshape_read_error(struct mddev *mddev,
4551 struct r10bio *r10_bio)
4552{
4553 /* Use sync reads to get the blocks from somewhere else */
4554 int sectors = r10_bio->sectors;
3ea7daa5 4555 struct r10conf *conf = mddev->private;
e0ee7785
N
4556 struct {
4557 struct r10bio r10_bio;
4558 struct r10dev devs[conf->copies];
4559 } on_stack;
4560 struct r10bio *r10b = &on_stack.r10_bio;
3ea7daa5
N
4561 int slot = 0;
4562 int idx = 0;
4563 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4564
e0ee7785
N
4565 r10b->sector = r10_bio->sector;
4566 __raid10_find_phys(&conf->prev, r10b);
3ea7daa5
N
4567
4568 while (sectors) {
4569 int s = sectors;
4570 int success = 0;
4571 int first_slot = slot;
4572
4573 if (s > (PAGE_SIZE >> 9))
4574 s = PAGE_SIZE >> 9;
4575
d094d686 4576 rcu_read_lock();
3ea7daa5 4577 while (!success) {
e0ee7785 4578 int d = r10b->devs[slot].devnum;
d094d686 4579 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
3ea7daa5
N
4580 sector_t addr;
4581 if (rdev == NULL ||
4582 test_bit(Faulty, &rdev->flags) ||
4583 !test_bit(In_sync, &rdev->flags))
4584 goto failed;
4585
e0ee7785 4586 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
d094d686
N
4587 atomic_inc(&rdev->nr_pending);
4588 rcu_read_unlock();
3ea7daa5
N
4589 success = sync_page_io(rdev,
4590 addr,
4591 s << 9,
4592 bvec[idx].bv_page,
796a5cf0 4593 REQ_OP_READ, 0, false);
d094d686
N
4594 rdev_dec_pending(rdev, mddev);
4595 rcu_read_lock();
3ea7daa5
N
4596 if (success)
4597 break;
4598 failed:
4599 slot++;
4600 if (slot >= conf->copies)
4601 slot = 0;
4602 if (slot == first_slot)
4603 break;
4604 }
d094d686 4605 rcu_read_unlock();
3ea7daa5
N
4606 if (!success) {
4607 /* couldn't read this block, must give up */
4608 set_bit(MD_RECOVERY_INTR,
4609 &mddev->recovery);
4610 return -EIO;
4611 }
4612 sectors -= s;
4613 idx++;
4614 }
4615 return 0;
4616}
4617
4246a0b6 4618static void end_reshape_write(struct bio *bio)
3ea7daa5 4619{
3ea7daa5
N
4620 struct r10bio *r10_bio = bio->bi_private;
4621 struct mddev *mddev = r10_bio->mddev;
4622 struct r10conf *conf = mddev->private;
4623 int d;
4624 int slot;
4625 int repl;
4626 struct md_rdev *rdev = NULL;
4627
4628 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4629 if (repl)
4630 rdev = conf->mirrors[d].replacement;
4631 if (!rdev) {
4632 smp_mb();
4633 rdev = conf->mirrors[d].rdev;
4634 }
4635
4246a0b6 4636 if (bio->bi_error) {
3ea7daa5
N
4637 /* FIXME should record badblock */
4638 md_error(mddev, rdev);
4639 }
4640
4641 rdev_dec_pending(rdev, mddev);
4642 end_reshape_request(r10_bio);
4643}
4644
4645static void end_reshape_request(struct r10bio *r10_bio)
4646{
4647 if (!atomic_dec_and_test(&r10_bio->remaining))
4648 return;
4649 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4650 bio_put(r10_bio->master_bio);
4651 put_buf(r10_bio);
4652}
4653
4654static void raid10_finish_reshape(struct mddev *mddev)
4655{
4656 struct r10conf *conf = mddev->private;
4657
4658 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4659 return;
4660
4661 if (mddev->delta_disks > 0) {
4662 sector_t size = raid10_size(mddev, 0, 0);
4663 md_set_array_sectors(mddev, size);
4664 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4665 mddev->recovery_cp = mddev->resync_max_sectors;
4666 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4667 }
4668 mddev->resync_max_sectors = size;
859644f0
HM
4669 if (mddev->queue) {
4670 set_capacity(mddev->gendisk, mddev->array_sectors);
4671 revalidate_disk(mddev->gendisk);
4672 }
63aced61
N
4673 } else {
4674 int d;
d094d686 4675 rcu_read_lock();
63aced61
N
4676 for (d = conf->geo.raid_disks ;
4677 d < conf->geo.raid_disks - mddev->delta_disks;
4678 d++) {
d094d686 4679 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
63aced61
N
4680 if (rdev)
4681 clear_bit(In_sync, &rdev->flags);
d094d686 4682 rdev = rcu_dereference(conf->mirrors[d].replacement);
63aced61
N
4683 if (rdev)
4684 clear_bit(In_sync, &rdev->flags);
4685 }
d094d686 4686 rcu_read_unlock();
3ea7daa5
N
4687 }
4688 mddev->layout = mddev->new_layout;
4689 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4690 mddev->reshape_position = MaxSector;
4691 mddev->delta_disks = 0;
4692 mddev->reshape_backwards = 0;
4693}
4694
84fc4b56 4695static struct md_personality raid10_personality =
1da177e4
LT
4696{
4697 .name = "raid10",
2604b703 4698 .level = 10,
1da177e4 4699 .owner = THIS_MODULE,
849674e4
SL
4700 .make_request = raid10_make_request,
4701 .run = raid10_run,
afa0f557 4702 .free = raid10_free,
849674e4
SL
4703 .status = raid10_status,
4704 .error_handler = raid10_error,
1da177e4
LT
4705 .hot_add_disk = raid10_add_disk,
4706 .hot_remove_disk= raid10_remove_disk,
4707 .spare_active = raid10_spare_active,
849674e4 4708 .sync_request = raid10_sync_request,
6cce3b23 4709 .quiesce = raid10_quiesce,
80c3a6ce 4710 .size = raid10_size,
006a09a0 4711 .resize = raid10_resize,
dab8b292 4712 .takeover = raid10_takeover,
3ea7daa5
N
4713 .check_reshape = raid10_check_reshape,
4714 .start_reshape = raid10_start_reshape,
4715 .finish_reshape = raid10_finish_reshape,
5c675f83 4716 .congested = raid10_congested,
1da177e4
LT
4717};
4718
4719static int __init raid_init(void)
4720{
2604b703 4721 return register_md_personality(&raid10_personality);
1da177e4
LT
4722}
4723
4724static void raid_exit(void)
4725{
2604b703 4726 unregister_md_personality(&raid10_personality);
1da177e4
LT
4727}
4728
4729module_init(raid_init);
4730module_exit(raid_exit);
4731MODULE_LICENSE("GPL");
0efb9e61 4732MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
1da177e4 4733MODULE_ALIAS("md-personality-9"); /* RAID10 */
d9d166c2 4734MODULE_ALIAS("md-raid10");
2604b703 4735MODULE_ALIAS("md-level-10");
34db0cd6
N
4736
4737module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);