// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 *
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
		     START, LAST, static inline, raid1_rb);

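/*
 * Record the write range [r1_bio->sector, r1_bio->sector + sectors) in
 * the rdev's per-bucket serialization interval tree. Returns -EBUSY if
 * the range overlaps an in-flight write already in the tree, so the
 * caller must wait; returns 0 on success with 'si' inserted.
 */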
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
				struct serial_info *si, int idx)
{
	unsigned long flags;
	int ret = 0;
	sector_t lo = r1_bio->sector;
	sector_t hi = lo + r1_bio->sectors;
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	/* collision happened */
	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
		ret = -EBUSY;
	else {
		si->start = lo;
		si->last = hi;
		raid1_rb_insert(si, &serial->serial_rb);
	}
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	return ret;
}

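/*
 * Block until the write described by r1_bio no longer overlaps any
 * in-flight write on this rdev, then record it in the serialization
 * tree. The serial_info is allocated from the mddev's mempool and is
 * freed again in remove_serial().
 */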
static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	wait_event(serial->serial_io_wait,
		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}

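/*
 * Drop the record for the completed write [lo, hi) from the rdev's
 * serialization tree and wake any writer waiting for the range to
 * become free.
 */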
static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct serial_info *si;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;
	int idx = sector_to_idx(lo);
	struct serial_in_rdev *serial = &rdev->serial[idx];

	spin_lock_irqsave(&serial->serial_lock, flags);
	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
	     si; si = raid1_rb_iter_next(si, lo, hi)) {
		if (si->start == lo && si->last == hi) {
			raid1_rb_remove(si, &serial->serial_rb);
			mempool_free(si, mddev->serial_info_pool);
			found = 1;
			break;
		}
	}
	if (!found)
		WARN(1, "The write IO is not recorded for serialization\n");
	spin_unlock_irqrestore(&serial->serial_lock, flags);
	wake_up(&serial->serial_io_wait);
}

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

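/*
 * Allocate an r1bio plus the bios and resync_pages needed to service a
 * resync/recovery window: one bio per raid disk, with data pages
 * attached only to the first bio, or to every bio for a user-requested
 * check/repair, which needs to hold all copies at once.
 */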
static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}

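/*
 * Release every bio attached to the r1bio, skipping NULL and the
 * IO_BLOCKED/IO_MADE_GOOD markers, which are not real bio pointers.
 */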
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

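/*
 * Return a resync r1bio to its mempool: drop the pending reference on
 * each rdev that was involved, then lower the barrier for this sector
 * range so normal I/O may proceed.
 */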
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

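/*
 * Queue the r1bio on conf->retry_list for raid1d to handle, account for
 * it in the barrier bucket's nr_queued, and kick the md thread.
 */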
static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle. All I/Os, even write-behind writes, are done.
	 */
	allow_barrier(conf, r1_bio->sector);

	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

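/*
 * Completion handler for mirrored reads. On success (or when no other
 * mirror could do better) the master bio is ended; on failure the r1bio
 * is handed back to raid1d, which will retry the read elsewhere.
 */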
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

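/*
 * Final bookkeeping for a completed write: free any write-behind pages,
 * clear the bitmap bits for the range, and tell md the write is done.
 */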
static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

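/*
 * Completion handler for mirrored writes. Records write errors and
 * bad-block fixups on the rdev, handles write-behind accounting, and
 * completes the master bio once all mirrors have finished.
 */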
static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;
	sector_t lo = r1_bio->sector;
	sector_t hi = r1_bio->sector + r1_bio->sectors;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
		}

		/*
		 * When the device is faulty, it is not necessary to
		 * handle the write error.
		 * For failfast, this is the only remaining device,
		 * so we need to retry the write without FailFast.
		 */
		if (!test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_WriteError, &r1_bio->state);
		else {
			/* Fail the request */
			set_bit(R1BIO_Degraded, &r1_bio->state);
			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
			to_put = bio;
		}
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write fell before
		 * rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(CollisionCheck, &rdev->flags))
			remove_serial(rdev, lo, hi);
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait.
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	} else if (rdev->mddev->serialize_policy)
		remove_serial(rdev, lo, hi);
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device.
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds optimal
			 * iosize, check if there is an idle disk. If yes,
			 * choose the idle disk. read_balance could already
			 * choose an idle disk before noticing it's a
			 * sequential IO in this disk. This doesn't matter
			 * because this disk will idle, and next time it will
			 * be utilized after the first disk's IO size exceeds
			 * the optimal iosize. In this way, iosize of the
			 * first disk will be optimal iosize at least. iosize
			 * of the second disk might be small, but not a big
			 * deal since when the second disk starts IO, the
			 * first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with fewer pending requests even if
	 * that disk is rotational, which might/might not be optimal for raids
	 * with mixed rotational/non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}

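/*
 * Submit a chain of queued write bios. Bios destined for a Faulty rdev
 * are errored immediately, discards are quietly completed on queues
 * without discard support, and everything else goes down the stack.
 */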
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_bdev;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			submit_bio_noacct(bio);
		bio = next;
		cond_resched();
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive. Silence the warning by resetting
		 * thread state.
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * otherwise, returns 0.
 */
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
	 *    reached the max resync count allowed on the current I/O
	 *    barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
			     test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}

static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If while we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is
	 * 0, it is safe to return and make the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			     !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will
	 * either complete or be queued. When everything goes quiet,
	 * there are only queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception:
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For
	 * normal I/O context, extra is 1; in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}

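/*
 * Build a private copy of the write payload for write-behind: allocate
 * a clone bio with fresh pages and copy the data into them, so the
 * original bio can be completed before the write-mostly device has
 * finished its write.
 */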
static void alloc_behind_master_bio(struct r1bio *r1_bio,
				    struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
	if (!behind_bio)
		return;

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio)) {
		behind_bio->bi_iter.bi_size = size;
		goto skip_copy;
	}

	behind_bio->bi_write_hint = bio->bi_write_hint;

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		bio_add_page(behind_bio, page, len, 0);

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
	bio_put(behind_bio);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
	kfree(plug);
}

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
	/* Ensure no bio records IO_BLOCKED */
	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}

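/*
 * Service a READ: pick the best mirror via read_balance(), split the
 * bio if bad blocks limit the readable range, and submit a clone to the
 * chosen device. A non-NULL r1_bio means we are retrying on behalf of
 * the raid1d thread.
 */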
static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int max_sectors;
	int rdisk;
	bool print_msg = !!r1_bio;
	char b[BDEVNAME_SIZE];

	/*
	 * If r1_bio is set, we are blocking the raid1d thread
	 * so there is a tiny risk of deadlock.  So ask for
	 * emergency memory if needed.
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	if (print_msg) {
		/* Need to get the block device name carefully */
		struct md_rdev *rdev;
		rcu_read_lock();
		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
		if (rdev)
			bdevname(rdev->bdev, b);
		else
			strcpy(b, "???");
		rcu_read_unlock();
	}

	/*
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	if (!r1_bio)
		r1_bio = alloc_r1bio(mddev, bio);
	else
		init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		if (print_msg) {
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev),
					    b,
					    (unsigned long long)r1_bio->sector);
		}
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (print_msg)
		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
				    mdname(mddev),
				    (unsigned long long)r1_bio->sector,
				    bdevname(mirror->rdev->bdev, b));

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;

	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	bio_set_dev(read_bio, mirror->rdev->bdev);
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
				      r1_bio->sector);

	submit_bio_noacct(read_bio);
}

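/*
 * Service a WRITE: wait out any barrier/resync activity, pick the set
 * of target rdevs (working around known bad blocks), then clone and
 * queue one bio per mirror, optionally via the write-behind path.
 */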
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	int i, disks;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int max_sectors;

	if (mddev_is_clustered(mddev) &&
	    md_cluster_ops->area_resyncing(mddev, WRITE,
					   bio->bi_iter.bi_sector, bio_end_sector(bio))) {

		DEFINE_WAIT(w);
		for (;;) {
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_IDLE);
			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
							    bio->bi_iter.bi_sector,
							    bio_end_sector(bio)))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks. Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0;  i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged */
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, bio->bi_iter.bi_sector);
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
		goto retry_write;
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      GFP_NOIO, &conf->bio_split);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;

	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		if (!r1_bio->bios[i])
			continue;

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait)) {
				alloc_behind_master_bio(r1_bio, bio);
			}

			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
					     test_bit(R1BIO_BehindIO, &r1_bio->state));
			first_clone = 0;
		}

		if (r1_bio->behind_master_bio)
			mbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO, &mddev->bio_set);
		else
			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);

		if (r1_bio->behind_master_bio) {
			if (test_bit(CollisionCheck, &rdev->flags))
				wait_for_serialization(rdev, r1_bio);
			if (test_bit(WriteMostly, &rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		} else if (mddev->serialize_policy)
			wait_for_serialization(rdev, r1_bio);

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector = (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		if (mddev->gendisk)
			trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
					      r1_bio->sector);
		/* flush_pending_writes() needs access to the rdev so...*/
		mbio->bi_bdev = (void *)conf->mirrors[i].rdev;

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			md_wakeup_thread(mddev->thread);
		}
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

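/*
 * Entry point for all I/O to the array. Flushes are handed to md, and
 * the request is clipped to its barrier unit before being dispatched to
 * the read or write path, which may split it further for bad blocks.
 */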
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	/*
	 * There is a limit to the maximum size, but
	 * the read/write handler might find a lower limit
	 * due to bad blocks.  To avoid multiple splits,
	 * we pass the maximum number of sectors down
	 * and let the lower level perform the split.
	 */
	sectors = align_to_barrier_unit_end(
		bio->bi_iter.bi_sector, bio_sectors(bio));

	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio, sectors, NULL);
	else {
		if (!md_write_start(mddev, bio))
			return false;
		raid1_write_request(mddev, bio, sectors);
	}
	return true;
}

static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

849674e4 1595static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1da177e4
LT
1596{
1597 char b[BDEVNAME_SIZE];
e8096360 1598 struct r1conf *conf = mddev->private;
423f04d6 1599 unsigned long flags;
1da177e4
LT
1600
1601 /*
1602 * If it is not operational, then we have already marked it as dead
9a567843
GJ
1603 * else if it is the last working disk and "fail_last_dev == false",
1604 * ignore the error and let the next level up know.
1da177e4
LT
1605 * else mark the drive as failed
1606 */
2e52d449 1607 spin_lock_irqsave(&conf->device_lock, flags);
9a567843 1608 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
4044ba58 1609 && (conf->raid_disks - mddev->degraded) == 1) {
1da177e4
LT
1610 /*
1611 * Don't fail the drive, act as though we were just a
4044ba58
N
1612 * normal single drive.
1613 * However don't try a recovery from this drive as
1614 * it is very likely to fail.
1da177e4 1615 */
5389042f 1616 conf->recovery_disabled = mddev->recovery_disabled;
2e52d449 1617 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 1618 return;
4044ba58 1619 }
de393cde 1620 set_bit(Blocked, &rdev->flags);
ebda52fa 1621 if (test_and_clear_bit(In_sync, &rdev->flags))
1da177e4 1622 mddev->degraded++;
ebda52fa 1623 set_bit(Faulty, &rdev->flags);
423f04d6 1624 spin_unlock_irqrestore(&conf->device_lock, flags);
2446dba0
N
1625 /*
1626 * if recovery is running, make sure it aborts.
1627 */
1628 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2953079c
SL
1629 set_mask_bits(&mddev->sb_flags, 0,
1630 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1d41c216
N
1631 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1632 "md/raid1:%s: Operation continuing on %d devices.\n",
1633 mdname(mddev), bdevname(rdev->bdev, b),
1634 mdname(mddev), conf->raid_disks - mddev->degraded);
1da177e4
LT
1635}
1636
e8096360 1637static void print_conf(struct r1conf *conf)
1da177e4
LT
1638{
1639 int i;
1da177e4 1640
1d41c216 1641 pr_debug("RAID1 conf printout:\n");
1da177e4 1642 if (!conf) {
1d41c216 1643 pr_debug("(!conf)\n");
1da177e4
LT
1644 return;
1645 }
1d41c216
N
1646 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1647 conf->raid_disks);
1da177e4 1648
ddac7c7e 1649 rcu_read_lock();
1da177e4
LT
1650 for (i = 0; i < conf->raid_disks; i++) {
1651 char b[BDEVNAME_SIZE];
3cb03002 1652 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
ddac7c7e 1653 if (rdev)
1d41c216
N
1654 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1655 i, !test_bit(In_sync, &rdev->flags),
1656 !test_bit(Faulty, &rdev->flags),
1657 bdevname(rdev->bdev,b));
1da177e4 1658 }
ddac7c7e 1659 rcu_read_unlock();
1da177e4
LT
1660}
1661
e8096360 1662static void close_sync(struct r1conf *conf)
1da177e4 1663{
f6eca2d4
ND
1664 int idx;
1665
1666 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1667 _wait_barrier(conf, idx);
1668 _allow_barrier(conf, idx);
1669 }
1da177e4 1670
afeee514 1671 mempool_exit(&conf->r1buf_pool);
1da177e4
LT
1672}
1673
fd01b88c 1674static int raid1_spare_active(struct mddev *mddev)
1da177e4
LT
1675{
1676 int i;
e8096360 1677 struct r1conf *conf = mddev->private;
6b965620
N
1678 int count = 0;
1679 unsigned long flags;
1da177e4
LT
1680
1681 /*
f72ffdd6 1682 * Find all failed disks within the RAID1 configuration
ddac7c7e
N
1683 * and mark them readable.
1684 * Called under mddev lock, so rcu protection not needed.
423f04d6
N
1685 * device_lock used to avoid races with raid1_end_read_request
1686 * which expects 'In_sync' flags and ->degraded to be consistent.
1da177e4 1687 */
423f04d6 1688 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 1689 for (i = 0; i < conf->raid_disks; i++) {
3cb03002 1690 struct md_rdev *rdev = conf->mirrors[i].rdev;
8c7a2c2b
N
1691 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1692 if (repl
1aee41f6 1693 && !test_bit(Candidate, &repl->flags)
8c7a2c2b
N
1694 && repl->recovery_offset == MaxSector
1695 && !test_bit(Faulty, &repl->flags)
1696 && !test_and_set_bit(In_sync, &repl->flags)) {
1697 /* replacement has just become active */
1698 if (!rdev ||
1699 !test_and_clear_bit(In_sync, &rdev->flags))
1700 count++;
1701 if (rdev) {
1702 /* Replaced device not technically
1703 * faulty, but we need to be sure
1704 * it gets removed and never re-added
1705 */
1706 set_bit(Faulty, &rdev->flags);
1707 sysfs_notify_dirent_safe(
1708 rdev->sysfs_state);
1709 }
1710 }
ddac7c7e 1711 if (rdev
61e4947c 1712 && rdev->recovery_offset == MaxSector
ddac7c7e 1713 && !test_bit(Faulty, &rdev->flags)
c04be0aa 1714 && !test_and_set_bit(In_sync, &rdev->flags)) {
6b965620 1715 count++;
654e8b5a 1716 sysfs_notify_dirent_safe(rdev->sysfs_state);
1da177e4
LT
1717 }
1718 }
6b965620
N
1719 mddev->degraded -= count;
1720 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4
LT
1721
1722 print_conf(conf);
6b965620 1723 return count;
1da177e4
LT
1724}
1725
fd01b88c 1726static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1da177e4 1727{
e8096360 1728 struct r1conf *conf = mddev->private;
199050ea 1729 int err = -EEXIST;
41158c7e 1730 int mirror = 0;
0eaf822c 1731 struct raid1_info *p;
6c2fce2e 1732 int first = 0;
30194636 1733 int last = conf->raid_disks - 1;
1da177e4 1734
5389042f
N
1735 if (mddev->recovery_disabled == conf->recovery_disabled)
1736 return -EBUSY;
1737
1501efad
DW
1738 if (md_integrity_add_rdev(rdev, mddev))
1739 return -ENXIO;
1740
6c2fce2e
NB
1741 if (rdev->raid_disk >= 0)
1742 first = last = rdev->raid_disk;
1743
70bcecdb
GR
1744 /*
1745 * find the disk ... but prefer rdev->saved_raid_disk
1746 * if possible.
1747 */
1748 if (rdev->saved_raid_disk >= 0 &&
1749 rdev->saved_raid_disk >= first &&
9e753ba9 1750 rdev->saved_raid_disk < conf->raid_disks &&
70bcecdb
GR
1751 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1752 first = last = rdev->saved_raid_disk;
1753
7ef449d1 1754 for (mirror = first; mirror <= last; mirror++) {
ebfeb444 1755 p = conf->mirrors + mirror;
7ef449d1 1756 if (!p->rdev) {
9092c02d
JB
1757 if (mddev->gendisk)
1758 disk_stack_limits(mddev->gendisk, rdev->bdev,
1759 rdev->data_offset << 9);
1da177e4
LT
1760
1761 p->head_position = 0;
1762 rdev->raid_disk = mirror;
199050ea 1763 err = 0;
6aea114a
N
1764 /* As all devices are equivalent, we don't need a full recovery
1765 * if this drive was recently a member of the array
1766 */
1767 if (rdev->saved_raid_disk < 0)
41158c7e 1768 conf->fullsync = 1;
d6065f7b 1769 rcu_assign_pointer(p->rdev, rdev);
1da177e4
LT
1770 break;
1771 }
7ef449d1
N
1772 if (test_bit(WantReplacement, &p->rdev->flags) &&
1773 p[conf->raid_disks].rdev == NULL) {
1774 /* Add this device as a replacement */
1775 clear_bit(In_sync, &rdev->flags);
1776 set_bit(Replacement, &rdev->flags);
1777 rdev->raid_disk = mirror;
1778 err = 0;
1779 conf->fullsync = 1;
1780 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1781 break;
1782 }
1783 }
9092c02d 1784 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
8b904b5b 1785 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1da177e4 1786 print_conf(conf);
199050ea 1787 return err;
1da177e4
LT
1788}
1789
b8321b68 1790static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1da177e4 1791{
e8096360 1792 struct r1conf *conf = mddev->private;
1da177e4 1793 int err = 0;
b8321b68 1794 int number = rdev->raid_disk;
0eaf822c 1795 struct raid1_info *p = conf->mirrors + number;
1da177e4 1796
b014f14c
N
1797 if (rdev != p->rdev)
1798 p = conf->mirrors + conf->raid_disks + number;
1799
1da177e4 1800 print_conf(conf);
b8321b68 1801 if (rdev == p->rdev) {
b2d444d7 1802 if (test_bit(In_sync, &rdev->flags) ||
1da177e4
LT
1803 atomic_read(&rdev->nr_pending)) {
1804 err = -EBUSY;
1805 goto abort;
1806 }
046abeed 1807 /* Only remove non-faulty devices if recovery
dfc70645
N
1808 * is not possible.
1809 */
1810 if (!test_bit(Faulty, &rdev->flags) &&
5389042f 1811 mddev->recovery_disabled != conf->recovery_disabled &&
dfc70645
N
1812 mddev->degraded < conf->raid_disks) {
1813 err = -EBUSY;
1814 goto abort;
1815 }
1da177e4 1816 p->rdev = NULL;
d787be40
N
1817 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1818 synchronize_rcu();
1819 if (atomic_read(&rdev->nr_pending)) {
1820 /* lost the race, try later */
1821 err = -EBUSY;
1822 p->rdev = rdev;
1823 goto abort;
1824 }
1825 }
1826 if (conf->mirrors[conf->raid_disks + number].rdev) {
8c7a2c2b
N
1827 /* We just removed a device that is being replaced.
1828 * Move down the replacement. We drain all IO before
1829 * doing this to avoid confusion.
1830 */
1831 struct md_rdev *repl =
1832 conf->mirrors[conf->raid_disks + number].rdev;
e2d59925 1833 freeze_array(conf, 0);
3de59bb9
YY
1834 if (atomic_read(&repl->nr_pending)) {
1835 /* It means that some queued IO of retry_list
1836 * hold repl. Thus, we cannot set replacement
1837 * as NULL, avoiding rdev NULL pointer
1838 * dereference in sync_request_write and
1839 * handle_write_finished.
1840 */
1841 err = -EBUSY;
1842 unfreeze_array(conf);
1843 goto abort;
1844 }
8c7a2c2b
N
1845 clear_bit(Replacement, &repl->flags);
1846 p->rdev = repl;
1847 conf->mirrors[conf->raid_disks + number].rdev = NULL;
e2d59925 1848 unfreeze_array(conf);
e5bc9c3c
GJ
1849 }
1850
1851 clear_bit(WantReplacement, &rdev->flags);
a91a2785 1852 err = md_integrity_register(mddev);
1da177e4
LT
1853 }
1854abort:
1855
1856 print_conf(conf);
1857 return err;
1858}
1859
4246a0b6 1860static void end_sync_read(struct bio *bio)
1da177e4 1861{
98d30c58 1862 struct r1bio *r1_bio = get_resync_r1bio(bio);
1da177e4 1863
0fc280f6 1864 update_head_pos(r1_bio->read_disk, r1_bio);
ba3ae3be 1865
1da177e4
LT
1866 /*
1867 * we have read a block, now it needs to be re-written,
1868 * or re-read if the read failed.
1869 * We don't do much here, just schedule handling by raid1d
1870 */
4e4cbee9 1871 if (!bio->bi_status)
1da177e4 1872 set_bit(R1BIO_Uptodate, &r1_bio->state);
d11c171e
N
1873
1874 if (atomic_dec_and_test(&r1_bio->remaining))
1875 reschedule_retry(r1_bio);
1da177e4
LT
1876}
1877
dfcc34c9
ND
1878static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1879{
1880 sector_t sync_blocks = 0;
1881 sector_t s = r1_bio->sector;
1882 long sectors_to_go = r1_bio->sectors;
1883
1884 /* make sure these bits don't get cleared. */
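 /* (md_bitmap_end_sync() covers at most one bitmap chunk per call and
  * reports that chunk's span in sync_blocks, hence the loop) */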
1885 do {
1886 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1887 s += sync_blocks;
1888 sectors_to_go -= sync_blocks;
1889 } while (sectors_to_go > 0);
1890}
1891
449808a2
HT
1892static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1893{
1894 if (atomic_dec_and_test(&r1_bio->remaining)) {
1895 struct mddev *mddev = r1_bio->mddev;
1896 int s = r1_bio->sectors;
1897
1898 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1899 test_bit(R1BIO_WriteError, &r1_bio->state))
1900 reschedule_retry(r1_bio);
1901 else {
1902 put_buf(r1_bio);
1903 md_done_sync(mddev, s, uptodate);
1904 }
1905 }
1906}
1907
4246a0b6 1908static void end_sync_write(struct bio *bio)
1da177e4 1909{
4e4cbee9 1910 int uptodate = !bio->bi_status;
98d30c58 1911 struct r1bio *r1_bio = get_resync_r1bio(bio);
fd01b88c 1912 struct mddev *mddev = r1_bio->mddev;
e8096360 1913 struct r1conf *conf = mddev->private;
4367af55
N
1914 sector_t first_bad;
1915 int bad_sectors;
854abd75 1916 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
ba3ae3be 1917
6b1117d5 1918 if (!uptodate) {
dfcc34c9 1919 abort_sync_write(mddev, r1_bio);
854abd75
N
1920 set_bit(WriteErrorSeen, &rdev->flags);
1921 if (!test_and_set_bit(WantReplacement, &rdev->flags))
19d67169
N
1922 set_bit(MD_RECOVERY_NEEDED, &
1923 mddev->recovery);
d8f05d29 1924 set_bit(R1BIO_WriteError, &r1_bio->state);
854abd75 1925 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
3a9f28a5
N
1926 &first_bad, &bad_sectors) &&
1927 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1928 r1_bio->sector,
1929 r1_bio->sectors,
1930 &first_bad, &bad_sectors)
1931 )
4367af55 1932 set_bit(R1BIO_MadeGood, &r1_bio->state);
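 /* (the write covered sectors this rdev had marked bad while the
  * read source did not; raid1d can later clear those bad blocks) */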
e3b9703e 1933
449808a2 1934 put_sync_write_buf(r1_bio, uptodate);
1da177e4
LT
1935}
1936
3cb03002 1937static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
d8f05d29
N
1938 int sectors, struct page *page, int rw)
1939{
796a5cf0 1940 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
d8f05d29
N
1941 /* success */
1942 return 1;
19d67169 1943 if (rw == WRITE) {
d8f05d29 1944 set_bit(WriteErrorSeen, &rdev->flags);
19d67169
N
1945 if (!test_and_set_bit(WantReplacement,
1946 &rdev->flags))
1947 set_bit(MD_RECOVERY_NEEDED, &
1948 rdev->mddev->recovery);
1949 }
d8f05d29
N
1950 /* need to record an error - either for the block or the device */
1951 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1952 md_error(rdev->mddev, rdev);
1953 return 0;
1954}
1955
9f2c9d12 1956static int fix_sync_read_error(struct r1bio *r1_bio)
1da177e4 1957{
a68e5870
N
1958 /* Try some synchronous reads of other devices to get
1959 * good data, much like with normal read errors. Only
1960 * read into the pages we already have so we don't
1961 * need to re-issue the read request.
1962 * We don't need to freeze the array, because while an
1963 * active sync request is in flight there is no normal IO and
1964 * no overlapping syncs.
06f60385
N
1965 * We don't need to check is_badblock() again as we
1966 * made sure that anything with a bad block in range
1967 * will have bi_end_io clear.
a68e5870 1968 */
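 /*
  * Broadly, each PAGE_SIZE chunk below takes three passes over the
  * mirrors: find one that can still supply the data, write it back
  * to the mirrors that failed the read, then re-read from them to
  * verify, bumping corrected_errors on success.
  */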
fd01b88c 1969 struct mddev *mddev = r1_bio->mddev;
e8096360 1970 struct r1conf *conf = mddev->private;
a68e5870 1971 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
44cf0f4d 1972 struct page **pages = get_resync_pages(bio)->pages;
a68e5870
N
1973 sector_t sect = r1_bio->sector;
1974 int sectors = r1_bio->sectors;
1975 int idx = 0;
2e52d449
N
1976 struct md_rdev *rdev;
1977
1978 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1979 if (test_bit(FailFast, &rdev->flags)) {
1980 /* Don't try recovering from here - just fail it
1981 * ... unless it is the last working device of course */
1982 md_error(mddev, rdev);
1983 if (test_bit(Faulty, &rdev->flags))
1984 /* Don't try to read from here, but make sure
1985 * put_buf does its thing
1986 */
1987 bio->bi_end_io = end_sync_write;
1988 }
a68e5870
N
1989
1990 while(sectors) {
1991 int s = sectors;
1992 int d = r1_bio->read_disk;
1993 int success = 0;
78d7f5f7 1994 int start;
a68e5870
N
1995
1996 if (s > (PAGE_SIZE>>9))
1997 s = PAGE_SIZE >> 9;
1998 do {
1999 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2000 /* No rcu protection needed here; devices
2001 * can only be removed when no resync is
2002 * active, and resync is currently active
2003 */
2004 rdev = conf->mirrors[d].rdev;
9d3d8011 2005 if (sync_page_io(rdev, sect, s<<9,
44cf0f4d 2006 pages[idx],
796a5cf0 2007 REQ_OP_READ, 0, false)) {
a68e5870
N
2008 success = 1;
2009 break;
2010 }
2011 }
2012 d++;
8f19ccb2 2013 if (d == conf->raid_disks * 2)
a68e5870
N
2014 d = 0;
2015 } while (!success && d != r1_bio->read_disk);
2016
78d7f5f7 2017 if (!success) {
a68e5870 2018 char b[BDEVNAME_SIZE];
3a9f28a5
N
2019 int abort = 0;
2020 /* Cannot read from anywhere; this block is lost.
2021 * Record a bad block on each device. If that doesn't
2022 * work just disable and interrupt the recovery.
2023 * Don't fail devices as that won't really help.
2024 */
1d41c216 2025 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
74d46992 2026 mdname(mddev), bio_devname(bio, b),
1d41c216 2027 (unsigned long long)r1_bio->sector);
8f19ccb2 2028 for (d = 0; d < conf->raid_disks * 2; d++) {
3a9f28a5
N
2029 rdev = conf->mirrors[d].rdev;
2030 if (!rdev || test_bit(Faulty, &rdev->flags))
2031 continue;
2032 if (!rdev_set_badblocks(rdev, sect, s, 0))
2033 abort = 1;
2034 }
2035 if (abort) {
d890fa2b
N
2036 conf->recovery_disabled =
2037 mddev->recovery_disabled;
3a9f28a5
N
2038 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2039 md_done_sync(mddev, r1_bio->sectors, 0);
2040 put_buf(r1_bio);
2041 return 0;
2042 }
2043 /* Try next page */
2044 sectors -= s;
2045 sect += s;
2046 idx++;
2047 continue;
d11c171e 2048 }
78d7f5f7
N
2049
2050 start = d;
2051 /* write it back and re-read */
2052 while (d != r1_bio->read_disk) {
2053 if (d == 0)
8f19ccb2 2054 d = conf->raid_disks * 2;
78d7f5f7
N
2055 d--;
2056 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2057 continue;
2058 rdev = conf->mirrors[d].rdev;
d8f05d29 2059 if (r1_sync_page_io(rdev, sect, s,
44cf0f4d 2060 pages[idx],
d8f05d29 2061 WRITE) == 0) {
78d7f5f7
N
2062 r1_bio->bios[d]->bi_end_io = NULL;
2063 rdev_dec_pending(rdev, mddev);
9d3d8011 2064 }
78d7f5f7
N
2065 }
2066 d = start;
2067 while (d != r1_bio->read_disk) {
2068 if (d == 0)
8f19ccb2 2069 d = conf->raid_disks * 2;
78d7f5f7
N
2070 d--;
2071 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2072 continue;
2073 rdev = conf->mirrors[d].rdev;
d8f05d29 2074 if (r1_sync_page_io(rdev, sect, s,
44cf0f4d 2075 pages[idx],
d8f05d29 2076 READ) != 0)
9d3d8011 2077 atomic_add(s, &rdev->corrected_errors);
78d7f5f7 2078 }
a68e5870
N
2079 sectors -= s;
2080 sect += s;
2081 idx ++;
2082 }
78d7f5f7 2083 set_bit(R1BIO_Uptodate, &r1_bio->state);
4e4cbee9 2084 bio->bi_status = 0;
a68e5870
N
2085 return 1;
2086}
2087
c95e6385 2088static void process_checks(struct r1bio *r1_bio)
a68e5870
N
2089{
2090 /* We have read all readable devices. If we haven't
2091 * got the block, then there is no hope left.
2092 * If we have, then we want to do a comparison
2093 * and skip the write if everything is the same.
2094 * If any blocks failed to read, then we need to
2095 * attempt an over-write
2096 */
fd01b88c 2097 struct mddev *mddev = r1_bio->mddev;
e8096360 2098 struct r1conf *conf = mddev->private;
a68e5870
N
2099 int primary;
2100 int i;
f4380a91 2101 int vcnt;
a68e5870 2102
30bc9b53
N
2103 /* Fix variable parts of all bios */
2104 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
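 /* i.e. vcnt = DIV_ROUND_UP(sectors, sectors-per-page); with 4KB
  * pages that is one bio_vec per 8 sectors */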
2105 for (i = 0; i < conf->raid_disks * 2; i++) {
4e4cbee9 2106 blk_status_t status;
30bc9b53 2107 struct bio *b = r1_bio->bios[i];
98d30c58 2108 struct resync_pages *rp = get_resync_pages(b);
30bc9b53
N
2109 if (b->bi_end_io != end_sync_read)
2110 continue;
4246a0b6 2111 /* fixup the bio for reuse, but preserve errno */
4e4cbee9 2112 status = b->bi_status;
30bc9b53 2113 bio_reset(b);
4e4cbee9 2114 b->bi_status = status;
4f024f37 2115 b->bi_iter.bi_sector = r1_bio->sector +
30bc9b53 2116 conf->mirrors[i].rdev->data_offset;
74d46992 2117 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
30bc9b53 2118 b->bi_end_io = end_sync_read;
98d30c58
ML
2119 rp->raid_bio = r1_bio;
2120 b->bi_private = rp;
30bc9b53 2121
fb0eb5df
ML
2122 /* initialize bvec table again */
2123 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
30bc9b53 2124 }
8f19ccb2 2125 for (primary = 0; primary < conf->raid_disks * 2; primary++)
a68e5870 2126 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
4e4cbee9 2127 !r1_bio->bios[primary]->bi_status) {
a68e5870
N
2128 r1_bio->bios[primary]->bi_end_io = NULL;
2129 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2130 break;
2131 }
2132 r1_bio->read_disk = primary;
8f19ccb2 2133 for (i = 0; i < conf->raid_disks * 2; i++) {
2b070cfe 2134 int j = 0;
78d7f5f7
N
2135 struct bio *pbio = r1_bio->bios[primary];
2136 struct bio *sbio = r1_bio->bios[i];
4e4cbee9 2137 blk_status_t status = sbio->bi_status;
44cf0f4d
ML
2138 struct page **ppages = get_resync_pages(pbio)->pages;
2139 struct page **spages = get_resync_pages(sbio)->pages;
60928a91 2140 struct bio_vec *bi;
8fc04e6e 2141 int page_len[RESYNC_PAGES] = { 0 };
6dc4f100 2142 struct bvec_iter_all iter_all;
a68e5870 2143
2aabaa65 2144 if (sbio->bi_end_io != end_sync_read)
78d7f5f7 2145 continue;
4246a0b6 2146 /* Now we can 'fixup' the error value */
4e4cbee9 2147 sbio->bi_status = 0;
78d7f5f7 2148
2b070cfe
CH
2149 bio_for_each_segment_all(bi, sbio, iter_all)
2150 page_len[j++] = bi->bv_len;
60928a91 2151
4e4cbee9 2152 if (!status) {
78d7f5f7 2153 for (j = vcnt; j-- ; ) {
44cf0f4d
ML
2154 if (memcmp(page_address(ppages[j]),
2155 page_address(spages[j]),
60928a91 2156 page_len[j]))
78d7f5f7 2157 break;
69382e85 2158 }
78d7f5f7
N
2159 } else
2160 j = 0;
2161 if (j >= 0)
7f7583d4 2162 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
78d7f5f7 2163 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
4e4cbee9 2164 && !status)) {
78d7f5f7
N
2165 /* No need to write to this device. */
2166 sbio->bi_end_io = NULL;
2167 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2168 continue;
2169 }
d3b45c2a
KO
2170
2171 bio_copy_data(sbio, pbio);
78d7f5f7 2172 }
a68e5870
N
2173}
2174
9f2c9d12 2175static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
a68e5870 2176{
e8096360 2177 struct r1conf *conf = mddev->private;
a68e5870 2178 int i;
8f19ccb2 2179 int disks = conf->raid_disks * 2;
037d2ff6 2180 struct bio *wbio;
a68e5870 2181
a68e5870
N
2182 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2183 /* ouch - failed to read all of that. */
2184 if (!fix_sync_read_error(r1_bio))
2185 return;
7ca78d57
N
2186
2187 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
c95e6385
N
2188 process_checks(r1_bio);
2189
d11c171e
N
2190 /*
2191 * schedule writes
2192 */
1da177e4
LT
2193 atomic_set(&r1_bio->remaining, 1);
2194 for (i = 0; i < disks ; i++) {
2195 wbio = r1_bio->bios[i];
3e198f78
N
2196 if (wbio->bi_end_io == NULL ||
2197 (wbio->bi_end_io == end_sync_read &&
2198 (i == r1_bio->read_disk ||
2199 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1da177e4 2200 continue;
dfcc34c9
ND
2201 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2202 abort_sync_write(mddev, r1_bio);
0c9d5b12 2203 continue;
dfcc34c9 2204 }
1da177e4 2205
796a5cf0 2206 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
212e7eb7
N
2207 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2208 wbio->bi_opf |= MD_FAILFAST;
2209
3e198f78 2210 wbio->bi_end_io = end_sync_write;
1da177e4 2211 atomic_inc(&r1_bio->remaining);
aa8b57aa 2212 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
191ea9b2 2213
ed00aabd 2214 submit_bio_noacct(wbio);
1da177e4
LT
2215 }
2216
449808a2 2217 put_sync_write_buf(r1_bio, 1);
1da177e4
LT
2218}
2219
2220/*
2221 * This is a kernel thread which:
2222 *
2223 * 1. Retries failed read operations on working mirrors.
2224 * 2. Updates the raid superblock when problems are encountered.
d2eb35ac 2225 * 3. Performs writes following reads for array synchronising.
1da177e4
LT
2226 */
2227
e8096360 2228static void fix_read_error(struct r1conf *conf, int read_disk,
867868fb
N
2229 sector_t sect, int sectors)
2230{
fd01b88c 2231 struct mddev *mddev = conf->mddev;
867868fb
N
2232 while(sectors) {
2233 int s = sectors;
2234 int d = read_disk;
2235 int success = 0;
2236 int start;
3cb03002 2237 struct md_rdev *rdev;
867868fb
N
2238
2239 if (s > (PAGE_SIZE>>9))
2240 s = PAGE_SIZE >> 9;
2241
2242 do {
d2eb35ac
N
2243 sector_t first_bad;
2244 int bad_sectors;
2245
707a6a42
N
2246 rcu_read_lock();
2247 rdev = rcu_dereference(conf->mirrors[d].rdev);
867868fb 2248 if (rdev &&
da8840a7 2249 (test_bit(In_sync, &rdev->flags) ||
2250 (!test_bit(Faulty, &rdev->flags) &&
2251 rdev->recovery_offset >= sect + s)) &&
d2eb35ac 2252 is_badblock(rdev, sect, s,
707a6a42
N
2253 &first_bad, &bad_sectors) == 0) {
2254 atomic_inc(&rdev->nr_pending);
2255 rcu_read_unlock();
2256 if (sync_page_io(rdev, sect, s<<9,
796a5cf0 2257 conf->tmppage, REQ_OP_READ, 0, false))
707a6a42
N
2258 success = 1;
2259 rdev_dec_pending(rdev, mddev);
2260 if (success)
2261 break;
2262 } else
2263 rcu_read_unlock();
2264 d++;
2265 if (d == conf->raid_disks * 2)
2266 d = 0;
867868fb
N
2267 } while (!success && d != read_disk);
2268
2269 if (!success) {
d8f05d29 2270 /* Cannot read from anywhere - mark it bad */
3cb03002 2271 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
d8f05d29
N
2272 if (!rdev_set_badblocks(rdev, sect, s, 0))
2273 md_error(mddev, rdev);
867868fb
N
2274 break;
2275 }
2276 /* write it back and re-read */
2277 start = d;
2278 while (d != read_disk) {
2279 if (d==0)
8f19ccb2 2280 d = conf->raid_disks * 2;
867868fb 2281 d--;
707a6a42
N
2282 rcu_read_lock();
2283 rdev = rcu_dereference(conf->mirrors[d].rdev);
867868fb 2284 if (rdev &&
707a6a42
N
2285 !test_bit(Faulty, &rdev->flags)) {
2286 atomic_inc(&rdev->nr_pending);
2287 rcu_read_unlock();
d8f05d29
N
2288 r1_sync_page_io(rdev, sect, s,
2289 conf->tmppage, WRITE);
707a6a42
N
2290 rdev_dec_pending(rdev, mddev);
2291 } else
2292 rcu_read_unlock();
867868fb
N
2293 }
2294 d = start;
2295 while (d != read_disk) {
2296 char b[BDEVNAME_SIZE];
2297 if (d==0)
8f19ccb2 2298 d = conf->raid_disks * 2;
867868fb 2299 d--;
707a6a42
N
2300 rcu_read_lock();
2301 rdev = rcu_dereference(conf->mirrors[d].rdev);
867868fb 2302 if (rdev &&
b8cb6b4c 2303 !test_bit(Faulty, &rdev->flags)) {
707a6a42
N
2304 atomic_inc(&rdev->nr_pending);
2305 rcu_read_unlock();
d8f05d29
N
2306 if (r1_sync_page_io(rdev, sect, s,
2307 conf->tmppage, READ)) {
867868fb 2308 atomic_add(s, &rdev->corrected_errors);
1d41c216
N
2309 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2310 mdname(mddev), s,
2311 (unsigned long long)(sect +
2312 rdev->data_offset),
2313 bdevname(rdev->bdev, b));
867868fb 2314 }
707a6a42
N
2315 rdev_dec_pending(rdev, mddev);
2316 } else
2317 rcu_read_unlock();
867868fb
N
2318 }
2319 sectors -= s;
2320 sect += s;
2321 }
2322}
2323
9f2c9d12 2324static int narrow_write_error(struct r1bio *r1_bio, int i)
cd5ff9a1 2325{
fd01b88c 2326 struct mddev *mddev = r1_bio->mddev;
e8096360 2327 struct r1conf *conf = mddev->private;
3cb03002 2328 struct md_rdev *rdev = conf->mirrors[i].rdev;
cd5ff9a1
N
2329
2330 /* bio has the data to be written to device 'i' where
2331 * we just recently had a write error.
2332 * We repeatedly clone the bio and trim down to one block,
2333 * then try the write. Where the write fails we record
2334 * a bad block.
2335 * It is conceivable that the bio doesn't exactly align with
2336 * blocks. We must handle this somehow.
2337 *
2338 * We currently own a reference on the rdev.
2339 */
2340
2341 int block_sectors;
2342 sector_t sector;
2343 int sectors;
2344 int sect_to_write = r1_bio->sectors;
2345 int ok = 1;
2346
2347 if (rdev->badblocks.shift < 0)
2348 return 0;
2349
ab713cdc
ND
2350 block_sectors = roundup(1 << rdev->badblocks.shift,
2351 bdev_logical_block_size(rdev->bdev) >> 9);
cd5ff9a1
N
2352 sector = r1_bio->sector;
2353 sectors = ((sector + block_sectors)
2354 & ~(sector_t)(block_sectors - 1))
2355 - sector;
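 /* e.g. with 4KB logical blocks (block_sectors == 8) and a bio
  * starting at sector 21, the first pass writes
  * ((21 + 8) & ~7) - 21 == 3 sectors up to the block boundary;
  * later passes advance a full block_sectors at a time. */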
2356
cd5ff9a1
N
2357 while (sect_to_write) {
2358 struct bio *wbio;
2359 if (sectors > sect_to_write)
2360 sectors = sect_to_write;
2361 /* Write at 'sector' for 'sectors'*/
2362
b783863f 2363 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
841c1316
ML
2364 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2365 GFP_NOIO,
afeee514 2366 &mddev->bio_set);
b783863f 2367 } else {
d7a10308 2368 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
afeee514 2369 &mddev->bio_set);
b783863f
KO
2370 }
2371
796a5cf0 2372 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
4f024f37
KO
2373 wbio->bi_iter.bi_sector = r1_bio->sector;
2374 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
cd5ff9a1 2375
6678d83f 2376 bio_trim(wbio, sector - r1_bio->sector, sectors);
4f024f37 2377 wbio->bi_iter.bi_sector += rdev->data_offset;
74d46992 2378 bio_set_dev(wbio, rdev->bdev);
4e49ea4a
MC
2379
2380 if (submit_bio_wait(wbio) < 0)
cd5ff9a1
N
2381 /* failure! */
2382 ok = rdev_set_badblocks(rdev, sector,
2383 sectors, 0)
2384 && ok;
2385
2386 bio_put(wbio);
2387 sect_to_write -= sectors;
2388 sector += sectors;
2389 sectors = block_sectors;
2390 }
2391 return ok;
2392}
2393
e8096360 2394static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
62096bce
N
2395{
2396 int m;
2397 int s = r1_bio->sectors;
8f19ccb2 2398 for (m = 0; m < conf->raid_disks * 2 ; m++) {
3cb03002 2399 struct md_rdev *rdev = conf->mirrors[m].rdev;
62096bce
N
2400 struct bio *bio = r1_bio->bios[m];
2401 if (bio->bi_end_io == NULL)
2402 continue;
4e4cbee9 2403 if (!bio->bi_status &&
62096bce 2404 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
c6563a8c 2405 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
62096bce 2406 }
4e4cbee9 2407 if (bio->bi_status &&
62096bce
N
2408 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2409 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2410 md_error(conf->mddev, rdev);
2411 }
2412 }
2413 put_buf(r1_bio);
2414 md_done_sync(conf->mddev, s, 1);
2415}
2416
e8096360 2417static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
62096bce 2418{
fd76863e 2419 int m, idx;
55ce74d4 2420 bool fail = false;
fd76863e 2421
8f19ccb2 2422 for (m = 0; m < conf->raid_disks * 2 ; m++)
62096bce 2423 if (r1_bio->bios[m] == IO_MADE_GOOD) {
3cb03002 2424 struct md_rdev *rdev = conf->mirrors[m].rdev;
62096bce
N
2425 rdev_clear_badblocks(rdev,
2426 r1_bio->sector,
c6563a8c 2427 r1_bio->sectors, 0);
62096bce
N
2428 rdev_dec_pending(rdev, conf->mddev);
2429 } else if (r1_bio->bios[m] != NULL) {
2430 /* This drive got a write error. We need to
2431 * narrow down and record precise write
2432 * errors.
2433 */
55ce74d4 2434 fail = true;
62096bce
N
2435 if (!narrow_write_error(r1_bio, m)) {
2436 md_error(conf->mddev,
2437 conf->mirrors[m].rdev);
2438 /* an I/O failed, we can't clear the bitmap */
2439 set_bit(R1BIO_Degraded, &r1_bio->state);
2440 }
2441 rdev_dec_pending(conf->mirrors[m].rdev,
2442 conf->mddev);
2443 }
55ce74d4
N
2444 if (fail) {
2445 spin_lock_irq(&conf->device_lock);
2446 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
fd76863e 2447 idx = sector_to_idx(r1_bio->sector);
824e47da 2448 atomic_inc(&conf->nr_queued[idx]);
55ce74d4 2449 spin_unlock_irq(&conf->device_lock);
824e47da 2450 /*
2451 * In case freeze_array() is waiting for condition
2452 * get_unqueued_pending() == extra to be true.
2453 */
2454 wake_up(&conf->wait_barrier);
55ce74d4 2455 md_wakeup_thread(conf->mddev->thread);
bd8688a1
N
2456 } else {
2457 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2458 close_write(r1_bio);
55ce74d4 2459 raid_end_bio_io(r1_bio);
bd8688a1 2460 }
62096bce
N
2461}
2462
e8096360 2463static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
62096bce 2464{
fd01b88c 2465 struct mddev *mddev = conf->mddev;
62096bce 2466 struct bio *bio;
3cb03002 2467 struct md_rdev *rdev;
62096bce
N
2468
2469 clear_bit(R1BIO_ReadError, &r1_bio->state);
2470 /* we got a read error. Maybe the drive is bad. Maybe just
2471 * the block and we can fix it.
2472 * We freeze all other IO, and try reading the block from
2473 * other devices. When we find one, we re-write
2474 * and check whether that fixes the read error.
2475 * This is all done synchronously while the array is
2476 * frozen
2477 */
7449f699
TM
2478
2479 bio = r1_bio->bios[r1_bio->read_disk];
7449f699
TM
2480 bio_put(bio);
2481 r1_bio->bios[r1_bio->read_disk] = NULL;
2482
2e52d449
N
2483 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2484 if (mddev->ro == 0
2485 && !test_bit(FailFast, &rdev->flags)) {
e2d59925 2486 freeze_array(conf, 1);
62096bce
N
2487 fix_read_error(conf, r1_bio->read_disk,
2488 r1_bio->sector, r1_bio->sectors);
2489 unfreeze_array(conf);
b33d1062
GK
2490 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2491 md_error(mddev, rdev);
7449f699
TM
2492 } else {
2493 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2494 }
2495
2e52d449 2496 rdev_dec_pending(rdev, conf->mddev);
689389a0
N
2497 allow_barrier(conf, r1_bio->sector);
2498 bio = r1_bio->master_bio;
62096bce 2499
689389a0
N
2500 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2501 r1_bio->state = 0;
2502 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
62096bce
N
2503}
2504
4ed8731d 2505static void raid1d(struct md_thread *thread)
1da177e4 2506{
4ed8731d 2507 struct mddev *mddev = thread->mddev;
9f2c9d12 2508 struct r1bio *r1_bio;
1da177e4 2509 unsigned long flags;
e8096360 2510 struct r1conf *conf = mddev->private;
1da177e4 2511 struct list_head *head = &conf->retry_list;
e1dfa0a2 2512 struct blk_plug plug;
fd76863e 2513 int idx;
1da177e4
LT
2514
2515 md_check_recovery(mddev);
e1dfa0a2 2516
55ce74d4 2517 if (!list_empty_careful(&conf->bio_end_io_list) &&
2953079c 2518 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
55ce74d4
N
2519 LIST_HEAD(tmp);
2520 spin_lock_irqsave(&conf->device_lock, flags);
fd76863e 2521 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2522 list_splice_init(&conf->bio_end_io_list, &tmp);
55ce74d4
N
2523 spin_unlock_irqrestore(&conf->device_lock, flags);
2524 while (!list_empty(&tmp)) {
a452744b
MP
2525 r1_bio = list_first_entry(&tmp, struct r1bio,
2526 retry_list);
55ce74d4 2527 list_del(&r1_bio->retry_list);
fd76863e 2528 idx = sector_to_idx(r1_bio->sector);
824e47da 2529 atomic_dec(&conf->nr_queued[idx]);
bd8688a1
N
2530 if (mddev->degraded)
2531 set_bit(R1BIO_Degraded, &r1_bio->state);
2532 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2533 close_write(r1_bio);
55ce74d4
N
2534 raid_end_bio_io(r1_bio);
2535 }
2536 }
2537
e1dfa0a2 2538 blk_start_plug(&plug);
1da177e4 2539 for (;;) {
191ea9b2 2540
0021b7bc 2541 flush_pending_writes(conf);
191ea9b2 2542
a35e63ef
N
2543 spin_lock_irqsave(&conf->device_lock, flags);
2544 if (list_empty(head)) {
2545 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 2546 break;
a35e63ef 2547 }
9f2c9d12 2548 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
1da177e4 2549 list_del(head->prev);
fd76863e 2550 idx = sector_to_idx(r1_bio->sector);
824e47da 2551 atomic_dec(&conf->nr_queued[idx]);
1da177e4
LT
2552 spin_unlock_irqrestore(&conf->device_lock, flags);
2553
2554 mddev = r1_bio->mddev;
070ec55d 2555 conf = mddev->private;
4367af55 2556 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
d8f05d29 2557 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
62096bce
N
2558 test_bit(R1BIO_WriteError, &r1_bio->state))
2559 handle_sync_write_finished(conf, r1_bio);
2560 else
4367af55 2561 sync_request_write(mddev, r1_bio);
cd5ff9a1 2562 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
62096bce
N
2563 test_bit(R1BIO_WriteError, &r1_bio->state))
2564 handle_write_finished(conf, r1_bio);
2565 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2566 handle_read_error(conf, r1_bio);
2567 else
c230e7e5 2568 WARN_ON_ONCE(1);
62096bce 2569
1d9d5241 2570 cond_resched();
2953079c 2571 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
de393cde 2572 md_check_recovery(mddev);
1da177e4 2573 }
e1dfa0a2 2574 blk_finish_plug(&plug);
1da177e4
LT
2575}
2576
e8096360 2577static int init_resync(struct r1conf *conf)
1da177e4
LT
2578{
2579 int buffs;
2580
2581 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
afeee514
KO
2582 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2583
2584 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2585 r1buf_pool_free, conf->poolinfo);
1da177e4
LT
2586}
2587
208410b5
SL
2588static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2589{
afeee514 2590 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
208410b5
SL
2591 struct resync_pages *rps;
2592 struct bio *bio;
2593 int i;
2594
2595 for (i = conf->poolinfo->raid_disks; i--; ) {
2596 bio = r1bio->bios[i];
2597 rps = bio->bi_private;
2598 bio_reset(bio);
2599 bio->bi_private = rps;
2600 }
2601 r1bio->master_bio = NULL;
2602 return r1bio;
2603}
2604
1da177e4
LT
2605/*
2606 * perform a "sync" on one "block"
2607 *
2608 * We need to make sure that no normal I/O request - particularly write
2609 * requests - conflict with active sync requests.
2610 *
2611 * This is achieved by tracking pending requests and a 'barrier' concept
2612 * that can be installed to exclude normal IO requests.
2613 */
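 /*
  * As a sketch of the scheme (the helpers live in raid1.h): the array
  * is divided into fixed-size barrier units and each unit hashes via
  * sector_to_idx() into one of BARRIER_BUCKETS_NR buckets, so raising
  * a barrier in one bucket only blocks normal I/O whose sectors hash
  * to that same bucket.
  */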
2614
849674e4
SL
2615static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2616 int *skipped)
1da177e4 2617{
e8096360 2618 struct r1conf *conf = mddev->private;
9f2c9d12 2619 struct r1bio *r1_bio;
1da177e4
LT
2620 struct bio *bio;
2621 sector_t max_sector, nr_sectors;
3e198f78 2622 int disk = -1;
1da177e4 2623 int i;
3e198f78
N
2624 int wonly = -1;
2625 int write_targets = 0, read_targets = 0;
57dab0bd 2626 sector_t sync_blocks;
e3b9703e 2627 int still_degraded = 0;
06f60385
N
2628 int good_sectors = RESYNC_SECTORS;
2629 int min_bad = 0; /* number of sectors that are bad in all devices */
fd76863e 2630 int idx = sector_to_idx(sector_nr);
022e510f 2631 int page_idx = 0;
1da177e4 2632
afeee514 2633 if (!mempool_initialized(&conf->r1buf_pool))
1da177e4 2634 if (init_resync(conf))
57afd89f 2635 return 0;
1da177e4 2636
58c0fed4 2637 max_sector = mddev->dev_sectors;
1da177e4 2638 if (sector_nr >= max_sector) {
191ea9b2
N
2639 /* If we aborted, we need to abort the
2640 * sync on the 'current' bitmap chunk (there will
2641 * only be one in raid1 resync).
2642 * We can find the current address in mddev->curr_resync
2643 */
6a806c51 2644 if (mddev->curr_resync < max_sector) /* aborted */
e64e4018
AS
2645 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2646 &sync_blocks, 1);
6a806c51 2647 else /* completed sync */
191ea9b2 2648 conf->fullsync = 0;
6a806c51 2649
e64e4018 2650 md_bitmap_close_sync(mddev->bitmap);
1da177e4 2651 close_sync(conf);
c40f341f
GR
2652
2653 if (mddev_is_clustered(mddev)) {
2654 conf->cluster_sync_low = 0;
2655 conf->cluster_sync_high = 0;
c40f341f 2656 }
1da177e4
LT
2657 return 0;
2658 }
2659
07d84d10
N
2660 if (mddev->bitmap == NULL &&
2661 mddev->recovery_cp == MaxSector &&
6394cca5 2662 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
07d84d10
N
2663 conf->fullsync == 0) {
2664 *skipped = 1;
2665 return max_sector - sector_nr;
2666 }
6394cca5
N
2667 /* before building a request, check if we can skip these blocks.
2668 * This call to md_bitmap_start_sync doesn't actually record anything
2669 */
e64e4018 2670 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
e5de485f 2671 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
191ea9b2
N
2672 /* We can skip this block, and probably several more */
2673 *skipped = 1;
2674 return sync_blocks;
2675 }
17999be4 2676
7ac50447
TM
2677 /*
2678 * If there is non-resync activity waiting for a turn, then let it
2679 * through before starting on this new sync request.
2680 */
824e47da 2681 if (atomic_read(&conf->nr_waiting[idx]))
7ac50447
TM
2682 schedule_timeout_uninterruptible(1);
2683
c40f341f
GR
2684 /* we are incrementing sector_nr below. To be safe, we check against
2685 * sector_nr + two times RESYNC_SECTORS
2686 */
2687
e64e4018 2688 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
c40f341f 2689 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
17999be4 2690
8c242593
YY
2691
2692 if (raise_barrier(conf, sector_nr))
2693 return 0;
2694
2695 r1_bio = raid1_alloc_init_r1buf(conf);
1da177e4 2696
3e198f78 2697 rcu_read_lock();
1da177e4 2698 /*
3e198f78
N
2699 * If we get a correctably read error during resync or recovery,
2700 * we might want to read from a different device. So we
2701 * flag all drives that could conceivably be read from for READ,
2702 * and any others (which will be non-In_sync devices) for WRITE.
2703 * If a read fails, we try reading from something else for which READ
2704 * is OK.
1da177e4 2705 */
1da177e4 2706
1da177e4
LT
2707 r1_bio->mddev = mddev;
2708 r1_bio->sector = sector_nr;
191ea9b2 2709 r1_bio->state = 0;
1da177e4 2710 set_bit(R1BIO_IsSync, &r1_bio->state);
fd76863e 2711 /* make sure good_sectors won't go across barrier unit boundary */
2712 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
1da177e4 2713
8f19ccb2 2714 for (i = 0; i < conf->raid_disks * 2; i++) {
3cb03002 2715 struct md_rdev *rdev;
1da177e4 2716 bio = r1_bio->bios[i];
1da177e4 2717
3e198f78
N
2718 rdev = rcu_dereference(conf->mirrors[i].rdev);
2719 if (rdev == NULL ||
06f60385 2720 test_bit(Faulty, &rdev->flags)) {
8f19ccb2
N
2721 if (i < conf->raid_disks)
2722 still_degraded = 1;
3e198f78 2723 } else if (!test_bit(In_sync, &rdev->flags)) {
796a5cf0 2724 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1da177e4
LT
2725 bio->bi_end_io = end_sync_write;
2726 write_targets ++;
3e198f78
N
2727 } else {
2728 /* may need to read from here */
06f60385
N
2729 sector_t first_bad = MaxSector;
2730 int bad_sectors;
2731
2732 if (is_badblock(rdev, sector_nr, good_sectors,
2733 &first_bad, &bad_sectors)) {
2734 if (first_bad > sector_nr)
2735 good_sectors = first_bad - sector_nr;
2736 else {
2737 bad_sectors -= (sector_nr - first_bad);
2738 if (min_bad == 0 ||
2739 min_bad > bad_sectors)
2740 min_bad = bad_sectors;
2741 }
2742 }
2743 if (sector_nr < first_bad) {
2744 if (test_bit(WriteMostly, &rdev->flags)) {
2745 if (wonly < 0)
2746 wonly = i;
2747 } else {
2748 if (disk < 0)
2749 disk = i;
2750 }
796a5cf0 2751 bio_set_op_attrs(bio, REQ_OP_READ, 0);
06f60385
N
2752 bio->bi_end_io = end_sync_read;
2753 read_targets++;
d57368af
AL
2754 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2755 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2756 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2757 /*
2758 * The device is suitable for reading (InSync),
2759 * but has bad block(s) here. Let's try to correct them,
2760 * if we are doing resync or repair. Otherwise, leave
2761 * this device alone for this sync request.
2762 */
796a5cf0 2763 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
d57368af
AL
2764 bio->bi_end_io = end_sync_write;
2765 write_targets++;
3e198f78 2766 }
3e198f78 2767 }
028288df 2768 if (rdev && bio->bi_end_io) {
06f60385 2769 atomic_inc(&rdev->nr_pending);
4f024f37 2770 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
74d46992 2771 bio_set_dev(bio, rdev->bdev);
2e52d449
N
2772 if (test_bit(FailFast, &rdev->flags))
2773 bio->bi_opf |= MD_FAILFAST;
06f60385 2774 }
1da177e4 2775 }
3e198f78
N
2776 rcu_read_unlock();
2777 if (disk < 0)
2778 disk = wonly;
2779 r1_bio->read_disk = disk;
191ea9b2 2780
06f60385
N
2781 if (read_targets == 0 && min_bad > 0) {
2782 /* These sectors are bad on all InSync devices, so we
2783 * need to mark them bad on all write targets
2784 */
2785 int ok = 1;
8f19ccb2 2786 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
06f60385 2787 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
a42f9d83 2788 struct md_rdev *rdev = conf->mirrors[i].rdev;
06f60385
N
2789 ok = rdev_set_badblocks(rdev, sector_nr,
2790 min_bad, 0
2791 ) && ok;
2792 }
2953079c 2793 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
06f60385
N
2794 *skipped = 1;
2795 put_buf(r1_bio);
2796
2797 if (!ok) {
2798 /* Cannot record the badblocks, so need to
2799 * abort the resync.
2800 * If there are multiple read targets, could just
2801 * fail the really bad ones ???
2802 */
2803 conf->recovery_disabled = mddev->recovery_disabled;
2804 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2805 return 0;
2806 } else
2807 return min_bad;
2808
2809 }
2810 if (min_bad > 0 && min_bad < good_sectors) {
2811 /* only resync enough to reach the next bad->good
2812 * transition */
2813 good_sectors = min_bad;
2814 }
2815
3e198f78
N
2816 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2817 /* extra read targets are also write targets */
2818 write_targets += read_targets-1;
2819
2820 if (write_targets == 0 || read_targets == 0) {
1da177e4
LT
2821 /* There is nowhere to write, so all non-sync
2822 * drives must be failed - so we are finished
2823 */
b7219ccb
N
2824 sector_t rv;
2825 if (min_bad > 0)
2826 max_sector = sector_nr + min_bad;
2827 rv = max_sector - sector_nr;
57afd89f 2828 *skipped = 1;
1da177e4 2829 put_buf(r1_bio);
1da177e4
LT
2830 return rv;
2831 }
2832
c6207277
N
2833 if (max_sector > mddev->resync_max)
2834 max_sector = mddev->resync_max; /* Don't do IO beyond here */
06f60385
N
2835 if (max_sector > sector_nr + good_sectors)
2836 max_sector = sector_nr + good_sectors;
1da177e4 2837 nr_sectors = 0;
289e99e8 2838 sync_blocks = 0;
1da177e4
LT
2839 do {
2840 struct page *page;
2841 int len = PAGE_SIZE;
2842 if (sector_nr + (len>>9) > max_sector)
2843 len = (max_sector - sector_nr) << 9;
2844 if (len == 0)
2845 break;
6a806c51 2846 if (sync_blocks == 0) {
e64e4018
AS
2847 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2848 &sync_blocks, still_degraded) &&
e5de485f
N
2849 !conf->fullsync &&
2850 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6a806c51 2851 break;
7571ae88 2852 if ((len >> 9) > sync_blocks)
6a806c51 2853 len = sync_blocks<<9;
ab7a30c7 2854 }
191ea9b2 2855
8f19ccb2 2856 for (i = 0 ; i < conf->raid_disks * 2; i++) {
98d30c58
ML
2857 struct resync_pages *rp;
2858
1da177e4 2859 bio = r1_bio->bios[i];
98d30c58 2860 rp = get_resync_pages(bio);
1da177e4 2861 if (bio->bi_end_io) {
022e510f 2862 page = resync_fetch_page(rp, page_idx);
c85ba149
ML
2863
2864 /*
2865 * won't fail because the vec table is big
2866 * enough to hold all these pages
2867 */
2868 bio_add_page(bio, page, len, 0);
1da177e4
LT
2869 }
2870 }
2871 nr_sectors += len>>9;
2872 sector_nr += len>>9;
191ea9b2 2873 sync_blocks -= (len>>9);
022e510f 2874 } while (++page_idx < RESYNC_PAGES);
98d30c58 2875
1da177e4
LT
2876 r1_bio->sectors = nr_sectors;
2877
c40f341f
GR
2878 if (mddev_is_clustered(mddev) &&
2879 conf->cluster_sync_high < sector_nr + nr_sectors) {
2880 conf->cluster_sync_low = mddev->curr_resync_completed;
2881 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2882 /* Send resync message */
2883 md_cluster_ops->resync_info_update(mddev,
2884 conf->cluster_sync_low,
2885 conf->cluster_sync_high);
2886 }
2887
d11c171e
N
2888 /* For a user-requested sync, we read all readable devices and do a
2889 * compare
2890 */
2891 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2892 atomic_set(&r1_bio->remaining, read_targets);
2d4f4f33 2893 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
d11c171e
N
2894 bio = r1_bio->bios[i];
2895 if (bio->bi_end_io == end_sync_read) {
2d4f4f33 2896 read_targets--;
74d46992 2897 md_sync_acct_bio(bio, nr_sectors);
2e52d449
N
2898 if (read_targets == 1)
2899 bio->bi_opf &= ~MD_FAILFAST;
ed00aabd 2900 submit_bio_noacct(bio);
d11c171e
N
2901 }
2902 }
2903 } else {
2904 atomic_set(&r1_bio->remaining, 1);
2905 bio = r1_bio->bios[r1_bio->read_disk];
74d46992 2906 md_sync_acct_bio(bio, nr_sectors);
2e52d449
N
2907 if (read_targets == 1)
2908 bio->bi_opf &= ~MD_FAILFAST;
ed00aabd 2909 submit_bio_noacct(bio);
d11c171e 2910 }
1da177e4
LT
2911 return nr_sectors;
2912}
2913
fd01b88c 2914static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
80c3a6ce
DW
2915{
2916 if (sectors)
2917 return sectors;
2918
2919 return mddev->dev_sectors;
2920}
2921
e8096360 2922static struct r1conf *setup_conf(struct mddev *mddev)
1da177e4 2923{
e8096360 2924 struct r1conf *conf;
709ae487 2925 int i;
0eaf822c 2926 struct raid1_info *disk;
3cb03002 2927 struct md_rdev *rdev;
709ae487 2928 int err = -ENOMEM;
1da177e4 2929
e8096360 2930 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
1da177e4 2931 if (!conf)
709ae487 2932 goto abort;
1da177e4 2933
fd76863e 2934 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
824e47da 2935 sizeof(atomic_t), GFP_KERNEL);
fd76863e 2936 if (!conf->nr_pending)
2937 goto abort;
2938
2939 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
824e47da 2940 sizeof(atomic_t), GFP_KERNEL);
fd76863e 2941 if (!conf->nr_waiting)
2942 goto abort;
2943
2944 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
824e47da 2945 sizeof(atomic_t), GFP_KERNEL);
fd76863e 2946 if (!conf->nr_queued)
2947 goto abort;
2948
2949 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
824e47da 2950 sizeof(atomic_t), GFP_KERNEL);
fd76863e 2951 if (!conf->barrier)
2952 goto abort;
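 /* the four per-bucket atomic arrays above (nr_pending, nr_waiting,
  * nr_queued, barrier) back the bucketed barrier scheme used by
  * raise_barrier()/_wait_barrier() */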
2953
6396bb22
KC
2954 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2955 mddev->raid_disks, 2),
2956 GFP_KERNEL);
1da177e4 2957 if (!conf->mirrors)
709ae487 2958 goto abort;
1da177e4 2959
ddaf22ab
N
2960 conf->tmppage = alloc_page(GFP_KERNEL);
2961 if (!conf->tmppage)
709ae487 2962 goto abort;
ddaf22ab 2963
709ae487 2964 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1da177e4 2965 if (!conf->poolinfo)
709ae487 2966 goto abort;
8f19ccb2 2967 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
3f677f9c 2968 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
c7afa803 2969 rbio_pool_free, conf->poolinfo);
afeee514 2970 if (err)
709ae487
N
2971 goto abort;
2972
afeee514
KO
2973 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2974 if (err)
c230e7e5
N
2975 goto abort;
2976
ed9bfdf1 2977 conf->poolinfo->mddev = mddev;
1da177e4 2978
c19d5798 2979 err = -EINVAL;
e7e72bf6 2980 spin_lock_init(&conf->device_lock);
dafb20fa 2981 rdev_for_each(rdev, mddev) {
709ae487 2982 int disk_idx = rdev->raid_disk;
1da177e4
LT
2983 if (disk_idx >= mddev->raid_disks
2984 || disk_idx < 0)
2985 continue;
c19d5798 2986 if (test_bit(Replacement, &rdev->flags))
02b898f2 2987 disk = conf->mirrors + mddev->raid_disks + disk_idx;
c19d5798
N
2988 else
2989 disk = conf->mirrors + disk_idx;
1da177e4 2990
c19d5798
N
2991 if (disk->rdev)
2992 goto abort;
1da177e4 2993 disk->rdev = rdev;
1da177e4 2994 disk->head_position = 0;
12cee5a8 2995 disk->seq_start = MaxSector;
1da177e4
LT
2996 }
2997 conf->raid_disks = mddev->raid_disks;
2998 conf->mddev = mddev;
1da177e4 2999 INIT_LIST_HEAD(&conf->retry_list);
55ce74d4 3000 INIT_LIST_HEAD(&conf->bio_end_io_list);
1da177e4
LT
3001
3002 spin_lock_init(&conf->resync_lock);
17999be4 3003 init_waitqueue_head(&conf->wait_barrier);
1da177e4 3004
191ea9b2 3005 bio_list_init(&conf->pending_bio_list);
34db0cd6 3006 conf->pending_count = 0;
d890fa2b 3007 conf->recovery_disabled = mddev->recovery_disabled - 1;
191ea9b2 3008
c19d5798 3009 err = -EIO;
8f19ccb2 3010 for (i = 0; i < conf->raid_disks * 2; i++) {
1da177e4
LT
3011
3012 disk = conf->mirrors + i;
3013
c19d5798
N
3014 if (i < conf->raid_disks &&
3015 disk[conf->raid_disks].rdev) {
3016 /* This slot has a replacement. */
3017 if (!disk->rdev) {
3018 /* No original, just make the replacement
3019 * a recovering spare
3020 */
3021 disk->rdev =
3022 disk[conf->raid_disks].rdev;
3023 disk[conf->raid_disks].rdev = NULL;
3024 } else if (!test_bit(In_sync, &disk->rdev->flags))
3025 /* Original is not in_sync - bad */
3026 goto abort;
3027 }
3028
5fd6c1dc
N
3029 if (!disk->rdev ||
3030 !test_bit(In_sync, &disk->rdev->flags)) {
1da177e4 3031 disk->head_position = 0;
4f0a5e01
JB
3032 if (disk->rdev &&
3033 (disk->rdev->saved_raid_disk < 0))
918f0238 3034 conf->fullsync = 1;
be4d3280 3035 }
1da177e4 3036 }
709ae487 3037
709ae487 3038 err = -ENOMEM;
0232605d 3039 conf->thread = md_register_thread(raid1d, mddev, "raid1");
1d41c216 3040 if (!conf->thread)
709ae487 3041 goto abort;
1da177e4 3042
709ae487
N
3043 return conf;
3044
3045 abort:
3046 if (conf) {
afeee514 3047 mempool_exit(&conf->r1bio_pool);
709ae487
N
3048 kfree(conf->mirrors);
3049 safe_put_page(conf->tmppage);
3050 kfree(conf->poolinfo);
fd76863e 3051 kfree(conf->nr_pending);
3052 kfree(conf->nr_waiting);
3053 kfree(conf->nr_queued);
3054 kfree(conf->barrier);
afeee514 3055 bioset_exit(&conf->bio_split);
709ae487
N
3056 kfree(conf);
3057 }
3058 return ERR_PTR(err);
3059}
3060
afa0f557 3061static void raid1_free(struct mddev *mddev, void *priv);
849674e4 3062static int raid1_run(struct mddev *mddev)
709ae487 3063{
e8096360 3064 struct r1conf *conf;
709ae487 3065 int i;
3cb03002 3066 struct md_rdev *rdev;
5220ea1e 3067 int ret;
2ff8cc2c 3068 bool discard_supported = false;
709ae487
N
3069
3070 if (mddev->level != 1) {
1d41c216
N
3071 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3072 mdname(mddev), mddev->level);
709ae487
N
3073 return -EIO;
3074 }
3075 if (mddev->reshape_position != MaxSector) {
1d41c216
N
3076 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3077 mdname(mddev));
709ae487
N
3078 return -EIO;
3079 }
a415c0f1
N
3080 if (mddev_init_writes_pending(mddev) < 0)
3081 return -ENOMEM;
1da177e4 3082 /*
709ae487
N
3083 * copy the already verified devices into our private RAID1
3084 * bookkeeping area. [whatever we allocate in run(),
afa0f557 3085 * should be freed in raid1_free()]
1da177e4 3086 */
709ae487
N
3087 if (mddev->private == NULL)
3088 conf = setup_conf(mddev);
3089 else
3090 conf = mddev->private;
1da177e4 3091
709ae487
N
3092 if (IS_ERR(conf))
3093 return PTR_ERR(conf);
1da177e4 3094
3deff1a7 3095 if (mddev->queue) {
5026d7a9 3096 blk_queue_max_write_same_sectors(mddev->queue, 0);
3deff1a7
CH
3097 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3098 }
5026d7a9 3099
dafb20fa 3100 rdev_for_each(rdev, mddev) {
1ed7242e
JB
3101 if (!mddev->gendisk)
3102 continue;
709ae487
N
3103 disk_stack_limits(mddev->gendisk, rdev->bdev,
3104 rdev->data_offset << 9);
2ff8cc2c
SL
3105 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3106 discard_supported = true;
1da177e4 3107 }
191ea9b2 3108
709ae487 3109 mddev->degraded = 0;
ebfeb444 3110 for (i = 0; i < conf->raid_disks; i++)
709ae487
N
3111 if (conf->mirrors[i].rdev == NULL ||
3112 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3113 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3114 mddev->degraded++;
07f1a685
YY
3115 /*
3116 * RAID1 needs at least one disk in active
3117 */
3118 if (conf->raid_disks - mddev->degraded < 1) {
3119 ret = -EINVAL;
3120 goto abort;
3121 }
709ae487
N
3122
3123 if (conf->raid_disks - mddev->degraded == 1)
3124 mddev->recovery_cp = MaxSector;
3125
8c6ac868 3126 if (mddev->recovery_cp != MaxSector)
1d41c216
N
3127 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3128 mdname(mddev));
3129 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
f72ffdd6 3130 mdname(mddev), mddev->raid_disks - mddev->degraded,
1da177e4 3131 mddev->raid_disks);
709ae487 3132
1da177e4
LT
3133 /*
3134 * Ok, everything is just fine now
3135 */
709ae487
N
3136 mddev->thread = conf->thread;
3137 conf->thread = NULL;
3138 mddev->private = conf;
46533ff7 3139 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
709ae487 3140
1f403624 3141 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
1da177e4 3142
1ed7242e 3143 if (mddev->queue) {
2ff8cc2c 3144 if (discard_supported)
8b904b5b 3145 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
2ff8cc2c
SL
3146 mddev->queue);
3147 else
8b904b5b 3148 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
2ff8cc2c 3149 mddev->queue);
1ed7242e 3150 }
5220ea1e 3151
ebfeb444 3152 ret = md_integrity_register(mddev);
5aa61f42
N
3153 if (ret) {
3154 md_unregister_thread(&mddev->thread);
07f1a685 3155 goto abort;
5aa61f42 3156 }
07f1a685
YY
3157 return 0;
3158
3159abort:
3160 raid1_free(mddev, conf);
5220ea1e 3161 return ret;
1da177e4
LT
3162}
3163
afa0f557 3164static void raid1_free(struct mddev *mddev, void *priv)
1da177e4 3165{
afa0f557 3166 struct r1conf *conf = priv;
409c57f3 3167
afeee514 3168 mempool_exit(&conf->r1bio_pool);
990a8baf 3169 kfree(conf->mirrors);
0fea7ed8 3170 safe_put_page(conf->tmppage);
990a8baf 3171 kfree(conf->poolinfo);
fd76863e 3172 kfree(conf->nr_pending);
3173 kfree(conf->nr_waiting);
3174 kfree(conf->nr_queued);
3175 kfree(conf->barrier);
afeee514 3176 bioset_exit(&conf->bio_split);
1da177e4 3177 kfree(conf);
1da177e4
LT
3178}
3179
fd01b88c 3180static int raid1_resize(struct mddev *mddev, sector_t sectors)
1da177e4
LT
3181{
3182 /* no resync is happening, and there is enough space
3183 * on all devices, so we can resize.
3184 * We need to make sure resync covers any new space.
3185 * If the array is shrinking we should possibly wait until
3186 * any io in the removed space completes, but it hardly seems
3187 * worth it.
3188 */
a4a6125a
N
3189 sector_t newsize = raid1_size(mddev, sectors, 0);
3190 if (mddev->external_size &&
3191 mddev->array_sectors > newsize)
b522adcd 3192 return -EINVAL;
a4a6125a 3193 if (mddev->bitmap) {
e64e4018 3194 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
a4a6125a
N
3195 if (ret)
3196 return ret;
3197 }
3198 md_set_array_sectors(mddev, newsize);
b522adcd 3199 if (sectors > mddev->dev_sectors &&
b098636c 3200 mddev->recovery_cp > mddev->dev_sectors) {
58c0fed4 3201 mddev->recovery_cp = mddev->dev_sectors;
1da177e4
LT
3202 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3203 }
b522adcd 3204 mddev->dev_sectors = sectors;
4b5c7ae8 3205 mddev->resync_max_sectors = sectors;
1da177e4
LT
3206 return 0;
3207}
3208
fd01b88c 3209static int raid1_reshape(struct mddev *mddev)
1da177e4
LT
3210{
3211 /* We need to:
3212 * 1/ resize the r1bio_pool
3213 * 2/ resize conf->mirrors
3214 *
3215 * We allocate a new r1bio_pool if we can.
3216 * Then raise a device barrier and wait until all IO stops.
3217 * Then resize conf->mirrors and swap in the new r1bio pool.
6ea9c07c
N
3218 *
3219 * At the same time, we "pack" the devices so that all the missing
3220 * devices have the higher raid_disk numbers.
1da177e4 3221 */
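 /* e.g. shrinking a 4-disk array whose slot 1 has failed packs the
  * survivors [A, -, C, D] down to [A, C, D] before the new mirrors
  * table is swapped in (see the d/d2 loop below) */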
afeee514 3222 mempool_t newpool, oldpool;
1da177e4 3223 struct pool_info *newpoolinfo;
0eaf822c 3224 struct raid1_info *newmirrors;
e8096360 3225 struct r1conf *conf = mddev->private;
63c70c4f 3226 int cnt, raid_disks;
c04be0aa 3227 unsigned long flags;
2214c260 3228 int d, d2;
afeee514
KO
3229 int ret;
3230
3231 memset(&newpool, 0, sizeof(newpool));
3232 memset(&oldpool, 0, sizeof(oldpool));
1da177e4 3233
63c70c4f 3234 /* Cannot change chunk_size, layout, or level */
664e7c41 3235 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
63c70c4f
N
3236 mddev->layout != mddev->new_layout ||
3237 mddev->level != mddev->new_level) {
664e7c41 3238 mddev->new_chunk_sectors = mddev->chunk_sectors;
63c70c4f
N
3239 mddev->new_layout = mddev->layout;
3240 mddev->new_level = mddev->level;
3241 return -EINVAL;
3242 }
3243
2214c260
AP
3244 if (!mddev_is_clustered(mddev))
3245 md_allow_write(mddev);
2a2275d6 3246
63c70c4f
N
3247 raid_disks = mddev->raid_disks + mddev->delta_disks;
3248
6ea9c07c
N
3249 if (raid_disks < conf->raid_disks) {
3250 cnt=0;
3251 for (d= 0; d < conf->raid_disks; d++)
3252 if (conf->mirrors[d].rdev)
3253 cnt++;
3254 if (cnt > raid_disks)
1da177e4 3255 return -EBUSY;
6ea9c07c 3256 }
1da177e4
LT
3257
3258 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3259 if (!newpoolinfo)
3260 return -ENOMEM;
3261 newpoolinfo->mddev = mddev;
8f19ccb2 3262 newpoolinfo->raid_disks = raid_disks * 2;
1da177e4 3263
3f677f9c 3264 ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
c7afa803 3265 rbio_pool_free, newpoolinfo);
afeee514 3266 if (ret) {
1da177e4 3267 kfree(newpoolinfo);
afeee514 3268 return ret;
1da177e4 3269 }
6396bb22
KC
3270 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3271 raid_disks, 2),
8f19ccb2 3272 GFP_KERNEL);
1da177e4
LT
3273 if (!newmirrors) {
3274 kfree(newpoolinfo);
afeee514 3275 mempool_exit(&newpool);
1da177e4
LT
3276 return -ENOMEM;
3277 }
1da177e4 3278
e2d59925 3279 freeze_array(conf, 0);
1da177e4
LT
3280
3281 /* ok, everything is stopped */
3282 oldpool = conf->r1bio_pool;
3283 conf->r1bio_pool = newpool;
6ea9c07c 3284
a88aa786 3285 for (d = d2 = 0; d < conf->raid_disks; d++) {
3cb03002 3286 struct md_rdev *rdev = conf->mirrors[d].rdev;
a88aa786 3287 if (rdev && rdev->raid_disk != d2) {
36fad858 3288 sysfs_unlink_rdev(mddev, rdev);
a88aa786 3289 rdev->raid_disk = d2;
36fad858
NK
3290 sysfs_unlink_rdev(mddev, rdev);
3291 if (sysfs_link_rdev(mddev, rdev))
1d41c216
N
3292 pr_warn("md/raid1:%s: cannot register rd%d\n",
3293 mdname(mddev), rdev->raid_disk);
6ea9c07c 3294 }
a88aa786
N
3295 if (rdev)
3296 newmirrors[d2++].rdev = rdev;
3297 }
1da177e4
LT
3298 kfree(conf->mirrors);
3299 conf->mirrors = newmirrors;
3300 kfree(conf->poolinfo);
3301 conf->poolinfo = newpoolinfo;
3302
c04be0aa 3303 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 3304 mddev->degraded += (raid_disks - conf->raid_disks);
c04be0aa 3305 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 3306 conf->raid_disks = mddev->raid_disks = raid_disks;
63c70c4f 3307 mddev->delta_disks = 0;
1da177e4 3308
e2d59925 3309 unfreeze_array(conf);
1da177e4 3310
985ca973 3311 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
1da177e4
LT
3312 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3313 md_wakeup_thread(mddev->thread);
3314
afeee514 3315 mempool_exit(&oldpool);
1da177e4
LT
3316 return 0;
3317}
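/*
 * Editorial sketch: the d/d2 loop above "packs" surviving rdevs into the
 * lowest slots so that missing devices end up with the higher raid_disk
 * numbers.  Below is the same two-index pattern as self-contained,
 * compilable userspace C (names are illustrative; the kernel loop moves
 * entries into a freshly zeroed newmirrors[] instead of NULLing a tail).
 */
#include <stdio.h>
#include <stddef.h>

static size_t pack_members(const char *members[], size_t n)
{
	size_t d, d2 = 0;
	size_t survivors;

	for (d = 0; d < n; d++)
		if (members[d])			/* keep survivors, in order */
			members[d2++] = members[d];
	survivors = d2;
	while (d2 < n)
		members[d2++] = NULL;		/* holes float to the top */
	return survivors;
}

int main(void)
{
	const char *disks[] = { "sda", NULL, "sdc", NULL, "sde" };
	size_t n = sizeof(disks) / sizeof(disks[0]);
	size_t kept = pack_members(disks, n);
	size_t i;

	for (i = 0; i < n; i++)
		printf("slot %zu: %s\n", i, disks[i] ? disks[i] : "(missing)");
	printf("%zu of %zu members present\n", kept, n);
	return 0;
}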

static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
	struct r1conf *conf = mddev->private;

	if (quiesce)
		freeze_array(conf, 0);
	else
		unfreeze_array(conf);
}
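/*
 * Editorial sketch (hypothetical helper, assuming the usual md.h context;
 * the md core's suspend/resume path is the real caller): ->quiesce is an
 * on/off gate, so users bracket work that must not race with array I/O
 * between quiesce(1) and quiesce(0).
 */
static void with_array_quiesced(struct mddev *mddev,
				void (*fn)(struct mddev *))
{
	mddev->pers->quiesce(mddev, 1);		/* freeze: drain pending I/O */
	fn(mddev);				/* no r1bio is in flight here */
	mddev->pers->quiesce(mddev, 0);		/* thaw: let I/O continue */
}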

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;

		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
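/*
 * Editorial sketch (hypothetical caller, assuming md.h/linux/err.h
 * context; the md core's level-change path is the real consumer):
 * ->takeover hands back either the new private conf or an
 * ERR_PTR()-encoded errno, never a bare NULL, so results must be tested
 * with IS_ERR() before use.
 */
static int try_takeover_demo(struct mddev *mddev,
			     struct md_personality *new_pers)
{
	void *priv = new_pers->takeover(mddev);

	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* e.g. -EINVAL: conversion refused */
	mddev->private = priv;		/* the real path also swaps ->pers */
	return 0;
}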

static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= raid1_make_request,
	.run		= raid1_run,
	.free		= raid1_free,
	.status		= raid1_status,
	.error_handler	= raid1_error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk = raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= raid1_sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
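/*
 * Editorial note (sysfs path inferred from standard module_param()
 * behaviour, not verified on a running system): with S_IRUGO|S_IWUSR the
 * limit is tunable at runtime, typically via
 * /sys/module/raid1/parameters/max_queued_requests.  The variable itself
 * comes from the raid1-10.c include near the top of this file, so raid10
 * exposes its own copy of the same knob.
 */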