// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

static struct workqueue_struct *dm_raid1_wq;

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

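/*
 * Note: recovery (see recover() below) copies from the default mirror to
 * all other legs in a single kcopyd request, which supports at most
 * DM_KCOPYD_MAX_REGIONS destinations - hence the +1 for the source leg
 * in MAX_NR_MIRRORS.
 */
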
/*
 *---------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------
 */
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[];
};

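/*
 * Flow of bios through the target, as implemented below: mirror_map()
 * queues reads and writes on ms->reads/ms->writes, the kmirrord worker
 * (do_mirror()) drains those lists, failed writes are parked on
 * ms->failures for userspace-driven handling, and ms->holds keeps bios
 * back until a suspend completes.
 */
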
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

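/*
 * delayed_wake() batches wakeups for requeued writes: the timer_pending
 * bit keeps at most one timer armed, and the HZ / 5 expiry retries the
 * work roughly every 200ms rather than immediately.
 */
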
static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

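/*
 * The pointer stashed in bi_next is retrieved and cleared again in
 * read_callback() and write_callback() below, before the bio is completed
 * or requeued, so it never escapes back to the block layer.
 */
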
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
		      m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	queue_work(dm_raid1_wq, &ms->trigger_event);
}

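/*
 * fail_mirror() runs from I/O completion callbacks (see read_callback(),
 * write_callback() and recovery_complete() below), which is why it must
 * not block and why the dm event is raised from the trigger_event work
 * item instead of directly.
 */
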
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

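/*
 * The flush above is a zero-length REQ_PREFLUSH write issued to all legs
 * in one dm_io() call; dm-io reports failures in error_bits with one bit
 * per leg, so individual legs can be failed selectively.
 */
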
/*
 *---------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------
 */
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned int i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

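/*
 * kcopyd reports per-destination write failures to recovery_complete()
 * as bits in write_err; the bits skip the default mirror, which is the
 * copy source and cannot change during recovery.
 */
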
static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*
 *---------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------
 */
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

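/*
 * choose_mirror() above searches backwards from the current default
 * mirror, wrapping around the array, for the first leg with a zero
 * error_count; a NULL return means no healthy leg remains.
 */
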
/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * The lock is required to avoid a race condition during the
	 * suspend process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

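/*
 * BLK_STS_DM_REQUEUE asks the dm core to resubmit the bio after the
 * device is resumed, which is only meaningful during a 'noflush' suspend;
 * otherwise the held bio has to be failed outright.
 */
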
/*
 *---------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------
 */
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);

	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*
 *---------------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------------
 */
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	struct bio *bio = context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | op_flags,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*
 *---------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------
 */
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*
 *---------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------
 */
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned int argc, char **argv,
					     unsigned int *args_used)
{
	unsigned int param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
			  unsigned int *args_used)
{
	unsigned int num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

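/*
 * For illustration only (devices, sizes and log parameters here are made
 * up, not taken from this file), a matching table line for a two-leg
 * mirror with a core log could look like:
 *
 *	0 409600 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0
 *
 * where "core 2 1024 nosync" is log_type #log_params <log_params> and
 * "2 /dev/sda1 0 /dev/sdb1 0" is #mirrors [mirror_path offset]{2,}.
 */
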
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

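/*
 * The return values above follow the usual dm conventions:
 * DM_MAPIO_SUBMITTED means the target took ownership of the bio (it was
 * queued for kmirrord), DM_MAPIO_REMAPPED means the core should submit it
 * to the leg the bio was remapped to, and DM_MAPIO_KILL fails it with an
 * error.
 */
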
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

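/*
 * DM_ENDIO_INCOMPLETE above tells the dm core not to complete the bio
 * yet: the failed read has been restored from the saved details and
 * requeued so it can be retried on an intact leg.
 */
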
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process the bios in the hold list so that recovery is not
	 * left waiting on them.  After this, no bio has a chance to be
	 * added to the hold list because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * F => Flush - A flush failure occurred
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
			DMEMIT(",mirror_device_%d_status=%c",
			       m, device_status_char(&(ms->mirror[m])));
		}

		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');

		DMEMIT(",log_type_status=");
		sz += log->type->status(log, type, result+sz, maxlen-sz);
		DMEMIT(";");
		break;
	}
}

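/*
 * As a rough illustration (device numbers and counts are hypothetical),
 * the STATUSTYPE_INFO output of a healthy two-leg mirror with a disk log
 * might look like:
 *
 *	2 253:4 253:5 125/125 1 AA 3 disk 253:3 A
 *
 * i.e. #mirrors, the legs, sync_count/nr_regions, "1", the per-leg status
 * characters, then the dirty log's own status.
 */
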
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned int i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	dm_raid1_wq = alloc_workqueue("dm_raid1_wq", 0, 0);
	if (!dm_raid1_wq) {
		DMERR("Failed to alloc workqueue");
		return -ENOMEM;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		destroy_workqueue(dm_raid1_wq);
		return r;
	}

	return 0;
}

static void __exit dm_mirror_exit(void)
{
	destroy_workqueue(dm_raid1_wq);
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");