/* drivers/md/dm-raid1.c */
/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>
#include <linux/hardirq.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull.  rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery.  rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered.  rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table,
 * this is never held in write mode from interrupt context,
 * which I believe means that we only have to disable irqs when
 * doing a write lock.
 *
 * An ordinary spin lock 'region_lock' that protects the three
 * lists in the region_hash, with the 'state', 'list' and
 * 'bhs_delayed' fields of the regions.  This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
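/*
 * Summary of the region state machine implemented below (a sketch,
 * derived from the rh_* helpers in this file):
 *
 *   CLEAN  --rh_inc() on first pending write-------------> DIRTY
 *   DIRTY  --rh_dec() of the last pending write----------> CLEAN
 *   CLEAN/NOSYNC --__rh_recovery_prepare()---------------> RECOVERING
 *   RECOVERING --rh_recovery_end(success)----------------> recovered, dropped from hash
 *   RECOVERING --rh_recovery_end(failure)----------------> NOSYNC
 */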
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};


/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct dm_kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wake(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dm_dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

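/*
 * Multiplicative hash for region keys.  The constant below is close to
 * 2^32 divided by the golden ratio, so consecutive region numbers are
 * spread across the buckets; the low 12 bits of the product are
 * discarded before masking with the bucket count.
 */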
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

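/*
 * __rh_alloc() is entered with the hash lock held for read.  It has to
 * drop that lock to allocate (the allocation may sleep), then retakes
 * it in write mode to insert the new region.  Another thread may have
 * inserted the same region in the meantime, so the lookup is repeated
 * and the loser of the race frees its copy.
 */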
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);

	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed.  If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}

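/*
 * rh_inc()/rh_dec() bracket every write bio.  rh_inc() is called (via
 * rh_inc_pending()) before the write is issued and marks the region
 * dirty in the log; rh_dec() is called from the target's end_io hook
 * and, once the last pending write drains, moves the region back to
 * the clean or quiesced list and wakes kmirrord.
 */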
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

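/*
 * A dm_raid1_read_record is allocated for every read that is mapped
 * directly to a mirror.  It remembers which mirror serviced the read
 * and saves the original bio fields (via dm_bio_record) so that, if
 * the read fails, mirror_end_io() can restore the bio and requeue it
 * against an alternative device.
 */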
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	if (!errors_handled(ms))
		return;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
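/*
 * Recovery flow: do_recovery() asks the dirty log for work via
 * rh_recovery_prepare(), pulls quiesced regions off the list with
 * rh_recovery_start() and hands each one to kcopyd in recover().
 * kcopyd calls back into recovery_complete(), which records any
 * mirror failures and finishes the region with rh_recovery_end().
 */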
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct region *reg = (struct region *)context;
	struct mirror_set *ms = reg->rh->ms;
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dm_dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
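/*
 * Pick a mirror to read from: start at the current default and walk
 * backwards (wrapping round) until a device with a zero error count
 * is found.  Returns NULL when every leg has recorded errors.
 */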
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	region_t region = bio_to_region(&ms->rh, bio);

	if (ms->rh.log->type->in_sync(ms->rh.log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	(void) dm_io(&io_req, 1, &io, NULL);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(rh_in_sync(&ms->rh, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm_io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

/* __bio_mark_nosync
 * @ms
 * @bio
 * @done
 * @error
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
static void __bio_mark_nosync(struct mirror_set *ms,
			      struct bio *bio, unsigned done, int error)
{
	unsigned long flags;
	struct region_hash *rh = &ms->rh;
	struct dm_dirty_log *log = ms->rh.log;
	struct region *reg;
	region_t region = bio_to_region(rh, bio);
	int recovering = 0;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);
	ms->in_sync = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) RH_DIRTY
	 *   2) RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) RH_RECOVERING: flushing pending writes
	 * In either case, the region should not have been connected to a list.
	 */
	recovering = (reg->state == RH_RECOVERING);
	reg->state = RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	bio_endio(bio, error);
	if (recovering)
		complete_resync_work(reg, 0);
}

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wake(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wake(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures)))
			__bio_mark_nosync(ms, bio, bio->bi_size, 0);
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
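/*
 * kmirrord work function.  Each wakeup snapshots the queued bio lists
 * under ms->lock and then processes them in a fixed order: flush region
 * state changes, kick off recovery, dispatch reads, dispatch writes and
 * finally deal with any failed writes.
 */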
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}


/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	len = sizeof(struct dm_raid1_read_record);
	ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS,
							   len);
	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned int argc, char **argv,
					     unsigned int *args_used)
{
	unsigned int param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_dirty_log_destroy(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
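/*
 * Illustrative example only (device names and sizes are made up): a
 * two-way mirror over 2097152 sectors using an in-core log with
 * 1024-sector regions and error handling enabled could be loaded with
 * a table line such as:
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 */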
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wake(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = bio_to_region(&ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		rh_dec(&ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = ms->rh.log;

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	rh_stop_recovery(&ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = ms->rh.log;

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = ms->rh.log;

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = ms->rh.log;
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 20},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0)
		DMERR("Failed to register mirror target");

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");