dm io: remove old interface
[linux-2.6-block.git] drivers/md/dm-raid1.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions. Each
 * region can be in one of three states: clean, dirty,
 * nosync. There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull. rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery. rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered. rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table,
 * this is never held in write mode from interrupt context,
 * which I believe means that we only have to disable irqs when
 * doing a write lock.
 *
 * An ordinary spin lock 'region_lock' that protects the three
 * lists in the region_hash, with the 'state', 'list' and
 * 'delayed_bios' fields of the regions. This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh; /* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};


/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock; /* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror; /* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

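/*
 * Illustrative example of the conversions above (numbers assumed, not
 * taken from a real table): with region_size == 64 sectors,
 * region_shift is 6, so a bio at relative sector 12345 falls in
 * region 12345 >> 6 == 192, and region 192 starts back at sector
 * 192 << 6 == 12288.
 */
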
static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

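	/*
	 * Example with assumed numbers: nr_regions = 1 << 20 gives
	 * max_buckets = 16384; the loop stops once nr_buckets reaches
	 * 16384, and the final shift leaves 8192, a power of two
	 * between max_buckets / 2 and max_buckets, never below 64.
	 */
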
	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

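/*
 * Multiplicative hash: the large odd constant scatters consecutive
 * region numbers, the >> 12 drops the low-order bits (which depend
 * only on the low bits of the key and so mix poorly), and the mask
 * selects a bucket, relying on nr_buckets being a power of two.
 */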
static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

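/*
 * Called with the read lock held. The lock is dropped while the new
 * region is allocated and initialised, then retaken in write mode to
 * insert it. Another thread may have inserted the same region in
 * that window, so the hash is looked up again and the loser of the
 * race frees its copy.
 */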
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as RH_NOSYNC.
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	if (!list_empty(&recovered))
		rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}

static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list); /* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list
		 * for the next action. At this point, the region is
		 * not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be
		 * kept off the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list); /* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

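/*
 * recovery_count is a counting semaphore: rh_start_recovery raises it
 * to MAX_RECOVERY, each in-flight recovery holds one count (taken in
 * rh_recovery_prepare, released in complete_resync_work), and
 * rh_stop_recovery drains all MAX_RECOVERY counts, which amounts to
 * waiting for every outstanding recovery to finish.
 */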
static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror_set struct away inside
 * bi_next for write buffers. This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, !(read_err || write_err));
}

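/*
 * Build one kcopyd copy per region: read from the default mirror,
 * write to all the others. The final region may be short; for
 * example (assumed numbers), with ti->len == 1000 sectors and
 * region_size == 64, the last region covers 1000 & 63 == 40 sectors,
 * and a result of zero means the length is an exact multiple, so the
 * full region_size is used.
 */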
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 0))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC: increment pending, use dm_io to write to *all* mirrors
 * RECOVERING: delay the io until recovery completes
 * NOSYNC: increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
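/*
 * Completion callback from dm_io. 'error' is a bitset with one bit
 * per mirror: bit i is set if the write to mirror i failed. The
 * function below computes 'uptodate' (an io should only be errored
 * if every mirror failed) but, as the FIXME notes, does not yet
 * propagate a failure to bio_endio().
 */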
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}

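/*
 * Issue a single asynchronous dm_io write aimed at every mirror.
 * DM_IO_BVEC points the request at the bio's own bio_vec, so the
 * mirrors all share the same data pages, and setting notify.fn makes
 * dm_io return immediately and call write_callback() once all the
 * legs have completed.
 */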
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

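/*
 * A valid region size is a power of two no larger than the target,
 * expressed as a whole number of pages: with 4 KiB pages it must be
 * a multiple of PAGE_SIZE >> 9 == 8 sectors.
 */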
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
		return -ENOMEM;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r) {
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	argv += args_used;
	argc -= args_used;

	if (argc) {
		ti->error = "Too many mirror arguments";
		free_context(ms, ti, ms->nr_mirrors);
		return -EINVAL;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	wake(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

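/*
 * Add a bio to the appropriate list for kmirrord, waking the daemon
 * only if the list was empty, so that a burst of bios costs a single
 * queue_work() call.
 */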
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
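/*
 * Return values follow the device-mapper convention: DM_MAPIO_REMAPPED
 * asks the core to submit the remapped bio, DM_MAPIO_SUBMITTED means
 * the target has taken ownership of the bio (here, queued it for
 * kmirrord).
 */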
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK) /* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead. So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name = "mirror",
	.version = {1, 0, 3},
	.module = THIS_MODULE,
	.ctr = mirror_ctr,
	.dtr = mirror_dtr,
	.map = mirror_map,
	.end_io = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume = mirror_resume,
	.status = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");