/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define DM_MSG_PREFIX "region hash"

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull.  dm_rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery.  dm_rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered.  dm_rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table.
 * It is never held in write mode from interrupt context, which
 * means we only have to disable irqs when taking it in write
 * mode.
 *
 * An ordinary spin lock 'region_lock' protects the three lists
 * in the region_hash, as well as the 'state', 'list' and
 * 'delayed_bios' fields of the regions.  This is used from irq
 * context, so all other uses will have to disable local irqs.
 *---------------------------------------------------------------*/
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;

	/*
	 * If there was a barrier failure no regions can be marked clean.
	 */
	int barrier_failure;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule writes of delayed bios */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
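
/*
 * Create a region hash on top of @log.  @target_begin is the offset
 * of the target on the underlying device and is used when mapping
 * bios to regions; @region_size is expected to be a power of two
 * (the shift calculated below assumes it).  The three callbacks are
 * invoked with @context and let the caller's worker thread dispatch
 * delayed bios and be woken when there is work to do.
 *
 * Returns the new region hash or an ERR_PTR() on failure.
 */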
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->barrier_failure = 0;

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);

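/*
 * A minimal usage sketch, not taken from this file: a mirror-style
 * target might wire the hash up roughly like this.  The names ms,
 * do_writes, wake_kmirrord, wake_recovery_waiters and MAX_RECOVERY
 * are hypothetical caller-side identifiers.
 *
 *	rh = dm_region_hash_create(ms, do_writes, wake_kmirrord,
 *				   wake_recovery_waiters, ti->begin,
 *				   MAX_RECOVERY, log, region_size,
 *				   nr_regions);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *	...
 *	dm_region_hash_destroy(rh);
 */
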
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	if (rh->region_pool)
		mempool_destroy(rh->region_pool);

	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

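/*
 * Multiplicative hash: spread region keys across the buckets by
 * multiplying with a large constant and keeping the upper bits.
 */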
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

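/*
 * Allocate and insert a new region.  Called without hash_lock held;
 * takes it in write mode itself.  The mempool allocation is atomic,
 * with a __GFP_NOFAIL kmalloc as a last resort.  If we raced with
 * another thread that inserted this region first, free ours and
 * return the existing entry.
 */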
static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}

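/*
 * Look up a region, creating it if it isn't hashed yet.  Called with
 * hash_lock held for read; note that the lock is dropped and
 * reacquired around the allocation.
 */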
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (e.g. -EWOULDBLOCK) is
	 * taken as DM_RH_NOSYNC.
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

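/*
 * Record the final sync state of a region in the dirty log, release
 * its delayed bios and wake anybody waiting for recoveries to drain.
 */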
static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed.  If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio_empty_barrier(bio)) {
		rh->barrier_failure = 1;
		return;
	}

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In all cases, the region should not be connected to any list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

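/*
 * Flush out the clean, recovered and failed_recovered lists: update
 * the dirty log, complete the finished recoveries and remove the
 * regions from the hash.  With @errors_handled set, regions whose
 * recovery failed are left marked out-of-sync in the log.
 */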
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);

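/*
 * Take a pending-io reference on a region, marking the region dirty
 * in the log the first time a write hits it while clean.
 */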
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio_empty_barrier(bio))
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

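/*
 * Drop a pending-io reference.  When the last one goes away, move
 * the region to the list matching its state and wake the caller's
 * worker thread to act on it.
 */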
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list
		 * for the next action.  At this point, the region
		 * is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should
		 * be kept off the clean list.  The hash entry for
		 * DM_RH_NOSYNC will remain in memory until the
		 * region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->barrier_failure)) {
			/*
			 * If a write barrier failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

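/*
 * Pull regions into recovery until the recovery_count semaphore
 * runs dry, i.e. with at most max_recovery recoveries in flight.
 */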
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Return a quiesced region ready for recovery, or NULL if none are
 * waiting.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

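/*
 * Report the outcome of a kcopyd recovery: queue the region on the
 * recovered or failed_recovered list and kick the caller's worker,
 * which finishes the job via dm_rh_update_states().
 */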
void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

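/*
 * Park a bio on its region's delayed list, e.g. while the region is
 * recovering.  The delayed bios are dispatched again once recovery
 * of the region completes.
 */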
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

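/*
 * Quiesce recovery: take all max_recovery semaphore slots so that no
 * new recovery can start and any in flight have finished by the time
 * we return.  dm_rh_start_recovery() releases the slots again.
 */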
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");