/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

enum al_transaction_types {
	AL_TR_UPDATE = 0,
	AL_TR_INITIALIZED = 0xffff
};

/* all fields on disc in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined
	 * see also enum al_transaction_types */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes.  Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 byte used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining byte in the 4k block for
	 * context information.  "Flexible" number of updates per transaction
	 * does not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit. */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows to cover device sizes of up to 2**54 Byte (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */

	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};
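
/* A quick sanity check of the layout arithmetic above, assuming the usual
 * values AL_UPDATES_PER_TRANSACTION == 64 and AL_CONTEXT_PER_TRANSACTION
 * == 919:
 *   fixed header:  3*4 (magic, tr_number, crc32c)
 *                + 4*2 (transaction_type .. context_start_slot_nr)
 *                + 4*4 (__reserved)           =   36 bytes
 *   update slots:  64*2 + 64*4                =  384 bytes  (36+384 = 420)
 *   context:       919*4                      = 3676 bytes  (420+3676 = 4096)
 * so one transaction fills exactly one aligned 4k block. */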

struct update_odbm_work {
	struct drbd_work w;
	unsigned int enr;
};

struct update_al_work {
	struct drbd_work w;
	struct completion event;
	int err;
};

static int al_write_transaction(struct drbd_conf *mdev, bool delegate);

void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
	int r;

	wait_event(mdev->misc_wait,
		   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
		   mdev->state.disk <= D_FAILED);

	return r ? NULL : page_address(mdev->md_io_page);
}
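
/* Note on the pattern above: md_io_in_use acts as a one-owner lock for the
 * single preallocated metadata page.  atomic_cmpxchg() succeeds (returns the
 * old value 0) for exactly one caller at a time; everyone else sleeps on
 * misc_wait until drbd_md_put_buffer() drops the count back to 0, or until
 * the disk fails, in which case NULL is returned instead of the buffer. */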

void drbd_md_put_buffer(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->md_io_in_use))
		wake_up(&mdev->misc_wait);
}

void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
				       unsigned int *done)
{
	long dt;

	rcu_read_lock();
	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
	rcu_read_unlock();
	dt = dt * HZ / 10;
	if (dt == 0)
		dt = MAX_SCHEDULE_TIMEOUT;

	dt = wait_event_timeout(mdev->misc_wait,
			*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
	if (dt == 0) {
		dev_err(DEV, "meta-data IO operation timed out\n");
		drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
	}
}

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	int err;

	mdev->md_io.done = 0;
	mdev->md_io.error = -ENODEV;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
		rw |= REQ_FUA | REQ_FLUSH;

	bio = bio_alloc_drbd(GFP_NOIO);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, page, size, 0) != size)
		goto out;
	bio->bi_private = &mdev->md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
		;
	else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
		/* Corresponding put_ldev in drbd_md_io_complete() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
		err = -ENODEV;
		goto out;
	}

	bio_get(bio); /* one bio_put() is in the completion handler */
	atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
	if (bio_flagged(bio, BIO_UPTODATE))
		err = mdev->md_io.error;

 out:
	bio_put(bio);
	return err;
}

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int err;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);

	BUG_ON(!bdev->md_bdev);

	dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
	     current->comm, current->pid, __func__,
	     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
	     (void *)_RET_IP_);

	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	/* we do all our meta data IO in aligned 4k blocks. */
	err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
	if (err)
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
	return err;
}

static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	struct lc_element *tmp;
	int wake;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
			wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
			spin_unlock_irq(&mdev->al_lock);
			if (wake)
				wake_up(&mdev->al_wait);
			return NULL;
		}
	}
	al_ext = lc_get(mdev->act_log, enr);
	spin_unlock_irq(&mdev->al_lock);
	return al_ext;
}

/**
 * drbd_al_begin_io() - Gets (a) reference(s) to AL extent(s)
 * @mdev:	DRBD device.
 * @i:		interval (sector and size) to cover; may span two AL extents
 * @delegate:	delegate activity log I/O to the worker thread
 */
void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
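	/* Worked example, assuming the usual AL_EXTENT_SHIFT of 22 (4 MiB
	 * extents, i.e. 2^(22-9) = 8192 sectors of 512 bytes each): a 4 KiB
	 * request at sector 8190 covers sectors 8190..8197, so
	 * first = 8190 >> 13 = 0 and last = 8197 >> 13 = 1; both extent 0
	 * and extent 1 must be activated before the write may proceed. */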
	unsigned enr;
	bool locked = false;

	/* When called through generic_make_request(), we must delegate
	 * activity log I/O to the worker thread: a further request
	 * submitted via generic_make_request() within the same task
	 * would be queued on current->bio_list, and would only start
	 * after this function returns (see generic_make_request()).
	 *
	 * However, if we *are* the worker, we must not delegate to ourselves.
	 */
	if (delegate)
		BUG_ON(current == mdev->tconn->worker.task);

	D_ASSERT(first <= last);
	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	for (enr = first; enr <= last; enr++)
		wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);

	/* Serialize multiple transactions.
	 * This uses test_and_set_bit, memory barrier is implicit.
	 */
	wait_event(mdev->al_wait,
			mdev->act_log->pending_changes == 0 ||
			(locked = lc_try_lock_for_transaction(mdev->act_log)));

	if (locked) {
		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (mdev->act_log->pending_changes) {
			bool write_al_updates;

			rcu_read_lock();
			write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
			rcu_read_unlock();

			if (write_al_updates) {
				al_write_transaction(mdev, delegate);
				mdev->al_writ_cnt++;
			}

			spin_lock_irq(&mdev->al_lock);
			/* FIXME
			if (err)
				we need an "lc_cancel" here;
			*/
			lc_committed(mdev->act_log);
			spin_unlock_irq(&mdev->al_lock);
		}
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);
	}
}

void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(first <= last);
	spin_lock_irqsave(&mdev->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
		extent = lc_find(mdev->act_log, enr);
		if (!extent) {
			dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
			continue;
		}
		lc_put(mdev->act_log, extent);
	}
	spin_unlock_irqrestore(&mdev->al_lock, flags);
	wake_up(&mdev->al_wait);
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}

static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
{
	return rs_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* resync extent number to bit */
		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
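
/* The shift arithmetic above, spelled out with the usual values (an
 * assumption -- PAGE_SHIFT 12, BM_BLOCK_SHIFT 12, AL_EXTENT_SHIFT 22,
 * BM_EXT_SHIFT 24; other configurations shift accordingly): one bitmap page
 * holds 2^(12+3) = 32768 bits, one AL extent covers 2^(22-12) = 1024 bits,
 * so al_extent_to_bm_page() reduces to al_enr >> 5; one resync extent covers
 * 2^(24-12) = 4096 bits, so rs_extent_to_bm_page() is rs_enr >> 3. */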

static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
{
	const unsigned int stripes = mdev->ldev->md.al_stripes;
	const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;

	/* transaction number, modulo on-disk ring buffer wrap around */
	unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);

	/* ... to aligned 4k on disk block */
	t = ((t % stripes) * stripe_size_4kB) + t/stripes;

	/* ... to 512 byte sector in activity log */
	t *= 8;

	/* ... plus offset to the on disk position */
	return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
}
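
/* Worked example of the striping math above (hypothetical geometry:
 * al_stripes = 4, al_stripe_size_4k = 16, hence al_size_4k = 64):
 * consecutive transactions are spread round-robin across the stripes.
 * Transaction number 5 gives t = 5; (5 % 4) * 16 + 5/4 = 16 + 1 = 17,
 * i.e. the second 4k block of stripe 1; "t *= 8" then converts that 4k
 * block number into a 512-byte sector offset within the AL area. */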

static int
_al_write_transaction(struct drbd_conf *mdev)
{
	struct al_transaction_on_disk *buffer;
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;
	int err = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(mdev->state.disk));
		return -EIO;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (mdev->state.disk < D_INCONSISTENT) {
		dev_err(DEV,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(mdev->state.disk));
		put_ldev(mdev);
		return -EIO;
	}

	buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
	if (!buffer) {
		dev_err(DEV, "disk failed while waiting for md_io buffer\n");
		put_ldev(mdev);
		return -ENODEV;
	}

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	i = 0;

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&mdev->al_lock);
	list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(mdev,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&mdev->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	sector = al_tr_number_to_on_disk_sector(mdev);

	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);
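
	/* The checksum protects the whole 4k block.  buffer->crc32c itself is
	 * still zero from the memset() above when crc32c() runs, which matches
	 * the on-disk definition: "checksum on the full 4k block, with this
	 * field set to 0". */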

	/* normal execution path goes through all three branches */
	if (drbd_bm_write_hinted(mdev))
		err = -EIO;
		/* drbd_chk_io_error done already */
	else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		err = -EIO;
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
	} else {
		mdev->al_tr_number++;
	}

	drbd_md_put_buffer(mdev);
	put_ldev(mdev);

	return err;
}

static int w_al_write_transaction(struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct drbd_conf *mdev = w->mdev;
	int err;

	err = _al_write_transaction(mdev);
	aw->err = err;
	complete(&aw->event);

	return err != -EIO ? err : 0;
}

/* Calls from worker context (see w_restart_disk_io()) need to write the
   transaction directly. Others came through generic_make_request(),
   those need to delegate it to the worker. */
static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
{
	if (delegate) {
		struct update_al_work al_work;
		init_completion(&al_work.event);
		al_work.w.cb = w_al_write_transaction;
		al_work.w.mdev = mdev;
		drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
		wait_for_completion(&al_work.event);
		return al_work.err;
	} else
		return _al_write_transaction(mdev);
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(mdev->act_log, al_ext);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(mdev->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
	}

	wake_up(&mdev->al_wait);
}

static int w_update_odbm(struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
	struct drbd_conf *mdev = w->mdev;
	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 0;
	}

	drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_event(mdev, &sib);

	return 0;
}

/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;
	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
				    "rs_failed=%d count=%d cstate=%s\n",
				     (unsigned long long)sector,
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count,
				     drbd_conn_str(mdev->state.conn));

				/* We don't expect to be able to clear more bits
				 * than have been set when we originally counted
				 * the set bits to cache that value in ext->rs_left.
				 * Whatever the reason (disconnect during resync,
				 * delayed local completion of an application write),
				 * try to fix it up by recounting here. */
				ext->rs_left = drbd_bm_e_weight(mdev, enr);
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(mdev->resync);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				udw->w.mdev = mdev;
				drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
			} else
				dev_warn(DEV, "Could not kmalloc an udw\n");
		}
	} else {
		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    mdev->resync_locked,
		    mdev->resync->nr_elements,
		    mdev->resync->flags);
	}
}

void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;

	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
		    mdev->state.conn != C_PAUSED_SYNC_T &&
		    mdev->state.conn != C_PAUSED_SYNC_S) {
			mdev->rs_mark_time[next] = now;
			mdev->rs_mark_left[next] = still_to_go;
			mdev->rs_last_mark = next;
		}
	}
}

/* clear the bit corresponding to the piece of storage in question:
 * size byte of data starting from sector.  Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		       const char *file, const unsigned int line)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;
	int wake_up = 0;
	unsigned long flags;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}

	if (!get_ldev(mdev))
		return; /* no disk, no metadata, no bitmap to clear bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we clear it (in sync).
	 * round up start sector, round down end sector.  we make sure we only
	 * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		goto out;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
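
	/* Worked example of the rounding (BM_SECT_PER_BIT is 8, since one
	 * bitmap bit covers one 4 KiB block = eight 512-byte sectors): a
	 * 4 KiB write at sector 9 covers sectors 9..16, giving
	 * sbnr = (9+7)>>3 = 2 and ebnr = (16-7)>>3 = 1.  sbnr > ebnr, so no
	 * bit is fully covered and nothing is cleared; only writes spanning
	 * a whole aligned 4 KiB block may mark it in sync. */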
	if (sbnr > ebnr)
		goto out;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
	if (count) {
		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
		spin_lock_irqsave(&mdev->al_lock, flags);
		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
		spin_unlock_irqrestore(&mdev->al_lock, flags);

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
out:
	put_ldev(mdev);
	if (wake_up)
		wake_up(&mdev->al_wait);
}

/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
			    const char *file, const unsigned int line)
{
	unsigned long sbnr, ebnr, flags;
	sector_t esector, nr_sectors;
	unsigned int enr, count = 0;
	struct lc_element *e;

	/* this should be an empty REQ_FLUSH */
	if (size == 0)
		return 0;

	if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(mdev))
		return 0; /* no disk, no metadata, no bitmap to set bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	/* we set it out of sync,
	 * we do not need to round anything here */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);
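
	/* Continuing the example from __drbd_set_in_sync() (BM_SECT_PER_BIT
	 * is 8): the same 4 KiB write at sector 9, covering sectors 9..16,
	 * yields sbnr = 9>>3 = 1 and ebnr = 16>>3 = 2.  Both partially
	 * covered blocks are marked out of sync -- rounding outward here
	 * errs on the safe side, while __drbd_set_in_sync() rounded inward. */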

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors. */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_set_bits(mdev, sbnr, ebnr);

	enr = BM_SECT_TO_EXT(sector);
	e = lc_find(mdev->resync, enr);
	if (e)
		lc_entry(e, struct bm_extent, lce)->rs_left += count;
	spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
	put_ldev(mdev);

	return count;
}

static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
		spin_unlock_irq(&mdev->al_lock);
		return NULL;
	}
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_committed(mdev->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			mdev->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = mdev->resync->flags;
	spin_unlock_irq(&mdev->al_lock);
	if (wakeup)
		wake_up(&mdev->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = lc_is_used(mdev->act_log, enr);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
			 200 times -> 20 seconds. */

retry:
	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
					       !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
					       test_bit(BME_PRIORITY, &bm_ext->flags));

		if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
			spin_lock_irq(&mdev->al_lock);
			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				mdev->resync_locked--;
				wake_up(&mdev->al_wait);
			}
			spin_unlock_irq(&mdev->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			if (sa && --sa == 0)
				dev_warn(DEV, "drbd_rs_begin_io() stepped aside for 20sec."
					 " Resync stalled?\n");
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer undefined if we give up the ref count
		 * when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			mdev->resync_wenr = LC_FREE;
			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
				mdev->resync_locked--;
			wake_up(&mdev->al_wait);
		} else {
			dev_alert(DEV, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			mdev->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	}
	/* do we rather want to try later? */
	if (mdev->resync_locked > mdev->resync->nr_elements-3)
		goto try_again;
	/* Do or do not. There is no try. -- Yoda */
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		const unsigned long rs_flags = mdev->resync->flags;
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
		goto try_again;
	}
	if (bm_ext->lce.lc_number != enr) {
		bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
		bm_ext->rs_failed = 0;
		lc_committed(mdev->resync);
		wake_up(&mdev->al_wait);
		D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
	}
	set_bit(BME_NO_WRITES, &bm_ext->flags);
	D_ASSERT(bm_ext->lce.refcnt == 1);
	mdev->resync_locked++;
	goto check_al;

check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (lc_is_used(mdev->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	return 0;

try_again:
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		mdev->resync_locked--;
		wake_up(&mdev->al_wait);
	}

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(mdev->resync);
		put_ldev(mdev);
	}
	mdev->resync_locked = 0;
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < mdev->resync->nr_elements; i++) {
			e = lc_element_by_index(mdev->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     mdev->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_wenr = LC_FREE;
				lc_put(mdev->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(mdev);
				spin_unlock_irq(&mdev->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(mdev->resync, &bm_ext->lce);
		}
		D_ASSERT(mdev->resync->used == 0);
		put_ldev(mdev);
	}
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);

	return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 * @size:	Size of failed IO operation, in byte.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count;
	sector_t esector, nr_sectors;
	int wake_up = 0;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		return;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/*
	 * round up start sector, round down end sector.  we make sure we only
	 * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
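
	/* The rounding mirrors __drbd_set_in_sync() above; see the worked
	 * example there: only bits whose 4 KiB block is entirely covered by
	 * the failed region are counted. */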
	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irq(&mdev->al_lock);
	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
	if (count) {
		mdev->rs_failed += count;

		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, false);
			put_ldev(mdev);
		}

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake_up)
		wake_up(&mdev->al_wait);
}