/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
struct after_state_chg_work {
	enum chg_state_flags flags;
	struct completion *done;
};

int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
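/* Usage example (illustrative, not from the original source): loaded as a
 * module, "modprobe drbd minor_count=64 allow_oos=0"; built into the kernel,
 * the equivalent boot parameter would be "drbd.minor_count=64". */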
#ifdef CONFIG_DRBD_FAULT_INJECTION
static int fault_count;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = 32;
int disable_sendpage;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd */
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;" */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;
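/* Illustrative sketch (an assumption, not part of the driver) of how such a
 * chain is linked through the page private member:
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);	<- push
 *	drbd_pp_pool = page;
 *
 *	page = drbd_pp_pool;					<- pop
 *	drbd_pp_pool = (struct page *)page_private(page);
 */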
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner   = THIS_MODULE,
	.release = drbd_release,

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 */
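/* Illustrative layout (not from the original source):
 *
 *	oldest_tle -> [epoch #n] -> [epoch #n+1] -> ... -> newest_tle
 *	                  |
 *	                  +-> circular list of struct drbd_request
 */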
static int tl_init(struct drbd_conf *mdev)
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
	mdev->tl_hash = NULL;

static void tl_cleanup(struct drbd_conf *mdev)
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	newest_before = mdev->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
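	/* Worked example (illustrative): GCC's "a ?: b" evaluates to a if a is
	 * non-zero, else b. A wrap from 0xffffffff gives (0xffffffff + 1) == 0,
	 * which is remapped to 1, so barrier number 0 is never handed out. */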
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch object this function will cause a termination
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		unsigned int set_size)
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);
	b = mdev->oldest_tle;

	/* first some paranoia code */
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	 */
	list_del_init(&b->requests);

	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle points already to b */
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);
			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		if (what == resend) {
			b->n_writes = n_writes;
			if (b->w.cb == NULL) {
				b->w.cb = w_send_barrier;
				inc_ap_pending(mdev);
				set_bit(CREATE_BARRIER, &mdev->flags);
			drbd_queue_work(&mdev->data.work, &b->w);
			list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);
			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
				dec_ap_pending(mdev);
			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->br_number = net_random();
		list_splice(&carry_reads, &b->requests);
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, connection_lost_while_pending);

	/* we expect this list to be empty. */
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, connection_lost_while_pending);

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

	spin_unlock_irq(&mdev->req_lock);

void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
/**
 * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
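/* Example (illustrative): with both nodes at least C_CONNECTED, promoting to
 * R_PRIMARY or detaching to D_DISKLESS is cluster wide -- the peer must agree
 * first -- while the same change on a disconnected node is purely local. */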
int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		      union drbd_state mask, union drbd_state val)
	union drbd_state os, ns;

	spin_lock_irqsave(&mdev->req_lock, flags);
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
		      union drbd_state mask, union drbd_state val)
	drbd_change_state(mdev, CS_HARD, mask, val);

static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
static int is_valid_state_transition(struct drbd_conf *,
				     union drbd_state, union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, int *warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);

static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
					      union drbd_state mask, union drbd_state val)
	union drbd_state os, ns;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&mdev->req_lock, flags);
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
	rv = is_valid_state(mdev, ns);
	if (rv == SS_SUCCESS) {
		rv = is_valid_state_transition(mdev, ns, os);
		if (rv == SS_SUCCESS)
			rv = 0; /* cont waiting, otherwise fail. */

	spin_unlock_irqrestore(&mdev->req_lock, flags);
/**
 * drbd_req_state() - Perform a possibly cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static int drbd_req_state(struct drbd_conf *mdev,
			  union drbd_state mask, union drbd_state val,
			  enum chg_state_flags f)
	struct completion done;
	union drbd_state os, ns;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			print_st_err(mdev, os, ns, rv);

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			print_st_err(mdev, os, ns, rv);

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			print_st_err(mdev, os, ns, rv);

		spin_lock_irqsave(&mdev->req_lock, flags);
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
		rv = _drbd_set_state(mdev, ns, f, &done);

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);
/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
			union drbd_state val, enum chg_state_flags f)
	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'

void print_st_err(struct drbd_conf *mdev,
		  union drbd_state os, union drbd_state ns, int err)
	if (err == SS_IN_TRANSIENT_STATE)
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
#define drbd_peer_str drbd_role_str
#define drbd_pdsk_str drbd_disk_str

#define drbd_susp_str(A)     ((A) ? "1" : "0")
#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
#define drbd_user_isp_str(A) ((A) ? "1" : "0")

	({ if (ns.A != os.A) { \
		pbp += sprintf(pbp, #A "( %s -> %s ) ", \
			       drbd_##A##_str(os.A), \
			       drbd_##A##_str(ns.A)); \
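/* Illustrative output (assumed format, not from the original source): each
 * field that differs between os and ns appends one "name( old -> new )"
 * chunk, e.g. "conn( WFConnection -> Connected ) pdsk( DUnknown -> UpToDate )". */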
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
	/* See drbd_state_sw_errors in drbd_strings.c */
	enum drbd_fencing_p fp;

	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;

		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;
/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 */
static int is_valid_state_transition(struct drbd_conf *mdev,
				     union drbd_state ns, union drbd_state os)
	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, int *warn_sync_abort)
	enum drbd_fencing_p fp;

	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;

	/* Disallow network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)

	if (ns.conn < C_CONNECTED) {
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
			*warn_sync_abort = 1;
		ns.conn = C_CONNECTED;

	if (ns.conn >= C_CONNECTED &&
	    ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
	     (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
		case C_PAUSED_SYNC_T:
			ns.disk = D_OUTDATED;
		case C_PAUSED_SYNC_S:
			ns.disk = D_UP_TO_DATE;
			ns.disk = D_INCONSISTENT;
			dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
		if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
			dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");

	if (ns.conn >= C_CONNECTED &&
	    (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
		case C_PAUSED_SYNC_T:
			ns.pdsk = D_UP_TO_DATE;
		case C_PAUSED_SYNC_S:
			/* remap any consistent state to D_OUTDATED,
			 * but disallow "upgrade" of not even consistent states. */
				(D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
				? os.pdsk : D_OUTDATED;
			ns.pdsk = D_INCONSISTENT;
			dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
		if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
			dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total)
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
		mdev->ov_position = mdev->ov_start_sector;
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
int __drbd_set_state(struct drbd_conf *mdev,
		     union drbd_state ns, enum chg_state_flags flags,
		     struct completion *done)
	int warn_sync_abort = 0;
	struct after_state_chg_work *ascw;

	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks ; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */
		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
			rv = is_valid_state_transition(mdev, ns, os);

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);

	if (warn_sync_abort)
		dev_warn(DEV, "Resync aborted.\n");
	dev_info(DEV, "%s\n", pb);

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	mdev->state.i = ns.i;
	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* post-state-change actions */
	if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) {
		set_bit(STOP_SYNC_TIMER, &mdev->flags);
		mod_timer(&mdev->resync_timer, jiffies);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET) {
			if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
				mod_timer(&mdev->resync_timer, jiffies);
			/* This if (!test_bit) is only needed for the case
			   that a device that has ceased to use its timer,
			   i.e. is already in drbd_resync_finished(), gets
			   paused and resumed. */

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
		if (ns.conn == C_PAUSED_SYNC_T)
			set_bit(STOP_SYNC_TIMER, &mdev->flags);

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;

		mdev->ov_position = 0;
		mdev->rs_total = drbd_bm_bits(mdev);
		if (mdev->agreed_pro_version >= 90)
			set_ov_position(mdev, ns.conn);
			mdev->ov_start_sector = 0;
		mdev->ov_left = mdev->rs_total
			      - BM_SECT_TO_BIT(mdev->ov_position);
		mdev->rs_start = now;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->rs_total;
			mdev->rs_mark_time[i] = now;

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);
	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		drbd_queue_work(&mdev->data.work, &ascw->w);
		dev_warn(DEV, "Could not kmalloc an ascw\n");

static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
static void abw_start_sync(struct drbd_conf *mdev, int rv)
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
	enum drbd_fencing_p fp;
	enum drbd_req_event what = nothing;

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);

	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;

	/* Inform userspace about the change... */
	drbd_bcast_state(mdev, ns);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (os.susp && ns.susp && mdev->sync_conf.on_no_data == OND_SUSPEND_IO) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			if (ns.conn == C_CONNECTED)
			else /* ns.conn > C_CONNECTED */
				dev_err(DEV, "Unexpected Resync going on!\n");

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = restart_frozen_disk_io;

	if (fp == FP_STONITH && ns.susp) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);

	if (what != nothing) {
		spin_lock_irq(&mdev->req_lock);
		_tl_restart(mdev, what);
		_drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);
	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (mdev->state.susp) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			drbd_al_to_on_disk_bm(mdev);
	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	/* We are in the progress to start a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");

	/* We are invalidating our self... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");

	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;

		if (get_ldev_if_state(mdev, D_FAILED)) {
			eh = mdev->ldev->dc.on_io_error;

		drbd_rs_cancel_all(mdev);
		/* since get_ldev() only works as long as disk>=D_INCONSISTENT,
		   and it is D_DISKLESS here, local_cnt can only go down, it
		   cannot increase... It will reach zero */
		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		spin_lock_irq(&mdev->req_lock);
		_drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");

	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
		if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ {
			if (drbd_send_state(mdev))
				dev_warn(DEV, "Notified peer that my disk is broken.\n");
				dev_err(DEV, "Sending state in drbd_io_error() failed\n");

		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
		lc_destroy(mdev->resync);
		mdev->resync = NULL;
		lc_destroy(mdev->act_log);
		mdev->act_log = NULL;
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;

		if (mdev->md_io_tmpp)
			__free_page(mdev->md_io_tmpp);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* free tl_hash if we got thawed and are C_STANDALONE */
	if (ns.conn == C_STANDALONE && ns.susp == 0 && mdev->tl_hash)
		drbd_free_tl_hash(mdev);

	/* Upon network connection, we need to start the receiver */
	if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
		drbd_thread_start(&mdev->receiver);

	/* Terminate worker thread if we are unconfigured - it will be
	   restarted as needed... */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
		if (test_bit(DEVICE_DYING, &mdev->flags))
			drbd_thread_stop_nowait(&mdev->worker);
static int drbd_thread_setup(void *arg)
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_conf *mdev = thi->mdev;
	unsigned long flags;

	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "Exiting", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "Restarting" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees Exiting, and can remap to Restarting,
	 * or thread_start see None, and can proceed as normal.
	 */

	if (thi->t_state == Restarting) {
		dev_info(DEV, "Restarting %s\n", current->comm);
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);

	thi->t_state = None;
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	dev_info(DEV, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);

static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *))
	spin_lock_init(&thi->t_lock);
	thi->t_state = None;
	thi->function = func;

int drbd_thread_start(struct drbd_thread *thi)
	struct drbd_conf *mdev = thi->mdev;
	struct task_struct *nt;
	unsigned long flags;

		thi == &mdev->receiver ? "receiver" :
		thi == &mdev->asender  ? "asender"  :
		thi == &mdev->worker   ? "worker"   : "NONSENSE";

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
		dev_info(DEV, "Starting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);

		init_completion(&thi->stop);
		D_ASSERT(thi->task == NULL);
		thi->reset_cpu_mask = 1;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd%d_%s", mdev_to_minor(mdev), me);
			dev_err(DEV, "Couldn't start thread\n");
			module_put(THIS_MODULE);

		spin_lock_irqsave(&thi->t_lock, flags);
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);

		thi->t_state = Restarting;
		dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);

		spin_unlock_irqrestore(&thi->t_lock, flags);

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
	unsigned long flags;
	enum drbd_thread_state ns = restart ? Restarting : Exiting;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == None) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
			drbd_thread_start(thi);

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);

		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);

	spin_unlock_irqrestore(&thi->t_lock, flags);

		wait_for_completion(&thi->stop);
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @mdev:	DRBD device.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_conf *mdev)
	/* user override. */
	if (cpumask_weight(mdev->cpu_mask))

	ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
			cpumask_set_cpu(cpu, mdev->cpu_mask);

	/* should not be reached */
	cpumask_setall(mdev->cpu_mask);
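/* Example (illustrative): with 4 CPUs online, minor 5 yields ord == 1, so the
 * receiver, worker and asender of that device are all pinned to the second
 * online CPU; minors thus spread round-robin over the available CPUs. */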
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @mdev:	DRBD device.
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 */
void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
	struct task_struct *p = current;
	struct drbd_thread *thi =
		p == mdev->asender.task  ? &mdev->asender  :
		p == mdev->receiver.task ? &mdev->receiver :
		p == mdev->worker.task   ? &mdev->worker   :

	if (!thi->reset_cpu_mask)
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, mdev->cpu_mask);
/* the appropriate socket mutex must be held already */
int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
		   enum drbd_packets cmd, struct p_header *h,
		   size_t size, unsigned msg_flags)
	ERR_IF(!h) return FALSE;
	ERR_IF(!size) return FALSE;

	h->magic   = BE_DRBD_MAGIC;
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size-sizeof(struct p_header));

	sent = drbd_send(mdev, sock, h, size, msg_flags);

	ok = (sent == size);
		dev_err(DEV, "short sent %s size=%d sent=%d\n",
		    cmdname(cmd), (int)size, sent);

/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
		  enum drbd_packets cmd, struct p_header *h, size_t size)
	struct socket *sock;

	if (use_data_socket) {
		mutex_lock(&mdev->data.mutex);
		sock = mdev->data.socket;
		mutex_lock(&mdev->meta.mutex);
		sock = mdev->meta.socket;

	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (likely(sock != NULL))
		ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);

	if (use_data_socket)
		mutex_unlock(&mdev->data.mutex);
		mutex_unlock(&mdev->meta.mutex);

int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
	h.magic   = BE_DRBD_MAGIC;
	h.command = cpu_to_be16(cmd);
	h.length  = cpu_to_be16(size);

	if (!drbd_get_data_sock(mdev))

		drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
		drbd_send(mdev, mdev->data.socket, data, size, 0));

	drbd_put_data_sock(mdev);
int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
	struct p_rs_param_95 *p;
	struct socket *sock;
	const int apv = mdev->agreed_pro_version;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->sync_conf.verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);
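	/* Restated for clarity (derived from the expression above):
	 *	apv <= 87:   sizeof(struct p_rs_param)
	 *	apv == 88:   sizeof(struct p_rs_param) + strlen(verify_alg) + 1
	 *	apv 89..94:  sizeof(struct p_rs_param_89)
	 *	apv >= 95:   sizeof(struct p_rs_param_95)
	 */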
	/* used from admin command context and receiver/worker context.
	 * to avoid kmalloc, grab the socket right here,
	 * then use the pre-allocated sbuf there */
	mutex_lock(&mdev->data.mutex);
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

		p = &mdev->data.sbuf.rs_param_95;

		/* initialize verify_alg and csums_alg */
		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

		p->rate = cpu_to_be32(sc->rate);
		p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(sc->c_delay_target);
		p->c_fill_target = cpu_to_be32(sc->c_fill_target);
		p->c_max_rate = cpu_to_be32(sc->c_max_rate);

			strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
			strcpy(p->csums_alg, mdev->sync_conf.csums_alg);

		rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
		rv = 0; /* not ok */

	mutex_unlock(&mdev->data.mutex);
int drbd_send_protocol(struct drbd_conf *mdev)
	struct p_protocol *p;

	size = sizeof(struct p_protocol);

	if (mdev->agreed_pro_version >= 87)
		size += strlen(mdev->net_conf->integrity_alg) + 1;

	/* we must not recurse into our own queue,
	 * as that is blocked during handshake */
	p = kmalloc(size, GFP_NOIO);

	p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);

	if (mdev->net_conf->want_lose)
	if (mdev->net_conf->dry_run) {
		if (mdev->agreed_pro_version >= 92)
			dev_err(DEV, "--dry-run is not supported by peer");

	p->conn_flags    = cpu_to_be32(cf);

	if (mdev->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);

	rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
			   (struct p_header *)p, size);
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))

	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
			     (struct p_header *)&p, sizeof(p));

int drbd_send_uuids(struct drbd_conf *mdev)
	return _drbd_send_uuids(mdev, 0);

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
	return _drbd_send_uuids(mdev, 8);

int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
	p.uuid = cpu_to_be64(val);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
			     (struct p_header *)&p, sizeof(p));

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
	sector_t d_size, u_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		q_order_type = QUEUE_ORDERED_NONE;

	p.d_size = cpu_to_be64(d_size);
	p.u_size = cpu_to_be64(u_size);
	p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
	p.queue_order_type = cpu_to_be16(q_order_type);
	p.dds_flags = cpu_to_be16(flags);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
			   (struct p_header *)&p, sizeof(p));
/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
	struct socket *sock;

	/* Grab state lock so we won't send state if we're in the middle
	 * of a cluster wide state change on another thread */
	drbd_state_lock(mdev);

	mutex_lock(&mdev->data.mutex);

	p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		ok = _drbd_send_cmd(mdev, sock, P_STATE,
				    (struct p_header *)&p, sizeof(p), 0);

	mutex_unlock(&mdev->data.mutex);

	drbd_state_unlock(mdev);
int drbd_send_state_req(struct drbd_conf *mdev,
			union drbd_state mask, union drbd_state val)
	struct p_req_state p;

	p.mask = cpu_to_be32(mask.i);
	p.val  = cpu_to_be32(val.i);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
			     (struct p_header *)&p, sizeof(p));

int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
	struct p_req_state_reply p;

	p.retcode = cpu_to_be32(retcode);

	return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
			     (struct p_header *)&p, sizeof(p));
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 struct bm_xfer_ctx *c)
	struct bitstream bs;
	unsigned long plain_bits;

	/* may we use this feature? */
	if ((mdev->sync_conf.use_rle == 0) ||
		(mdev->agreed_pro_version < 90))

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
	memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
	/* plain bits covered in this code string */

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
				/* the first checked bit was set,
				 * store start value, */
				DCBP_set_start(p, 1);
				/* but skip encoding of zero run length */
				DCBP_set_start(p, 0);

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);

		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
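/* Worked example (illustrative, not from the original source): a stretch of
 * 4 clear bits, 2 set bits and 7 clear bits becomes DCBP_set_start(p, 0)
 * plus the VLI-encoded run lengths 4, 2, 7; the receiver toggles the bit
 * value after each decoded run. */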
enum { OK, FAILED, DONE }
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
	struct p_header *h, struct bm_xfer_ctx *c)
	struct p_compressed_bm *p = (void *)h;
	unsigned long num_words;

	len = fill_bitmap_rle_bits(mdev, p, c);

		DCBP_set_code(p, RLE_VLI_Bits);
		ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
			sizeof(*p) + len, 0);
		c->bytes[0] += sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)

		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
		len = num_words * sizeof(long);
			drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long *)h->payload);
		ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
				   h, sizeof(struct p_header) + len, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		c->bytes[1] += sizeof(struct p_header) + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;

	ok = ok ? ((len == 0) ? DONE : OK) : FAILED;

		INFO_bm_xfer_stats(mdev, "send", c);
/* See the comment at receive_bitmap() */
int _drbd_send_bitmap(struct drbd_conf *mdev)
	struct bm_xfer_ctx c;

	ERR_IF(!mdev->bitmap) return FALSE;

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	p = (struct p_header *) __get_free_page(GFP_NOIO);
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),

		ret = send_bitmap_rle_or_plain(mdev, p, &c);
	} while (ret == OK);

	free_page((unsigned long) p);
	return (ret == DONE);

int drbd_send_bitmap(struct drbd_conf *mdev)
	if (!drbd_get_data_sock(mdev))
	err = !_drbd_send_bitmap(mdev);
	drbd_put_data_sock(mdev);
2142 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2145 struct p_barrier_ack p;
2147 p.barrier = barrier_nr;
2148 p.set_size = cpu_to_be32(set_size);
2150 if (mdev->state.conn < C_CONNECTED)
2152 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2153 (struct p_header *)&p, sizeof(p));
2158 * _drbd_send_ack() - Sends an ack packet
2159 * @mdev: DRBD device.
2160 * @cmd: Packet command code.
2161 * @sector: sector, needs to be in big endian byte order
2162 * @blksize: size in bytes, needs to be in big endian byte order
2163 * @block_id: Id, big endian byte order
2165 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2171 struct p_block_ack p;
2174 p.block_id = block_id;
2175 p.blksize = blksize;
2176 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2178 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2180 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2181 (struct p_header *)&p, sizeof(p));
2185 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2188 const int header_size = sizeof(struct p_data)
2189 - sizeof(struct p_header);
2190 int data_size = ((struct p_header *)dp)->length - header_size;
2192 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2196 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2197 struct p_block_req *rp)
2199 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2203 * drbd_send_ack() - Sends an ack packet
2204 * @mdev: DRBD device.
2205 * @cmd: Packet command code.
2208 int drbd_send_ack(struct drbd_conf *mdev,
2209 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2211 return _drbd_send_ack(mdev, cmd,
2212 cpu_to_be64(e->sector),
2213 cpu_to_be32(e->size),
2217 /* This function misuses the block_id field to signal if the blocks
2218 * are in sync or not. */
2219 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2220 sector_t sector, int blksize, u64 block_id)
2222 return _drbd_send_ack(mdev, cmd,
2223 cpu_to_be64(sector),
2224 cpu_to_be32(blksize),
2225 cpu_to_be64(block_id));
2228 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2229 sector_t sector, int size, u64 block_id)
2232 struct p_block_req p;
2234 p.sector = cpu_to_be64(sector);
2235 p.block_id = block_id;
2236 p.blksize = cpu_to_be32(size);
2238 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2239 (struct p_header *)&p, sizeof(p));
2243 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2244 sector_t sector, int size,
2245 void *digest, int digest_size,
2246 enum drbd_packets cmd)
2249 struct p_block_req p;
2251 p.sector = cpu_to_be64(sector);
2252 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2253 p.blksize = cpu_to_be32(size);
2255 p.head.magic = BE_DRBD_MAGIC;
2256 p.head.command = cpu_to_be16(cmd);
2257 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
2259 mutex_lock(&mdev->data.mutex);
2261 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2262 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2264 mutex_unlock(&mdev->data.mutex);
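/*
 * Wire-format note: the digest is not a member of struct p_block_req,
 * which is why head.length above is sizeof(p) minus the header plus
 * digest_size, and why header and digest go out as two back-to-back
 * drbd_send() calls under a single hold of the data mutex -- that keeps
 * them contiguous on the TCP stream.
 */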
2269 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2272 struct p_block_req p;
2274 p.sector = cpu_to_be64(sector);
2275 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2276 p.blksize = cpu_to_be32(size);
2278 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2279 (struct p_header *)&p, sizeof(p));
2283 /* called on sndtimeo
2284 * returns FALSE if we should retry,
2285 * TRUE if we think the connection is dead
2287 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2290 /* long elapsed = (long)(jiffies - mdev->last_received); */
2292 drop_it = mdev->meta.socket == sock
2293 || !mdev->asender.task
2294 || get_t_state(&mdev->asender) != Running
2295 || mdev->state.conn < C_CONNECTED;
2300 drop_it = !--mdev->ko_count;
2302 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2303 current->comm, current->pid, mdev->ko_count);
2307 return drop_it; /* && (mdev->state == R_PRIMARY) */;
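/*
 * ko_count ("knock-out" count): drbd_send() re-arms it from
 * net_conf->ko_count whenever it starts sending on the data socket;
 * each send timeout decrements it here, and once it reaches zero we
 * declare the peer dead.
 */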
2310 /* The idea of sendpage seems to be to put some kind of reference
2311 * to the page into the skb, and to hand it over to the NIC. In
2312 * this process get_page() gets called.
2314 * As soon as the page was really sent over the network put_page()
2315 * gets called by some part of the network layer. [ NIC driver? ]
2317 * [ get_page() / put_page() increment/decrement the count. If count
2318 * reaches 0 the page will be freed. ]
2320 * This works nicely with pages from FSs.
2321 * But this means that in protocol A we might signal IO completion too early!
2323 * In order not to corrupt data during a resync we must make sure
2324 * that we do not reuse our own buffer pages (EEs) too early, therefore
2325 * we have the net_ee list.
2327 * XFS seems to have problems, still, it submits pages with page_count == 0!
2328 * As a workaround, we disable sendpage on pages
2329 * with page_count == 0 or PageSlab.
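/*
 * Sketch of that gate (not a helper that exists in this file):
 *
 *	if (disable_sendpage || page_count(page) < 1 || PageSlab(page))
 *		-> copy through kmap() in _drbd_no_send_page()
 *	else
 *		-> zero copy via ->sendpage()
 */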
2331 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2332 int offset, size_t size, unsigned msg_flags)
2334 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2335 kunmap(page);
2336 if (sent == size)
2337 mdev->send_cnt += size>>9;
2338 return sent == size;
2341 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2342 int offset, size_t size, unsigned msg_flags)
2344 mm_segment_t oldfs = get_fs();
2348 /* e.g. XFS meta- & log-data is in slab pages, which have a
2349 * page_count of 0 and/or have PageSlab() set.
2350 * we cannot use send_page for those, as that does get_page();
2351 * put_page(); and would cause either a VM_BUG directly, or
2352 * __page_cache_release a page that would actually still be referenced
2353 * by someone, leading to some obscure delayed Oops somewhere else. */
2354 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2355 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2357 msg_flags |= MSG_NOSIGNAL;
2358 drbd_update_congested(mdev);
2361 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2364 if (sent == -EAGAIN) {
2365 if (we_should_drop_the_connection(mdev,
2372 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2373 __func__, (int)size, len, sent);
2378 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2380 clear_bit(NET_CONGESTED, &mdev->flags);
2384 mdev->send_cnt += size>>9;
2388 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2390 struct bio_vec *bvec;
2392 /* hint all but last page with MSG_MORE */
2393 __bio_for_each_segment(bvec, bio, i, 0) {
2394 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2395 bvec->bv_offset, bvec->bv_len,
2396 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2402 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2404 struct bio_vec *bvec;
2406 /* hint all but last page with MSG_MORE */
2407 __bio_for_each_segment(bvec, bio, i, 0) {
2408 if (!_drbd_send_page(mdev, bvec->bv_page,
2409 bvec->bv_offset, bvec->bv_len,
2410 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2416 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2418 struct page *page = e->pages;
2419 unsigned len = e->size;
2420 /* hint all but last page with MSG_MORE */
2421 page_chain_for_each(page) {
2422 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2423 if (!_drbd_send_page(mdev, page, 0, l,
2424 page_chain_next(page) ? MSG_MORE : 0))
2431 /* Used to send write requests
2432 * R_PRIMARY -> Peer (P_DATA)
2434 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2438 unsigned int dp_flags = 0;
2442 if (!drbd_get_data_sock(mdev))
2445 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2446 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2448 p.head.magic = BE_DRBD_MAGIC;
2449 p.head.command = cpu_to_be16(P_DATA);
2451 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size);
2453 p.sector = cpu_to_be64(req->sector);
2454 p.block_id = (unsigned long)req;
2455 p.seq_num = cpu_to_be32(req->seq_num =
2456 atomic_add_return(1, &mdev->packet_seq));
2459 /* NOTE: no need to check if barriers supported here as we would
2460 * not pass the test in make_request_common in that case
2462 if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
2463 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
2464 /* dp_flags |= DP_HARDBARRIER; */
2466 if (req->master_bio->bi_rw & REQ_SYNC)
2467 dp_flags |= DP_RW_SYNC;
2468 /* for now handle SYNCIO and UNPLUG
2469 * as if they still were one and the same flag */
2470 if (req->master_bio->bi_rw & REQ_UNPLUG)
2471 dp_flags |= DP_RW_SYNC;
2472 if (mdev->state.conn >= C_SYNC_SOURCE &&
2473 mdev->state.conn <= C_PAUSED_SYNC_T)
2474 dp_flags |= DP_MAY_SET_IN_SYNC;
2476 p.dp_flags = cpu_to_be32(dp_flags);
2477 set_bit(UNPLUG_REMOTE, &mdev->flags);
2478 ok = (sizeof(p) ==
2479 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2481 dgb = mdev->int_dig_out;
2482 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2483 ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2486 if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
2487 ok = _drbd_send_bio(mdev, req->master_bio);
2489 ok = _drbd_send_zc_bio(mdev, req->master_bio);
2492 drbd_put_data_sock(mdev);
2497 /* answer packet, used to send data back for read requests:
2498 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2499 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2501 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2502 struct drbd_epoch_entry *e)
2509 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2510 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2512 p.head.magic = BE_DRBD_MAGIC;
2513 p.head.command = cpu_to_be16(cmd);
2515 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size);
2517 p.sector = cpu_to_be64(e->sector);
2518 p.block_id = e->block_id;
2519 /* p.seq_num = 0; No sequence numbers here.. */
2521 /* Only called by our kernel thread.
2522 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2523 * in response to admin command or module unload.
2525 if (!drbd_get_data_sock(mdev))
2528 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
2529 sizeof(p), dgs ? MSG_MORE : 0);
2531 dgb = mdev->int_dig_out;
2532 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2533 ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2536 ok = _drbd_send_zc_ee(mdev, e);
2538 drbd_put_data_sock(mdev);
2544 drbd_send distinguishes two cases:
2546 Packets sent via the data socket "sock"
2547 and packets sent via the meta data socket "msock"
2550 -----------------+-------------------------+------------------------------
2551 timeout conf.timeout / 2 conf.timeout / 2
2552 timeout action send a ping via msock Abort communication
2553 and close all sockets
2557 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2559 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2560 void *buf, size_t size, unsigned msg_flags)
2569 /* THINK if (signal_pending) return ... ? */
2574 msg.msg_name = NULL;
2575 msg.msg_namelen = 0;
2576 msg.msg_control = NULL;
2577 msg.msg_controllen = 0;
2578 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2580 if (sock == mdev->data.socket) {
2581 mdev->ko_count = mdev->net_conf->ko_count;
2582 drbd_update_congested(mdev);
2586 * tcp_sendmsg does _not_ use its size parameter at all ?
2588 * -EAGAIN on timeout, -EINTR on signal.
2591 * do we need to block DRBD_SIG if sock == &meta.socket ??
2592 * otherwise wake_asender() might interrupt some send_*Ack !
2594 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2595 if (rv == -EAGAIN) {
2596 if (we_should_drop_the_connection(mdev, sock))
2603 flush_signals(current);
2611 } while (sent < size);
2613 if (sock == mdev->data.socket)
2614 clear_bit(NET_CONGESTED, &mdev->flags);
2617 if (rv != -EAGAIN) {
2618 dev_err(DEV, "%s_sendmsg returned %d\n",
2619 sock == mdev->meta.socket ? "msock" : "sock",
2621 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2623 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
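/*
 * Caller-pattern sketch (compare drbd_send_drequest_csum() above);
 * the socket mutex is what the "down()ed [m]sock_mutex" requirement
 * in the comment above refers to:
 *
 *	mutex_lock(&mdev->data.mutex);
 *	ok = (size == drbd_send(mdev, mdev->data.socket, buf, size, 0));
 *	mutex_unlock(&mdev->data.mutex);
 */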
2629 static int drbd_open(struct block_device *bdev, fmode_t mode)
2631 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2632 unsigned long flags;
2636 spin_lock_irqsave(&mdev->req_lock, flags);
2637 /* to have a stable mdev->state.role
2638 * and no race with updating open_cnt */
2640 if (mdev->state.role != R_PRIMARY) {
2641 if (mode & FMODE_WRITE)
2643 else if (!allow_oos)
2649 spin_unlock_irqrestore(&mdev->req_lock, flags);
2655 static int drbd_release(struct gendisk *gd, fmode_t mode)
2657 struct drbd_conf *mdev = gd->private_data;
2664 static void drbd_unplug_fn(struct request_queue *q)
2666 struct drbd_conf *mdev = q->queuedata;
2669 spin_lock_irq(q->queue_lock);
2671 spin_unlock_irq(q->queue_lock);
2673 /* only if connected */
2674 spin_lock_irq(&mdev->req_lock);
2675 if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
2676 D_ASSERT(mdev->state.role == R_PRIMARY);
2677 if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
2678 /* add to the data.work queue,
2679 * unless already queued.
2680 * XXX this might be a good addition to drbd_queue_work
2681 * anyways, to detect "double queuing" ... */
2682 if (list_empty(&mdev->unplug_work.list))
2683 drbd_queue_work(&mdev->data.work,
2684 &mdev->unplug_work);
2687 spin_unlock_irq(&mdev->req_lock);
2689 if (mdev->state.disk >= D_INCONSISTENT)
2693 static void drbd_set_defaults(struct drbd_conf *mdev)
2695 /* This way we get a compile error when sync_conf grows,
2696 and we forget to initialize it here */
2697 mdev->sync_conf = (struct syncer_conf) {
2698 /* .rate = */ DRBD_RATE_DEF,
2699 /* .after = */ DRBD_AFTER_DEF,
2700 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
2701 /* .verify_alg = */ {}, 0,
2702 /* .cpu_mask = */ {}, 0,
2703 /* .csums_alg = */ {}, 0,
2705 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2706 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2707 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2708 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
2709 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF
2712 /* Have to initialize it this way, because the bitfield layout differs
2713 between big-endian and little-endian machines */
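/*
 * union drbd_state packs role/conn/disk etc. as bitfields of a single
 * 32 bit word, and drbd.h declares the fields in opposite order for
 * big- and little-endian builds; positional initialization would fill
 * the wrong members, hence the designated initializers below.
 */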
2714 mdev->state = (union drbd_state) {
2715 { .role = R_SECONDARY,
2717 .conn = C_STANDALONE,
2724 void drbd_init_set_defaults(struct drbd_conf *mdev)
2726 /* the memset(,0,) did most of this.
2727 * note: only assignments, no allocation in here */
2729 drbd_set_defaults(mdev);
2731 /* for now, we do NOT yet support it,
2732 * even though we start some framework
2733 * to eventually support barriers */
2734 set_bit(NO_BARRIER_SUPP, &mdev->flags);
2736 atomic_set(&mdev->ap_bio_cnt, 0);
2737 atomic_set(&mdev->ap_pending_cnt, 0);
2738 atomic_set(&mdev->rs_pending_cnt, 0);
2739 atomic_set(&mdev->unacked_cnt, 0);
2740 atomic_set(&mdev->local_cnt, 0);
2741 atomic_set(&mdev->net_cnt, 0);
2742 atomic_set(&mdev->packet_seq, 0);
2743 atomic_set(&mdev->pp_in_use, 0);
2744 atomic_set(&mdev->rs_sect_in, 0);
2746 mutex_init(&mdev->md_io_mutex);
2747 mutex_init(&mdev->data.mutex);
2748 mutex_init(&mdev->meta.mutex);
2749 sema_init(&mdev->data.work.s, 0);
2750 sema_init(&mdev->meta.work.s, 0);
2751 mutex_init(&mdev->state_mutex);
2753 spin_lock_init(&mdev->data.work.q_lock);
2754 spin_lock_init(&mdev->meta.work.q_lock);
2756 spin_lock_init(&mdev->al_lock);
2757 spin_lock_init(&mdev->req_lock);
2758 spin_lock_init(&mdev->peer_seq_lock);
2759 spin_lock_init(&mdev->epoch_lock);
2761 INIT_LIST_HEAD(&mdev->active_ee);
2762 INIT_LIST_HEAD(&mdev->sync_ee);
2763 INIT_LIST_HEAD(&mdev->done_ee);
2764 INIT_LIST_HEAD(&mdev->read_ee);
2765 INIT_LIST_HEAD(&mdev->net_ee);
2766 INIT_LIST_HEAD(&mdev->resync_reads);
2767 INIT_LIST_HEAD(&mdev->data.work.q);
2768 INIT_LIST_HEAD(&mdev->meta.work.q);
2769 INIT_LIST_HEAD(&mdev->resync_work.list);
2770 INIT_LIST_HEAD(&mdev->unplug_work.list);
2771 INIT_LIST_HEAD(&mdev->md_sync_work.list);
2772 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2774 mdev->resync_work.cb = w_resync_inactive;
2775 mdev->unplug_work.cb = w_send_write_hint;
2776 mdev->md_sync_work.cb = w_md_sync;
2777 mdev->bm_io_work.w.cb = w_bitmap_io;
2778 init_timer(&mdev->resync_timer);
2779 init_timer(&mdev->md_sync_timer);
2780 mdev->resync_timer.function = resync_timer_fn;
2781 mdev->resync_timer.data = (unsigned long) mdev;
2782 mdev->md_sync_timer.function = md_sync_timer_fn;
2783 mdev->md_sync_timer.data = (unsigned long) mdev;
2785 init_waitqueue_head(&mdev->misc_wait);
2786 init_waitqueue_head(&mdev->state_wait);
2787 init_waitqueue_head(&mdev->net_cnt_wait);
2788 init_waitqueue_head(&mdev->ee_wait);
2789 init_waitqueue_head(&mdev->al_wait);
2790 init_waitqueue_head(&mdev->seq_wait);
2792 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
2793 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
2794 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2796 mdev->agreed_pro_version = PRO_VERSION_MAX;
2797 mdev->write_ordering = WO_bio_barrier;
2798 mdev->resync_wenr = LC_FREE;
2801 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2804 if (mdev->receiver.t_state != None)
2805 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2806 mdev->receiver.t_state);
2808 /* no need to lock it, I'm the only thread alive */
2809 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
2810 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2820 mdev->rs_failed = 0;
2821 mdev->rs_last_events = 0;
2822 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2823 mdev->rs_mark_left[i] = 0;
2824 mdev->rs_mark_time[i] = 0;
2826 D_ASSERT(mdev->net_conf == NULL);
2828 drbd_set_my_capacity(mdev, 0);
2830 /* maybe never allocated. */
2831 drbd_bm_resize(mdev, 0, 1);
2832 drbd_bm_cleanup(mdev);
2835 drbd_free_resources(mdev);
2838 * currently we call drbd_init_ee only on module load, so
2839 * we may call drbd_release_ee only on module unload!
2841 D_ASSERT(list_empty(&mdev->active_ee));
2842 D_ASSERT(list_empty(&mdev->sync_ee));
2843 D_ASSERT(list_empty(&mdev->done_ee));
2844 D_ASSERT(list_empty(&mdev->read_ee));
2845 D_ASSERT(list_empty(&mdev->net_ee));
2846 D_ASSERT(list_empty(&mdev->resync_reads));
2847 D_ASSERT(list_empty(&mdev->data.work.q));
2848 D_ASSERT(list_empty(&mdev->meta.work.q));
2849 D_ASSERT(list_empty(&mdev->resync_work.list));
2850 D_ASSERT(list_empty(&mdev->unplug_work.list));
2855 static void drbd_destroy_mempools(void)
2859 while (drbd_pp_pool) {
2860 page = drbd_pp_pool;
2861 drbd_pp_pool = (struct page *)page_private(page);
2866 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2868 if (drbd_ee_mempool)
2869 mempool_destroy(drbd_ee_mempool);
2870 if (drbd_request_mempool)
2871 mempool_destroy(drbd_request_mempool);
2873 kmem_cache_destroy(drbd_ee_cache);
2874 if (drbd_request_cache)
2875 kmem_cache_destroy(drbd_request_cache);
2876 if (drbd_bm_ext_cache)
2877 kmem_cache_destroy(drbd_bm_ext_cache);
2878 if (drbd_al_ext_cache)
2879 kmem_cache_destroy(drbd_al_ext_cache);
2881 drbd_ee_mempool = NULL;
2882 drbd_request_mempool = NULL;
2883 drbd_ee_cache = NULL;
2884 drbd_request_cache = NULL;
2885 drbd_bm_ext_cache = NULL;
2886 drbd_al_ext_cache = NULL;
2891 static int drbd_create_mempools(void)
2894 const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
2897 /* prepare our caches and mempools */
2898 drbd_request_mempool = NULL;
2899 drbd_ee_cache = NULL;
2900 drbd_request_cache = NULL;
2901 drbd_bm_ext_cache = NULL;
2902 drbd_al_ext_cache = NULL;
2903 drbd_pp_pool = NULL;
2906 drbd_request_cache = kmem_cache_create(
2907 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2908 if (drbd_request_cache == NULL)
2911 drbd_ee_cache = kmem_cache_create(
2912 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
2913 if (drbd_ee_cache == NULL)
2916 drbd_bm_ext_cache = kmem_cache_create(
2917 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2918 if (drbd_bm_ext_cache == NULL)
2921 drbd_al_ext_cache = kmem_cache_create(
2922 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2923 if (drbd_al_ext_cache == NULL)
2927 drbd_request_mempool = mempool_create(number,
2928 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2929 if (drbd_request_mempool == NULL)
2932 drbd_ee_mempool = mempool_create(number,
2933 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2934 if (drbd_ee_mempool == NULL)
2937 /* drbd's page pool */
2938 spin_lock_init(&drbd_pp_lock);
2940 for (i = 0; i < number; i++) {
2941 page = alloc_page(GFP_HIGHUSER);
2944 set_page_private(page, (unsigned long)drbd_pp_pool);
2945 drbd_pp_pool = page;
2947 drbd_pp_vacant = number;
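/*
 * The pool built above is a simple LIFO list threaded through
 * page_private(); popping a page mirrors the push (compare
 * drbd_destroy_mempools()):
 *
 *	page = drbd_pp_pool;
 *	drbd_pp_pool = (struct page *)page_private(page);
 */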
2952 drbd_destroy_mempools(); /* in case we allocated some */
2956 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2959 /* just so we have it. you never know what interesting things we
2960 * might want to do here some day...
2966 static struct notifier_block drbd_notifier = {
2967 .notifier_call = drbd_notify_sys,
2970 static void drbd_release_ee_lists(struct drbd_conf *mdev)
2974 rr = drbd_release_ee(mdev, &mdev->active_ee);
2976 dev_err(DEV, "%d EEs in active list found!\n", rr);
2978 rr = drbd_release_ee(mdev, &mdev->sync_ee);
2980 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2982 rr = drbd_release_ee(mdev, &mdev->read_ee);
2984 dev_err(DEV, "%d EEs in read list found!\n", rr);
2986 rr = drbd_release_ee(mdev, &mdev->done_ee);
2988 dev_err(DEV, "%d EEs in done list found!\n", rr);
2990 rr = drbd_release_ee(mdev, &mdev->net_ee);
2992 dev_err(DEV, "%d EEs in net list found!\n", rr);
2995 /* caution. no locking.
2996 * currently only used from module cleanup code. */
2997 static void drbd_delete_device(unsigned int minor)
2999 struct drbd_conf *mdev = minor_to_mdev(minor);
3004 /* paranoia asserts */
3005 if (mdev->open_cnt != 0)
3006 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3007 __FILE__ , __LINE__);
3009 ERR_IF (!list_empty(&mdev->data.work.q)) {
3010 struct list_head *lp;
3011 list_for_each(lp, &mdev->data.work.q) {
3012 dev_err(DEV, "lp = %p\n", lp);
3015 /* end paranoia asserts */
3017 del_gendisk(mdev->vdisk);
3019 /* cleanup stuff that may have been allocated during
3020 * device (re-)configuration or state changes */
3022 if (mdev->this_bdev)
3023 bdput(mdev->this_bdev);
3025 drbd_free_resources(mdev);
3027 drbd_release_ee_lists(mdev);
3029 /* should be freed on disconnect? */
3030 kfree(mdev->ee_hash);
3032 mdev->ee_hash_s = 0;
3033 mdev->ee_hash = NULL;
3036 lc_destroy(mdev->act_log);
3037 lc_destroy(mdev->resync);
3039 kfree(mdev->p_uuid);
3040 /* mdev->p_uuid = NULL; */
3042 kfree(mdev->int_dig_out);
3043 kfree(mdev->int_dig_in);
3044 kfree(mdev->int_dig_vv);
3046 /* cleanup the rest that has been
3047 * allocated from drbd_new_device
3048 * and actually free the mdev itself */
3049 drbd_free_mdev(mdev);
3052 static void drbd_cleanup(void)
3056 unregister_reboot_notifier(&drbd_notifier);
3062 remove_proc_entry("drbd", NULL);
3065 drbd_delete_device(i);
3066 drbd_destroy_mempools();
3071 unregister_blkdev(DRBD_MAJOR, "drbd");
3073 printk(KERN_INFO "drbd: module cleanup done.\n");
3077 * drbd_congested() - Callback for pdflush
3078 * @congested_data: User data
3079 * @bdi_bits: Bits pdflush is currently interested in
3081 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3083 static int drbd_congested(void *congested_data, int bdi_bits)
3085 struct drbd_conf *mdev = congested_data;
3086 struct request_queue *q;
3090 if (!__inc_ap_bio_cond(mdev)) {
3091 /* DRBD has frozen IO */
3097 if (get_ldev(mdev)) {
3098 q = bdev_get_queue(mdev->ldev->backing_bdev);
3099 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3105 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3106 r |= (1 << BDI_async_congested);
3107 reason = reason == 'b' ? 'a' : 'n';
3111 mdev->congestion_reason = reason;
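/*
 * congestion_reason is a one-character diagnostic tag: 'b' means the
 * backing device is congested, 'n' the network, 'a' both at once.
 */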
3115 struct drbd_conf *drbd_new_device(unsigned int minor)
3117 struct drbd_conf *mdev;
3118 struct gendisk *disk;
3119 struct request_queue *q;
3121 /* GFP_KERNEL, we are outside of all write-out paths */
3122 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3125 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3126 goto out_no_cpumask;
3128 mdev->minor = minor;
3130 drbd_init_set_defaults(mdev);
3132 q = blk_alloc_queue(GFP_KERNEL);
3136 q->queuedata = mdev;
3138 disk = alloc_disk(1);
3143 set_disk_ro(disk, TRUE);
3146 disk->major = DRBD_MAJOR;
3147 disk->first_minor = minor;
3148 disk->fops = &drbd_ops;
3149 sprintf(disk->disk_name, "drbd%d", minor);
3150 disk->private_data = mdev;
3152 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3153 /* we have no partitions. we contain only ourselves. */
3154 mdev->this_bdev->bd_contains = mdev->this_bdev;
3156 q->backing_dev_info.congested_fn = drbd_congested;
3157 q->backing_dev_info.congested_data = mdev;
3159 blk_queue_make_request(q, drbd_make_request_26);
3160 blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
3161 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3162 blk_queue_merge_bvec(q, drbd_merge_bvec);
3163 q->queue_lock = &mdev->req_lock; /* needed since we use */
3164 /* plugging on a queue that actually has no requests! */
3165 q->unplug_fn = drbd_unplug_fn;
3167 mdev->md_io_page = alloc_page(GFP_KERNEL);
3168 if (!mdev->md_io_page)
3169 goto out_no_io_page;
3171 if (drbd_bm_init(mdev))
3173 /* no need to lock access, we are still initializing this minor device. */
3177 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3178 if (!mdev->app_reads_hash)
3179 goto out_no_app_reads;
3181 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3182 if (!mdev->current_epoch)
3185 INIT_LIST_HEAD(&mdev->current_epoch->list);
3190 /* out_whatever_else:
3191 kfree(mdev->current_epoch); */
3193 kfree(mdev->app_reads_hash);
3197 drbd_bm_cleanup(mdev);
3199 __free_page(mdev->md_io_page);
3203 blk_cleanup_queue(q);
3205 free_cpumask_var(mdev->cpu_mask);
3211 /* counterpart of drbd_new_device.
3212 * last part of drbd_delete_device. */
3213 void drbd_free_mdev(struct drbd_conf *mdev)
3215 kfree(mdev->current_epoch);
3216 kfree(mdev->app_reads_hash);
3218 if (mdev->bitmap) /* should no longer be there. */
3219 drbd_bm_cleanup(mdev);
3220 __free_page(mdev->md_io_page);
3221 put_disk(mdev->vdisk);
3222 blk_cleanup_queue(mdev->rq_queue);
3223 free_cpumask_var(mdev->cpu_mask);
3228 int __init drbd_init(void)
3232 if (sizeof(struct p_handshake) != 80) {
3234 "drbd: never change the size or layout "
3235 "of the HandShake packet.\n");
3239 if (1 > minor_count || minor_count > 255) {
3241 "drbd: invalid minor_count (%d)\n", minor_count);
3249 err = drbd_nl_init();
3253 err = register_blkdev(DRBD_MAJOR, "drbd");
3256 "drbd: unable to register block device major %d\n",
3261 register_reboot_notifier(&drbd_notifier);
3264 * allocate all necessary structs
3268 init_waitqueue_head(&drbd_pp_wait);
3270 drbd_proc = NULL; /* play safe for drbd_cleanup */
3271 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3276 err = drbd_create_mempools();
3280 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3282 printk(KERN_ERR "drbd: unable to register proc file\n");
3286 rwlock_init(&global_state_lock);
3288 printk(KERN_INFO "drbd: initialized. "
3289 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3290 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3291 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3292 printk(KERN_INFO "drbd: registered as block device major %d\n",
3294 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3296 return 0; /* Success! */
3301 /* currently always the case */
3302 printk(KERN_ERR "drbd: ran out of memory\n");
3304 printk(KERN_ERR "drbd: initialization failure\n");
3308 void drbd_free_bc(struct drbd_backing_dev *ldev)
3313 bd_release(ldev->backing_bdev);
3314 bd_release(ldev->md_bdev);
3316 fput(ldev->lo_file);
3317 fput(ldev->md_file);
3322 void drbd_free_sock(struct drbd_conf *mdev)
3324 if (mdev->data.socket) {
3325 mutex_lock(&mdev->data.mutex);
3326 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3327 sock_release(mdev->data.socket);
3328 mdev->data.socket = NULL;
3329 mutex_unlock(&mdev->data.mutex);
3331 if (mdev->meta.socket) {
3332 mutex_lock(&mdev->meta.mutex);
3333 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3334 sock_release(mdev->meta.socket);
3335 mdev->meta.socket = NULL;
3336 mutex_unlock(&mdev->meta.mutex);
3341 void drbd_free_resources(struct drbd_conf *mdev)
3343 crypto_free_hash(mdev->csums_tfm);
3344 mdev->csums_tfm = NULL;
3345 crypto_free_hash(mdev->verify_tfm);
3346 mdev->verify_tfm = NULL;
3347 crypto_free_hash(mdev->cram_hmac_tfm);
3348 mdev->cram_hmac_tfm = NULL;
3349 crypto_free_hash(mdev->integrity_w_tfm);
3350 mdev->integrity_w_tfm = NULL;
3351 crypto_free_hash(mdev->integrity_r_tfm);
3352 mdev->integrity_r_tfm = NULL;
3354 drbd_free_sock(mdev);
3356 __no_warn(local,
3357 drbd_free_bc(mdev->ldev);
3358 mdev->ldev = NULL;);
3361 /* meta data management */
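/*
 * The on-disk superblock below is written and read as one 512 byte
 * sector (note the memset(buffer, 0, 512) in drbd_md_sync()); every
 * multi-byte field is stored big-endian on disk, hence the
 * cpu_to_be*()/be*_to_cpu() conversions on both the sync and read paths.
 */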
3363 struct meta_data_on_disk {
3364 u64 la_size; /* last agreed size. */
3365 u64 uuid[UI_SIZE]; /* UUIDs. */
3368 u32 flags; /* MDF */
3371 u32 al_offset; /* offset to this block */
3372 u32 al_nr_extents; /* important for restoring the AL */
3373 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3374 u32 bm_offset; /* offset to the bitmap, from here */
3375 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3376 u32 reserved_u32[4];
3381 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3382 * @mdev: DRBD device.
3384 void drbd_md_sync(struct drbd_conf *mdev)
3386 struct meta_data_on_disk *buffer;
3390 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3392 del_timer(&mdev->md_sync_timer);
3394 /* We use D_FAILED here and not D_ATTACHING because we try to write
3395 * metadata even if we detach due to a disk failure! */
3396 if (!get_ldev_if_state(mdev, D_FAILED))
3399 mutex_lock(&mdev->md_io_mutex);
3400 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3401 memset(buffer, 0, 512);
3403 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3404 for (i = UI_CURRENT; i < UI_SIZE; i++)
3405 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3406 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3407 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3409 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3410 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3411 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3412 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3413 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3415 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3417 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3418 sector = mdev->ldev->md.md_offset;
3420 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3421 clear_bit(MD_DIRTY, &mdev->flags);
3423 /* this was a try anyways ... */
3424 dev_err(DEV, "meta data update failed!\n");
3426 drbd_chk_io_error(mdev, 1, TRUE);
3429 /* Update mdev->ldev->md.la_size_sect,
3430 * since we updated it on metadata. */
3431 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3433 mutex_unlock(&mdev->md_io_mutex);
3438 * drbd_md_read() - Reads in the meta data super block
3439 * @mdev: DRBD device.
3440 * @bdev: Device from which the meta data should be read in.
3442 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
3443 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3445 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3447 struct meta_data_on_disk *buffer;
3448 int i, rv = NO_ERROR;
3450 if (!get_ldev_if_state(mdev, D_ATTACHING))
3451 return ERR_IO_MD_DISK;
3453 mutex_lock(&mdev->md_io_mutex);
3454 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3456 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3457 /* NOTE: can't do normal error processing here as this is
3458 called BEFORE disk is attached */
3459 dev_err(DEV, "Error while reading metadata.\n");
3460 rv = ERR_IO_MD_DISK;
3464 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3465 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3466 rv = ERR_MD_INVALID;
3469 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3470 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3471 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3472 rv = ERR_MD_INVALID;
3475 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3476 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3477 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3478 rv = ERR_MD_INVALID;
3481 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3482 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3483 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3484 rv = ERR_MD_INVALID;
3488 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3489 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3490 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3491 rv = ERR_MD_INVALID;
3495 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3496 for (i = UI_CURRENT; i < UI_SIZE; i++)
3497 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3498 bdev->md.flags = be32_to_cpu(buffer->flags);
3499 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3500 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3502 if (mdev->sync_conf.al_extents < 7)
3503 mdev->sync_conf.al_extents = 127;
3506 mutex_unlock(&mdev->md_io_mutex);
3513 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3514 * @mdev: DRBD device.
3516 * Call this function if you change anything that should be written to
3517 * the meta-data super block. This function sets MD_DIRTY, and starts a
3518 * timer that ensures that drbd_md_sync() gets called within five seconds.
3520 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3522 set_bit(MD_DIRTY, &mdev->flags);
3523 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
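/*
 * That five second deadline is enforced further down: md_sync_timer_fn()
 * queues w_md_sync() at the front of the worker queue, and that worker
 * callback invokes drbd_md_sync() if nobody beat it to it.
 */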
3527 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3531 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3532 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3535 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3537 if (idx == UI_CURRENT) {
3538 if (mdev->state.role == R_PRIMARY)
3543 drbd_set_ed_uuid(mdev, val);
3546 mdev->ldev->md.uuid[idx] = val;
3547 drbd_md_mark_dirty(mdev);
3551 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3553 if (mdev->ldev->md.uuid[idx]) {
3554 drbd_uuid_move_history(mdev);
3555 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3557 _drbd_uuid_set(mdev, idx, val);
3561 * drbd_uuid_new_current() - Creates a new current UUID
3562 * @mdev: DRBD device.
3564 * Creates a new current UUID, and rotates the old current UUID into
3565 * the bitmap slot. Causes an incremental resync upon next connect.
3567 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3571 dev_info(DEV, "Creating new current UUID\n");
3572 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3573 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3575 get_random_bytes(&val, sizeof(u64));
3576 _drbd_uuid_set(mdev, UI_CURRENT, val);
3579 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3581 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3585 drbd_uuid_move_history(mdev);
3586 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3587 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3589 if (mdev->ldev->md.uuid[UI_BITMAP])
3590 dev_warn(DEV, "bm UUID already set\n");
3592 mdev->ldev->md.uuid[UI_BITMAP] = val;
3593 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3596 drbd_md_mark_dirty(mdev);
3600 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3601 * @mdev: DRBD device.
3603 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3605 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3609 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3610 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3612 drbd_bm_set_all(mdev);
3614 rv = drbd_bm_write(mdev);
3617 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3628 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3629 * @mdev: DRBD device.
3631 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3633 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3637 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3638 drbd_bm_clear_all(mdev);
3639 rv = drbd_bm_write(mdev);
3646 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3648 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3651 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3653 drbd_bm_lock(mdev, work->why);
3654 rv = work->io_fn(mdev);
3655 drbd_bm_unlock(mdev);
3657 clear_bit(BITMAP_IO, &mdev->flags);
3658 wake_up(&mdev->misc_wait);
3661 work->done(mdev, rv);
3663 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3670 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3671 * @mdev: DRBD device.
3672 * @io_fn: IO callback to be called when bitmap IO is possible
3673 * @done: callback to be called after the bitmap IO was performed
3674 * @why: Descriptive text of the reason for doing the IO
3676 * While IO on the bitmap happens we freeze application IO, thus ensuring
3677 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3678 * called from worker context. It MUST NOT be used while a previous such
3679 * work is still pending!
3681 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3682 int (*io_fn)(struct drbd_conf *),
3683 void (*done)(struct drbd_conf *, int),
3686 D_ASSERT(current == mdev->worker.task);
3688 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3689 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3690 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3691 if (mdev->bm_io_work.why)
3692 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3693 why, mdev->bm_io_work.why);
3695 mdev->bm_io_work.io_fn = io_fn;
3696 mdev->bm_io_work.done = done;
3697 mdev->bm_io_work.why = why;
3699 set_bit(BITMAP_IO, &mdev->flags);
3700 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3701 if (list_empty(&mdev->bm_io_work.w.list)) {
3702 set_bit(BITMAP_IO_QUEUED, &mdev->flags);
3703 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
3705 dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
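/*
 * Usage sketch (a hypothetical call site): from worker context, queue a
 * full bitmap rewrite and learn about completion via a callback:
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *			     my_done_cb, "set_n_write sketch");
 *
 * where my_done_cb is a made-up void (*)(struct drbd_conf *, int).
 */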
3710 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3711 * @mdev: DRBD device.
3712 * @io_fn: IO callback to be called when bitmap IO is possible
3713 * @why: Descriptive text of the reason for doing the IO
3715 * Freezes application IO while the actual IO operation runs. This
3716 * function MAY NOT be called from worker context.
3718 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
3722 D_ASSERT(current != mdev->worker.task);
3724 drbd_suspend_io(mdev);
3726 drbd_bm_lock(mdev, why);
3728 drbd_bm_unlock(mdev);
3730 drbd_resume_io(mdev);
3735 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3737 if ((mdev->ldev->md.flags & flag) != flag) {
3738 drbd_md_mark_dirty(mdev);
3739 mdev->ldev->md.flags |= flag;
3743 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3745 if ((mdev->ldev->md.flags & flag) != 0) {
3746 drbd_md_mark_dirty(mdev);
3747 mdev->ldev->md.flags &= ~flag;
3750 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3752 return (bdev->md.flags & flag) != 0;
3755 static void md_sync_timer_fn(unsigned long data)
3757 struct drbd_conf *mdev = (struct drbd_conf *) data;
3759 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
3762 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3764 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3770 #ifdef CONFIG_DRBD_FAULT_INJECTION
3771 /* Fault insertion support including random number generator shamelessly
3772 * stolen from kernel/rcutorture.c */
3773 struct fault_random_state {
3774 unsigned long state;
3775 unsigned long count;
3778 #define FAULT_RANDOM_MULT 39916801 /* prime */
3779 #define FAULT_RANDOM_ADD 479001701 /* prime */
3780 #define FAULT_RANDOM_REFRESH 10000
3783 * Crude but fast random-number generator. Uses a linear congruential
3784 * generator, with occasional help from get_random_bytes().
3786 static unsigned long
3787 _drbd_fault_random(struct fault_random_state *rsp)
3791 if (!rsp->count--) {
3792 get_random_bytes(&refresh, sizeof(refresh));
3793 rsp->state += refresh;
3794 rsp->count = FAULT_RANDOM_REFRESH;
3796 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3797 return swahw32(rsp->state);
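/*
 * A textbook linear congruential generator:
 * state' = state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD, reseeded via
 * get_random_bytes() every FAULT_RANDOM_REFRESH calls.  swahw32()
 * swaps the 16 bit halfwords so callers see the better-mixed high bits.
 */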
3801 _drbd_fault_str(unsigned int type) {
3802 static char *_faults[] = {
3803 [DRBD_FAULT_MD_WR] = "Meta-data write",
3804 [DRBD_FAULT_MD_RD] = "Meta-data read",
3805 [DRBD_FAULT_RS_WR] = "Resync write",
3806 [DRBD_FAULT_RS_RD] = "Resync read",
3807 [DRBD_FAULT_DT_WR] = "Data write",
3808 [DRBD_FAULT_DT_RD] = "Data read",
3809 [DRBD_FAULT_DT_RA] = "Data read ahead",
3810 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3811 [DRBD_FAULT_AL_EE] = "EE allocation",
3812 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3815 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3819 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3821 static struct fault_random_state rrs = {0, 0};
3823 unsigned int ret = (
3825 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3826 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
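	/*
	 * In words: a fault fires iff this minor is selected by the
	 * fault_devs bitmask and a 1..100 roll of the generator lands
	 * within fault_rate percent; the per-type enable_faults bitmap
	 * (see the module parameters) gates which fault types are
	 * considered at the call sites before this helper is reached.
	 */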
3831 if (__ratelimit(&drbd_ratelimit_state))
3832 dev_warn(DEV, "***Simulating %s failure\n",
3833 _drbd_fault_str(type));
3840 const char *drbd_buildtag(void)
3842 /* DRBD built from external sources carries a reference to the
3843 git hash of the source code here. */
3845 static char buildtag[38] = "\0uilt-in";
3847 if (buildtag[0] == 0) {
3848 #ifdef CONFIG_MODULES
3849 if (THIS_MODULE != NULL)
3850 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3859 module_init(drbd_init)
3860 module_exit(drbd_cleanup)
3862 EXPORT_SYMBOL(drbd_conn_str);
3863 EXPORT_SYMBOL(drbd_role_str);
3864 EXPORT_SYMBOL(drbd_disk_str);
3865 EXPORT_SYMBOL(drbd_set_st_err_str);