/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used by blkdev_get_by_path to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and 4k are available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
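/*
 * Illustrative usage (the pattern used throughout this file): a handler
 * stores a human readable reason next to the numeric ret_code, e.g.
 *
 *	drbd_msg_put_info("unknown minor");
 *	return ERR_MINOR_INVALID;
 *
 * User space can then show the T_info_text attribute from the
 * DRBD_NLA_CFG_REPLY nest alongside the error code.
 */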
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* putting a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyway */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.resource_name,
			   adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *resource_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
		nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
		if (nla && !IS_ERR(nla))
			resource_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}
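/*
 * For an IPv4 peer at, say, 192.0.2.1 (illustrative address only), the two
 * scratch buffers filled in above would end up containing:
 *
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.0.2.1
 *
 * envp[3] and envp[4] are the (char[20])/(char[60]) slots set up by the
 * callers below.
 */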
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
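/*
 * Note on the "(ret >> 8) & 0xff" above: call_usermodehelper() returns a
 * wait()-style status word, so the helper's exit code lives in the second
 * byte. A sketch of the decoding, assuming the helper exited normally:
 *
 *	int status = call_usermodehelper(...);	// e.g. 0x0500
 *	int exit_code = (status >> 8) & 0xff;	// -> 5
 */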
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyway... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						 CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) about this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
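/*
 * Worked example (illustrative): ppsize(buf, 500) yields "500 KB";
 * ppsize(buf, 1048576) shifts once (1048576 >> 10 == 1024; the
 * "+ !!(size & (1<<9))" term rounds to nearest) and yields "1024 MB".
 */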
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
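/*
 * Callers below always bracket short critical sections with this pair,
 * e.g. (sketch):
 *
 *	drbd_suspend_io(mdev);
 *	// ... resize or attach work that must not race with application IO ...
 *	drbd_resume_io(mdev);
 */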
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* possibly last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* possibly last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
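/*
 * Illustrative example: with a 1 MiB capable local queue and a peer that
 * agreed on protocol version 94, new = min(local, DRBD_MAX_SIZE_H80_PACKET),
 * i.e. the older peer's packet format caps the BIO size for this device.
 */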
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;

	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);
	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	put_ldev(mdev);
	goto out;

 fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error. Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	enforce_disk_conf_limits(new_disk_conf);

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device. TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor! (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (new_disk_conf->meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			 "to currently maximum possible %llu sectors <==\n",
			 (unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				 "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	mdev->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	    drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;
	rcu_read_unlock();

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;
	int ret;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	return retcode;
}
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
		if (new_conf->wire_protocol != old_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_conf->two_primaries != old_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (!new_conf->integrity_alg != !old_conf->integrity_alg)
			return ERR_NEED_APV_100;

		if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_conf->two_primaries &&
	    conn_highest_role(tconn) == R_PRIMARY &&
	    conn_highest_peer(tconn) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
	void *int_dig_in;
	void *int_dig_vv;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;
	int hash_size;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}
	if (crypto->integrity_tfm) {
		hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
		crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_in)
			return ERR_NOMEM;
		crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_vv)
			return ERR_NOMEM;
	}

	return rv;
}
static void free_crypto(struct crypto *crypto)
{
	kfree(crypto->int_dig_in);
	kfree(crypto->int_dig_vv);
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}
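/*
 * Both kfree() and crypto_free_hash() accept NULL, so free_crypto() is safe
 * on a partially initialized struct crypto; the error paths in
 * drbd_adm_net_opts()/drbd_adm_connect() rely on this when alloc_crypto()
 * bails out half way through.
 */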
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	kfree(tconn->int_dig_in);
	tconn->int_dig_in = crypto.int_dig_in;
	kfree(tconn->int_dig_vv);
	tconn->int_dig_vv = crypto.int_dig_vv;
	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
		/* Do this without trying to take tconn->data.mutex again. */
		__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		struct net_conf *nc;
		if (oconn == tconn)
			continue;

		rcu_read_lock();
		nc = rcu_dereference(oconn->net_conf);
		if (nc) {
			taken_addr = (struct sockaddr *)&nc->my_addr;
			if (new_conf->my_addr_len == nc->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&nc->peer_addr;
			if (new_conf->peer_addr_len == nc->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;
		}
		rcu_read_unlock();
		if (retcode != NO_ERROR)
			goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->int_dig_in = crypto.int_dig_in;
	tconn->int_dig_vv = crypto.int_dig_vv;
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker. This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart. We may have "killed" the receiver thread just
		 * after drbdd_init() returned. Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				 "unexpected rv2=%d in conn_try_disconnect()\n",
				 rv2);
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}
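
/*
 * The disk_conf update in drbd_adm_resize() above follows the RCU
 * publish pattern this file uses for its config structs (net_conf,
 * disk_conf): copy, modify the copy, publish it under conf_update,
 * free the old copy only after a grace period.  Generic shape of the
 * pattern (sketch with hypothetical obj/cfg names, not DRBD API):
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	mutex_lock(&obj->conf_update);		// serializes writers
 *	old = obj->cfg;
 *	*new = *old;				// inherit, then modify
 *	new->some_field = some_value;
 *	rcu_assign_pointer(obj->cfg, new);	// readers now see "new"
 *	mutex_unlock(&obj->conf_update);
 *	synchronize_rcu();			// wait out readers of "old"
 *	kfree(old);
 *
 * Readers take rcu_read_lock() and use rcu_dereference(obj->cfg);
 * they never block the update.
 */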
void drbd_set_res_opts_defaults(struct res_opts *r)
{
	set_res_opts_defaults(r);
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto out;
	}

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
		err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = res_opts;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	free_cpumask_var(new_cpu_mask);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}
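
/*
 * Note on the NS()/NS2()/NS3() macros used with the state functions:
 * each expands to a mask/value *pair* of union drbd_state expressions,
 * which is why the call above passes four arguments to
 * drbd_adm_simple_request_state() while spelling only three.  Roughly
 * (sketch; see the NS definition in the drbd headers for the exact form):
 *
 *	NS(conn, C_STARTING_SYNC_S)
 *	  ~> ({ union drbd_state m = { .i = 0 }; m.conn = conn_MASK; m; }),
 *	     ({ union drbd_state v = { .i = 0 }; v.conn = C_STARTING_SYNC_S; v; })
 */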
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *resource_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_resource_name, resource_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
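
/*
 * On the wire, the context becomes one nested attribute:
 *
 *	DRBD_NLA_CFG_CONTEXT (nested)
 *	    T_ctx_volume		(u32, only if vnr != VOLUME_UNSPECIFIED)
 *	    T_ctx_resource_name		(string)
 *
 * A receiver could unpack it along these lines (sketch, error handling
 * omitted; uses the drbd_nla_* helpers defined at the end of this file):
 *
 *	struct nlattr *tb[ARRAY_SIZE(drbd_cfg_context_nl_policy)];
 *	int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
 *	drbd_nla_parse_nested(tb, maxtype, attrs[DRBD_NLA_CFG_CONTEXT],
 *			      drbd_cfg_context_nl_policy);
 *	if (tb[__nla_type(T_ctx_resource_name)])
 *		name = nla_data(tb[__nla_type(T_ctx_resource_name)]);
 */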
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume. */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
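
/*
 * For orientation: get_one_status() emits at most one message per call,
 * and netlink's dump machinery keeps calling it until it returns with an
 * empty skb.  Seen from the core, the driving loop is roughly
 * (pseudocode, not the actual netlink_dump() source):
 *
 *	for (;;) {
 *		skb = fresh skb;
 *		len = cb->dump(skb, cb);	// get_one_status()
 *		if (skb holds no message)	// empty skb ends the dump
 *			break;
 *		unicast skb to the requester;	// the NLM_F_MULTI messages
 *	}
 *
 * Hence all iteration state must survive between calls in cb->args[],
 * as done above.
 */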
/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
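
/*
 * Worked example for the alignment mask above: a bitmap bit covers
 * BM_BLOCK_SIZE (4 KiB by default) of disk, i.e. BM_SECT_PER_BIT == 8
 * sectors of 512 bytes, so
 *
 *	1005 & ~(BM_SECT_PER_BIT - 1)  ==  1005 & ~7  ==  1000  (= 125 * 8)
 *
 * rounds the start sector down to a bit boundary.  Masking with
 * ~BM_SECT_PER_BIT instead would merely clear one bit of the sector
 * number and not align anything.
 */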
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
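
/* Examples: "r0" and "web-data" pass; "" is rejected as missing, and
 * "backups/r0" is rejected because the name may end up as a directory
 * entry in sysfs/configfs/debugfs one day. */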
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
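
/*
 * The synchronize_rcu() between the idr_remove()s and the final
 * kref_put() lets RCU readers that may still be iterating the volumes
 * or minors IDRs (e.g. get_one_status() above) drop out before the
 * last reference can go away.
 */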
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
		goto out;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
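
/*
 * This is the kernel side of "drbdsetup down <resource>".  The order
 * matters: demote first, then disconnect, then detach, and only then
 * delete; each later step requires the state reached by the earlier
 * ones.  Roughly the same teardown could be scripted step by step
 * (illustrative drbdsetup 8.4 invocations):
 *
 *	drbdsetup secondary <minor>	# per volume
 *	drbdsetup disconnect <resource>
 *	drbdsetup detach <minor>	# per volume
 *	drbdsetup del-minor <minor>	# per volume
 *	drbdsetup del-resource <resource>
 */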
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else
		retcode = ERR_RES_IN_USE;

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}
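
/*
 * Any process may subscribe to these events.  A minimal userspace
 * listener, sketched with libnl-3, assuming the generic netlink family
 * is registered as "drbd" with an "events" multicast group (which is
 * what the GENL_MAGIC machinery sets up for this driver):
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int grp = genl_ctrl_resolve_grp(sk, "drbd", "events");
 *	nl_socket_add_membership(sk, grp);
 *	nl_socket_disable_seq_check(sk);  // multicast: kernel picks seq
 *	for (;;)
 *		nl_recvmsgs_default(sk);  // dispatch to registered callbacks
 */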
int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
{
	struct nlattr *head = nla_data(nla);
	int len = nla_len(nla);
	int rem;

	/*
	 * validate_nla (called from nla_parse_nested) ignores attributes
	 * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY
	 * flag.  In order to have it validate attributes with the
	 * DRBD_GENLA_F_MANDATORY flag set also, check and remove that flag
	 * before calling nla_parse_nested.
	 */
	nla_for_each_attr(nla, head, len, rem) {
		if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
			nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
			if (nla_type(nla) > maxtype)
				return -EOPNOTSUPP;
		}
	}
	return 0;
}
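
/*
 * Behavior example: a sender that considers an attribute essential
 * transmits it as (T_some_attr | DRBD_GENLA_F_MANDATORY).  A receiver
 * that knows the attribute strips the flag above and parsing proceeds
 * as usual; a receiver that does not know it (nla_type(nla) > maxtype
 * after stripping) fails the whole parse with -EOPNOTSUPP instead of
 * silently ignoring an attribute the peer insists on.
 */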
int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
			  const struct nla_policy *policy)
{
	int err;

	err = drbd_nla_check_mandatory(maxtype, nla);
	if (!err)
		err = nla_parse_nested(tb, maxtype, nla, policy);

	return err;
}

struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
{
	int err;
	/*
	 * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
	 * we don't know about that attribute, reject all the nested
	 * attributes.
	 */
	err = drbd_nla_check_mandatory(maxtype, nla);
	if (err)
		return ERR_PTR(err);
	return nla_find_nested(nla, attrtype);
}