/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>
static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	nla_nest_end(skb, nla);
	return 0;
}
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len+1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}
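/* Illustrative usage (not part of the original file): build a formatted
 * info text into the pending netlink reply, e.g.
 *	drbd_msg_sprintf_info(adm_ctx.reply_skb,
 *		"minor %u already exists as a different volume", minor);
 * The text ends up in the T_info_text attribute of the reply, which is
 * what the userspace tool prints to the administrator. */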
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
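/* Illustrative example (not in the original source): the flags form a
 * bitmask, which is why they are distinct powers of two.  A command that
 * needs both an existing minor and its resource would call
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info,
 *			DRBD_ADM_NEED_MINOR | DRBD_ADM_NEED_RESOURCE);
 * and rely on adm_ctx.device / adm_ctx.resource being valid afterwards. */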
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name)
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
				adm_ctx->minor, adm_ctx->resource->name,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx->minor, adm_ctx->volume,
				adm_ctx->device->vnr,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
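/* Illustrative example (not in the original source): for an IPv4 peer at
 * 192.168.1.2, the helper environment ends up containing
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.168.1.2
 * in envp[3] and envp[4] respectively, so fence/handler scripts can
 * reach the peer without parsing the DRBD configuration. */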
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
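/* Illustrative usage (not in the original source):
 *	drbd_khelper(device, "pri-on-incon-degr");
 * runs the configured usermode helper roughly as
 *	<drbd_usermode_helper> pri-on-incon-degr minor-<n>
 * where drbd_usermode_helper conventionally defaults to /sbin/drbdadm.
 * The helper's exit status is encoded in the upper byte of ret. */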
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}
static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}
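/* Quick reference (summarizing the switch above, not from the original
 * source): the fence-peer helper communicates through its exit status,
 * extracted as (r >> 8) & 0xff.  Recognized answers mark the peer's disk
 * Inconsistent or Outdated, outdate our own disk when the peer turns out
 * to be an active Primary, or accept a stonithed peer; any other exit
 * status counts as a broken script and leaves IO frozen. */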
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may just have force_sig()'ed this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
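/* Rough illustration (not from the original source, numbers approximate):
 * the bitmap tracks one bit per 4 kiB, i.e. about 32 kiB of bitmap per
 * 1 GiB of storage.  With internal meta data on a 1 TiB backing device
 * and the default 32 kB activity log (al_size_sect = 64), md_size_sect
 * works out to roughly 65536 + 64 + 8 sectors; the exact value depends
 * on the alignment steps above. */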
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
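/* Illustrative example (not in the original source): size is in KB, so
 *	char buf[10];
 *	ppsize(buf, 1048576);	// yields "1024 MB"
 * 1048576 is >= 10000, so it is shifted down by 10 bits once (rounding
 * on bit 9) and printed with the next unit. */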
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}
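/* Typical pairing (illustrative, not from the original source):
 *	drbd_suspend_io(device);
 *	... reconfigure something that must not race with application IO ...
 *	drbd_resume_io(device);
 * Because suspend_cnt is a counter, nested suspend/resume pairs from
 * different threads compose correctly. */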
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	char ppb[10];
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(device->this_bdev) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
		drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			(unsigned long long)size>>1);
	}

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	}
	lc_destroy(t);
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}
static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
	q->limits.discard_granularity = granularity;
}

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}
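/* For scale (illustrative, not from the original source): an activity log
 * extent covers 4 MiB, so AL_EXTENT_SIZE >> 9 is 8192 sectors, i.e. one
 * 4 MiB discard per bio for old peers, while DRBD_FF_WSAME-capable peers
 * get the much larger DRBD_MAX_BBIO_SECTORS batch size. */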
static void decide_on_discard_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b,
			bool discard_zeroes_if_aligned)
{
	/* q = drbd device queue (device->rq_queue)
	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
	 *     or NULL if diskless
	 */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	bool can_do = b ? blk_queue_discard(b) : true;

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
		can_do = false;
		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
	}
	if (can_do) {
		/* We don't care for the granularity, really.
		 * Stacking limits below should fix it for the local
		 * device.  Whether or not it is a suitable granularity
		 * on the remote device is not our problem, really. If
		 * you care, you need to use devices with similar
		 * topology on all peers. */
		blk_queue_discard_granularity(q, 512);
		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		blk_queue_discard_granularity(q, 0);
		q->limits.max_discard_sectors = 0;
		q->limits.max_write_zeroes_sectors = 0;
	}
}
static void fixup_discard_if_not_supported(struct request_queue *q)
{
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.
	 * Older kernels got this wrong in "stack limits".
	 */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_discard_granularity(q, 0);
	}
}

static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
	/* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
	 * if we can handle "zeroes" efficiently on the protocol,
	 * we want to do that, even if our backend does not announce
	 * max_write_zeroes_sectors itself. */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	/* If the peer announces WZEROES support, use it.  Otherwise, rather
	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		q->limits.max_write_zeroes_sectors = 0;
}
static void decide_on_write_same_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b, struct o_qlim *o,
			bool disable_write_same)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device->connection;
	bool can_do = b ? b->limits.max_write_same_sectors : true;

	if (can_do && disable_write_same) {
		can_do = false;
		drbd_info(peer_device, "WRITE_SAME disabled by config\n");
	}

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
		can_do = false;
		drbd_info(peer_device, "peer does not support WRITE_SAME\n");
	}

	if (o) {
		/* logical block size; queue_logical_block_size(NULL) is 512 */
		unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
		unsigned int me_lbs_b = queue_logical_block_size(b);
		unsigned int me_lbs = queue_logical_block_size(q);

		if (me_lbs_b != me_lbs) {
			drbd_warn(peer_device,
				"logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
				me_lbs, me_lbs_b);
			/* rather disable write same than trigger some BUG_ON later in the scsi layer. */
			can_do = false;
		}
		if (me_lbs_b != peer_lbs) {
			drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
				me_lbs, peer_lbs);
			if (can_do) {
				drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
				can_do = false;
			}
			me_lbs = max(me_lbs, me_lbs_b);
			/* We cannot change the logical block size of an in-use queue.
			 * We can only hope that access happens to be properly aligned.
			 * If not, the peer will likely produce an IO error, and detach. */
			if (peer_lbs > me_lbs) {
				if (device->state.role != R_PRIMARY) {
					blk_queue_logical_block_size(q, peer_lbs);
					drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
				} else {
					drbd_warn(peer_device,
						"current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
						me_lbs, peer_lbs);
				}
			}
		}
		if (can_do && !o->write_same_capable) {
			/* If we introduce an open-coded write-same loop on the receiving side,
			 * the peer would present itself as "capable". */
			drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
			can_do = false;
		}
	}

	blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
}
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;
	bool discard_zeroes_if_aligned = true;
	bool disable_write_same = false;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
		disable_write_same = dc->disable_write_same;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
	}

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
	decide_on_write_same_support(device, q, b, o, disable_write_same);

	if (b) {
		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info->ra_pages !=
		    b->backing_dev_info->ra_pages) {
			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info->ra_pages,
				 b->backing_dev_info->ra_pages);
			q->backing_dev_info->ra_pages =
						b->backing_dev_info->ra_pages;
		}
	}
	fixup_discard_if_not_supported(q);
	fixup_write_zeroes(device, q);
}
void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple BIOs
	   for a single peer_request. */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
}
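/* Summary of the negotiation above (illustrative; double-check the values
 * against drbd_int.h): protocol versions below 94, and exactly 94, cap the
 * peer at 32 KiB (DRBD_MAX_SIZE_H80_PACKET); versions 95..99 at 128 KiB
 * (DRBD_MAX_BIO_SIZE_P95); version 100 and newer at DRBD_MAX_BIO_SIZE
 * (1 MiB).  The effective request size is then min(local, peer). */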
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 slot numbers of context information per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
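/* Checking the arithmetic in the comment above (illustrative, assuming
 * AL_CONTEXT_PER_TRANSACTION == 919): 72 transaction blocks hold
 * 72 * 919 = 66168 > 65536 = 2**16 context slots, so from 72 blocks on
 * the 16 bit slot numbers, not on-disk space, are the limiting factor. */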
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}
static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!blk_queue_discard(q)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		int remainder;

		if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = q->limits.discard_granularity;

		remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
			disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}
static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	int err = -EBUSY;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused. So we don't. */
	if (atomic_read(&device->ap_bio_cnt))
		goto out;

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
out:
	drbd_resume_io(device);
	return err;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err;
	unsigned int fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	err = disk_opts_check_al_size(device, new_disk_conf);
	if (err) {
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx.reply_skb,
			"Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
	||  old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static struct block_device *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
	struct block_device *bdev;
	int err = 0;

	bdev = blkdev_get_by_path(bdev_path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n",
				bdev_path, PTR_ERR(bdev));
		return bdev;
	}

	if (!do_bd_link)
		return bdev;

	err = bd_link_disk_holder(bdev, device->vdisk);
	if (err) {
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
				bdev_path, err);
		bdev = ERR_PTR(err);
	}
	return bdev;
}
static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
{
	struct block_device *bdev;

	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
	if (IS_ERR(bdev))
		return ERR_OPEN_DISK;
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
	if (IS_ERR(bdev))
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = bdev;
	return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
	bool do_bd_unlink)
{
	if (!bdev)
		return;
	if (do_bd_unlink)
		bd_unlink_disk_holder(bdev, device->vdisk);
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}

void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev, true);

	kfree(ldev->disk_conf);
	kfree(ldev);
}
1802 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1804 struct drbd_config_context adm_ctx;
1805 struct drbd_device *device;
1806 struct drbd_peer_device *peer_device;
1807 struct drbd_connection *connection;
1809 enum drbd_ret_code retcode;
1810 enum determine_dev_size dd;
1811 sector_t max_possible_sectors;
1812 sector_t min_md_device_sectors;
1813 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1814 struct disk_conf *new_disk_conf = NULL;
1815 struct lru_cache *resync_lru = NULL;
1816 struct fifo_buffer *new_plan = NULL;
1817 union drbd_state ns, os;
1818 enum drbd_state_rv rv;
1819 struct net_conf *nc;
1821 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1822 if (!adm_ctx.reply_skb)
1824 if (retcode != NO_ERROR)
1827 device = adm_ctx.device;
1828 mutex_lock(&adm_ctx.resource->adm_mutex);
1829 peer_device = first_peer_device(device);
1830 connection = peer_device->connection;
1831 conn_reconfig_start(connection);
1833 /* if you want to reconfigure, please tear down first */
1834 if (device->state.disk > D_DISKLESS) {
1835 retcode = ERR_DISK_CONFIGURED;
1838 /* It may just now have detached because of IO error. Make sure
1839 * drbd_ldev_destroy is done already, we may end up here very fast,
1840 * e.g. if someone calls attach from the on-io-error handler,
1841 * to realize a "hot spare" feature (not that I'd recommend that) */
1842 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1844 /* make sure there is no leftover from previous force-detach attempts */
1845 clear_bit(FORCE_DETACH, &device->flags);
1846 clear_bit(WAS_IO_ERROR, &device->flags);
1847 clear_bit(WAS_READ_ERROR, &device->flags);
1849 /* and no leftover from previously aborted resync or verify, either */
1850 device->rs_total = 0;
1851 device->rs_failed = 0;
1852 atomic_set(&device->rs_pending_cnt, 0);
1854 /* allocation not in the IO path, drbdsetup context */
1855 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1857 retcode = ERR_NOMEM;
1860 spin_lock_init(&nbc->md.uuid_lock);
1862 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1863 if (!new_disk_conf) {
1864 retcode = ERR_NOMEM;
1867 nbc->disk_conf = new_disk_conf;
1869 set_disk_conf_defaults(new_disk_conf);
1870 err = disk_conf_from_attrs(new_disk_conf, info);
1872 retcode = ERR_MANDATORY_TAG;
1873 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1877 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1878 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1880 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1881 if (!new_plan) {
1882 retcode = ERR_NOMEM;
1886 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1887 retcode = ERR_MD_IDX_INVALID;
1892 nc = rcu_dereference(connection->net_conf);
1894 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1896 retcode = ERR_STONITH_AND_PROT_A;
1902 retcode = open_backing_devices(device, new_disk_conf, nbc);
1903 if (retcode != NO_ERROR)
1906 if ((nbc->backing_bdev == nbc->md_bdev) !=
1907 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1908 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1909 retcode = ERR_MD_IDX_INVALID;
1913 resync_lru = lc_create("resync", drbd_bm_ext_cache,
1914 1, 61, sizeof(struct bm_extent),
1915 offsetof(struct bm_extent, lce));
1916 if (!resync_lru) {
1917 retcode = ERR_NOMEM;
1921 /* Read our meta data super block early.
1922 * This also sets other on-disk offsets. */
1923 retcode = drbd_md_read(device, nbc);
1924 if (retcode != NO_ERROR)
1927 sanitize_disk_conf(device, new_disk_conf, nbc);
1929 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1930 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1931 (unsigned long long) drbd_get_max_capacity(nbc),
1932 (unsigned long long) new_disk_conf->disk_size);
1933 retcode = ERR_DISK_TOO_SMALL;
1937 if (new_disk_conf->meta_dev_idx < 0) {
1938 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1939 /* at least one MB, otherwise it does not make sense */
1940 min_md_device_sectors = (2<<10);
1941 } else {
1942 max_possible_sectors = DRBD_MAX_SECTORS;
1943 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1946 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1947 retcode = ERR_MD_DISK_TOO_SMALL;
1948 drbd_warn(device, "refusing attach: md-device too small, "
1949 "at least %llu sectors needed for this meta-disk type\n",
1950 (unsigned long long) min_md_device_sectors);
1954 /* Make sure the new disk is big enough
1955 * (we may currently be R_PRIMARY with no local disk...) */
1956 if (drbd_get_max_capacity(nbc) <
1957 drbd_get_capacity(device->this_bdev)) {
1958 retcode = ERR_DISK_TOO_SMALL;
1962 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1964 if (nbc->known_size > max_possible_sectors) {
1965 drbd_warn(device, "==> truncating very big lower level device "
1966 "to currently maximum possible %llu sectors <==\n",
1967 (unsigned long long) max_possible_sectors);
1968 if (new_disk_conf->meta_dev_idx >= 0)
1969 drbd_warn(device, "==>> using internal or flexible "
1970 "meta data may help <<==\n");
1973 drbd_suspend_io(device);
1974 /* also wait for the last barrier ack. */
1975 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1976 * We need a way to either ignore barrier acks for barriers sent before a device
1977 * was attached, or a way to wait for all pending barrier acks to come in.
1978 * As barriers are counted per resource,
1979 * we'd need to suspend io on all devices of a resource.
1980 */
1981 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1982 /* and for any other previously queued work */
1983 drbd_flush_workqueue(&connection->sender_work);
1985 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1986 retcode = rv; /* FIXME: Type mismatch. */
1987 drbd_resume_io(device);
1988 if (rv < SS_SUCCESS)
1991 if (!get_ldev_if_state(device, D_ATTACHING))
1992 goto force_diskless;
1994 if (!device->bitmap) {
1995 if (drbd_bm_init(device)) {
1996 retcode = ERR_NOMEM;
1997 goto force_diskless_dec;
2001 if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
2002 (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
2003 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
2004 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
2005 (unsigned long long)device->ed_uuid);
2006 retcode = ERR_DATA_NOT_CURRENT;
2007 goto force_diskless_dec;
2010 /* Since we are diskless, fix the activity log first... */
2011 if (drbd_check_al_size(device, new_disk_conf)) {
2012 retcode = ERR_NOMEM;
2013 goto force_diskless_dec;
2016 /* Prevent shrinking of consistent devices ! */
2018 unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
2019 unsigned long long eff = nbc->md.la_size_sect;
2020 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
2021 if (nsz == nbc->disk_conf->disk_size) {
2022 drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
2023 } else {
2024 drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
2025 drbd_msg_sprintf_info(adm_ctx.reply_skb,
2026 "To-be-attached device has last effective > current size, and is consistent\n"
2027 "(%llu > %llu sectors). Refusing to attach.", eff, nsz);
2028 retcode = ERR_IMPLICIT_SHRINK;
2029 goto force_diskless_dec;
2034 lock_all_resources();
2035 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
2036 if (retcode != NO_ERROR) {
2037 unlock_all_resources();
2038 goto force_diskless_dec;
2041 /* Reset the "barriers don't work" bits here, then force meta data to
2042 * be written, to ensure we determine if barriers are supported. */
2043 if (new_disk_conf->md_flushes)
2044 clear_bit(MD_NO_FUA, &device->flags);
2045 else
2046 set_bit(MD_NO_FUA, &device->flags);
2048 /* Point of no return reached.
2049 * Devices and memory are no longer released by error cleanup below.
2050 * now device takes over responsibility, and the state engine should
2051 * clean it up somewhere. */
2052 D_ASSERT(device, device->ldev == NULL);
2053 device->ldev = nbc;
2054 device->resync = resync_lru;
2055 device->rs_plan_s = new_plan;
2057 nbc = NULL;
2058 new_disk_conf = NULL;
2059 resync_lru = NULL;
2060 new_plan = NULL;
2061 drbd_resync_after_changed(device);
2062 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
2063 unlock_all_resources();
2065 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
2066 set_bit(CRASHED_PRIMARY, &device->flags);
2067 else
2068 clear_bit(CRASHED_PRIMARY, &device->flags);
2070 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2071 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
2072 set_bit(CRASHED_PRIMARY, &device->flags);
2074 device->send_cnt = 0;
2075 device->recv_cnt = 0;
2076 device->read_cnt = 0;
2077 device->writ_cnt = 0;
2079 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
2081 /* If I am currently not R_PRIMARY,
2082 * but meta data primary indicator is set,
2083 * I just now recover from a hard crash,
2084 * and have been R_PRIMARY before that crash.
2086 * Now, if I had no connection before that crash
2087 * (have been degraded R_PRIMARY), chances are that
2088 * I won't find my peer now either.
2090 * In that case, and _only_ in that case,
2091 * we use the degr-wfc-timeout instead of the default,
2092 * so we can automatically recover from a crash of a
2093 * degraded but active "cluster" after a certain timeout.
2094 */
2095 clear_bit(USE_DEGR_WFC_T, &device->flags);
2096 if (device->state.role != R_PRIMARY &&
2097 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2098 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2099 set_bit(USE_DEGR_WFC_T, &device->flags);
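/* In short: USE_DEGR_WFC_T ends up set exactly when the meta data says
 * we were a degraded primary (MDF_PRIMARY_IND set, MDF_CONNECTED_IND
 * clear) while we are not primary right now. */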
2101 dd = drbd_determine_dev_size(device, 0, NULL);
2102 if (dd <= DS_ERROR) {
2103 retcode = ERR_NOMEM_BITMAP;
2104 goto force_diskless_dec;
2105 } else if (dd == DS_GREW)
2106 set_bit(RESYNC_AFTER_NEG, &device->flags);
2108 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2109 (test_bit(CRASHED_PRIMARY, &device->flags) &&
2110 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2111 drbd_info(device, "Assuming that all blocks are out of sync "
2112 "(aka FullSync)\n");
2113 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2114 "set_n_write from attaching", BM_LOCKED_MASK)) {
2115 retcode = ERR_IO_MD_DISK;
2116 goto force_diskless_dec;
2119 if (drbd_bitmap_io(device, &drbd_bm_read,
2120 "read from attaching", BM_LOCKED_MASK)) {
2121 retcode = ERR_IO_MD_DISK;
2122 goto force_diskless_dec;
2126 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2127 drbd_suspend_al(device); /* IO is still suspended here... */
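/* If every bit is set anyway, the activity log cannot mark anything as
 * additionally out of sync; keeping AL updates suspended until resync
 * makes progress presumably just avoids pointless meta data writes. */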
2129 spin_lock_irq(&device->resource->req_lock);
2130 os = drbd_read_state(device);
2132 /* If MDF_CONSISTENT is not set go into inconsistent state,
2133 otherwise investigate MDF_WasUpToDate...
2134 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2135 otherwise into D_CONSISTENT state.
2136 */
2137 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2138 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2139 ns.disk = D_CONSISTENT;
2140 else
2141 ns.disk = D_OUTDATED;
2142 } else {
2143 ns.disk = D_INCONSISTENT;
2146 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2147 ns.pdsk = D_OUTDATED;
2150 if (ns.disk == D_CONSISTENT &&
2151 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2152 ns.disk = D_UP_TO_DATE;
2154 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2155 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2156 this point, because drbd_request_state() modifies these
2159 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2160 device->ldev->md.flags &= ~MDF_AL_DISABLED;
2161 else
2162 device->ldev->md.flags |= MDF_AL_DISABLED;
2166 /* In case we are C_CONNECTED postpone any decision on the new disk
2167 state after the negotiation phase. */
2168 if (device->state.conn == C_CONNECTED) {
2169 device->new_state_tmp.i = ns.i;
2171 ns.disk = D_NEGOTIATING;
2173 /* We expect to receive up-to-date UUIDs soon.
2174 To avoid a race in receive_state, free p_uuid while
2175 holding req_lock. I.e. atomic with the state change */
2176 kfree(device->p_uuid);
2177 device->p_uuid = NULL;
2180 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2181 spin_unlock_irq(&device->resource->req_lock);
2183 if (rv < SS_SUCCESS)
2184 goto force_diskless_dec;
2186 mod_timer(&device->request_timer, jiffies + HZ);
2188 if (device->state.role == R_PRIMARY)
2189 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
2190 else
2191 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2193 drbd_md_mark_dirty(device);
2194 drbd_md_sync(device);
2196 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2198 conn_reconfig_done(connection);
2199 mutex_unlock(&adm_ctx.resource->adm_mutex);
2200 drbd_adm_finish(&adm_ctx, info, retcode);
2201 return 0;
2203 force_diskless_dec:
2204 put_ldev(device);
2205 force_diskless:
2206 drbd_force_state(device, NS(disk, D_DISKLESS));
2207 drbd_md_sync(device);
2208 fail:
2209 conn_reconfig_done(connection);
2210 if (nbc) {
2211 close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2212 close_backing_dev(device, nbc->backing_bdev, true);
2213 kfree(nbc);
2214 }
2215 kfree(new_disk_conf);
2216 lc_destroy(resync_lru);
2217 kfree(new_plan);
2218 mutex_unlock(&adm_ctx.resource->adm_mutex);
2219 finish:
2220 drbd_adm_finish(&adm_ctx, info, retcode);
2221 return 0;
2224 static int adm_detach(struct drbd_device *device, int force)
2227 set_bit(FORCE_DETACH, &device->flags);
2228 drbd_force_state(device, NS(disk, D_FAILED));
2232 return drbd_request_detach_interruptible(device);
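/* With force set we go straight to D_FAILED via FORCE_DETACH, which is
 * meant for backing devices that no longer complete IO; the default
 * path is interruptible and may be aborted by a signal. */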
2235 /* Detaching the disk is a process in multiple stages. First we need to lock
2236 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2237 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2238 * internal references as well.
2239 * Only then we have finally detached. */
2240 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2242 struct drbd_config_context adm_ctx;
2243 enum drbd_ret_code retcode;
2244 struct detach_parms parms = { };
2245 int err;
2247 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2248 if (!adm_ctx.reply_skb)
2250 if (retcode != NO_ERROR)
2253 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2254 err = detach_parms_from_attrs(&parms, info);
2255 if (err) {
2256 retcode = ERR_MANDATORY_TAG;
2257 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2262 mutex_lock(&adm_ctx.resource->adm_mutex);
2263 retcode = adm_detach(adm_ctx.device, parms.force_detach);
2264 mutex_unlock(&adm_ctx.resource->adm_mutex);
2266 drbd_adm_finish(&adm_ctx, info, retcode);
2270 static bool conn_resync_running(struct drbd_connection *connection)
2272 struct drbd_peer_device *peer_device;
2277 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2278 struct drbd_device *device = peer_device->device;
2279 if (device->state.conn == C_SYNC_SOURCE ||
2280 device->state.conn == C_SYNC_TARGET ||
2281 device->state.conn == C_PAUSED_SYNC_S ||
2282 device->state.conn == C_PAUSED_SYNC_T) {
2292 static bool conn_ov_running(struct drbd_connection *connection)
2294 struct drbd_peer_device *peer_device;
2299 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2300 struct drbd_device *device = peer_device->device;
2301 if (device->state.conn == C_VERIFY_S ||
2302 device->state.conn == C_VERIFY_T) {
2312 static enum drbd_ret_code
2313 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2315 struct drbd_peer_device *peer_device;
2316 int i;
2318 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2319 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2320 return ERR_NEED_APV_100;
2322 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2323 return ERR_NEED_APV_100;
2325 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2326 return ERR_NEED_APV_100;
2329 if (!new_net_conf->two_primaries &&
2330 conn_highest_role(connection) == R_PRIMARY &&
2331 conn_highest_peer(connection) == R_PRIMARY)
2332 return ERR_NEED_ALLOW_TWO_PRI;
2334 if (new_net_conf->two_primaries &&
2335 (new_net_conf->wire_protocol != DRBD_PROT_C))
2336 return ERR_NOT_PROTO_C;
2338 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2339 struct drbd_device *device = peer_device->device;
2340 if (get_ldev(device)) {
2341 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2343 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2344 return ERR_STONITH_AND_PROT_A;
2346 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2347 return ERR_DISCARD_IMPOSSIBLE;
2350 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2351 return ERR_CONG_NOT_PROTO_A;
2356 static enum drbd_ret_code
2357 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2359 enum drbd_ret_code rv;
2360 struct drbd_peer_device *peer_device;
2361 int i;
2364 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2367 /* connection->peer_devices protected by genl_lock() here */
2368 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2369 struct drbd_device *device = peer_device->device;
2370 if (!device->bitmap) {
2371 if (drbd_bm_init(device))
2380 struct crypto_shash *verify_tfm;
2381 struct crypto_shash *csums_tfm;
2382 struct crypto_shash *cram_hmac_tfm;
2383 struct crypto_shash *integrity_tfm;
2387 alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2392 *tfm = crypto_alloc_shash(tfm_name, 0, 0);
2401 static enum drbd_ret_code
2402 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2404 char hmac_name[CRYPTO_MAX_ALG_NAME];
2405 enum drbd_ret_code rv;
2407 rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
2411 rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
2415 rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2419 if (new_net_conf->cram_hmac_alg[0] != 0) {
2420 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2421 new_net_conf->cram_hmac_alg);
2423 rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
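/* The crypto API spells HMAC as a template around the digest, so a
 * cram_hmac_alg of e.g. "sha256" becomes the tfm name "hmac(sha256)". */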
2430 static void free_crypto(struct crypto *crypto)
2432 crypto_free_shash(crypto->cram_hmac_tfm);
2433 crypto_free_shash(crypto->integrity_tfm);
2434 crypto_free_shash(crypto->csums_tfm);
2435 crypto_free_shash(crypto->verify_tfm);
2438 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2440 struct drbd_config_context adm_ctx;
2441 enum drbd_ret_code retcode;
2442 struct drbd_connection *connection;
2443 struct net_conf *old_net_conf, *new_net_conf = NULL;
2444 int err;
2445 int ovr; /* online verify running */
2446 int rsr; /* re-sync running */
2447 struct crypto crypto = { };
2449 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2450 if (!adm_ctx.reply_skb)
2452 if (retcode != NO_ERROR)
2455 connection = adm_ctx.connection;
2456 mutex_lock(&adm_ctx.resource->adm_mutex);
2458 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2459 if (!new_net_conf) {
2460 retcode = ERR_NOMEM;
2464 conn_reconfig_start(connection);
2466 mutex_lock(&connection->data.mutex);
2467 mutex_lock(&connection->resource->conf_update);
2468 old_net_conf = connection->net_conf;
2470 if (!old_net_conf) {
2471 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2472 retcode = ERR_INVALID_REQUEST;
2476 *new_net_conf = *old_net_conf;
2477 if (should_set_defaults(info))
2478 set_net_conf_defaults(new_net_conf);
2480 err = net_conf_from_attrs_for_change(new_net_conf, info);
2481 if (err && err != -ENOMSG) {
2482 retcode = ERR_MANDATORY_TAG;
2483 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2487 retcode = check_net_options(connection, new_net_conf);
2488 if (retcode != NO_ERROR)
2491 /* re-sync running */
2492 rsr = conn_resync_running(connection);
2493 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2494 retcode = ERR_CSUMS_RESYNC_RUNNING;
2498 /* online verify running */
2499 ovr = conn_ov_running(connection);
2500 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2501 retcode = ERR_VERIFY_RUNNING;
2505 retcode = alloc_crypto(&crypto, new_net_conf);
2506 if (retcode != NO_ERROR)
2509 rcu_assign_pointer(connection->net_conf, new_net_conf);
2512 crypto_free_shash(connection->csums_tfm);
2513 connection->csums_tfm = crypto.csums_tfm;
2514 crypto.csums_tfm = NULL;
2517 crypto_free_shash(connection->verify_tfm);
2518 connection->verify_tfm = crypto.verify_tfm;
2519 crypto.verify_tfm = NULL;
2522 crypto_free_shash(connection->integrity_tfm);
2523 connection->integrity_tfm = crypto.integrity_tfm;
2524 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2525 /* Do this without trying to take connection->data.mutex again. */
2526 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2528 crypto_free_shash(connection->cram_hmac_tfm);
2529 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2531 mutex_unlock(&connection->resource->conf_update);
2532 mutex_unlock(&connection->data.mutex);
2533 synchronize_rcu();
2534 kfree(old_net_conf);
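/* csums_alg and verify_alg may just have changed; if we are connected,
 * tell the peer, so both sides keep using the same algorithms for
 * resync and online verify. */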
2536 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2537 struct drbd_peer_device *peer_device;
2540 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2541 drbd_send_sync_param(peer_device);
2545 goto done;
2546 fail:
2547 mutex_unlock(&connection->resource->conf_update);
2548 mutex_unlock(&connection->data.mutex);
2549 free_crypto(&crypto);
2550 kfree(new_net_conf);
2551 done:
2552 conn_reconfig_done(connection);
2554 mutex_unlock(&adm_ctx.resource->adm_mutex);
2556 drbd_adm_finish(&adm_ctx, info, retcode);
2560 static void connection_to_info(struct connection_info *info,
2561 struct drbd_connection *connection)
2563 info->conn_connection_state = connection->cstate;
2564 info->conn_role = conn_highest_peer(connection);
2567 static void peer_device_to_info(struct peer_device_info *info,
2568 struct drbd_peer_device *peer_device)
2570 struct drbd_device *device = peer_device->device;
2572 info->peer_repl_state =
2573 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2574 info->peer_disk_state = device->state.pdsk;
2575 info->peer_resync_susp_user = device->state.user_isp;
2576 info->peer_resync_susp_peer = device->state.peer_isp;
2577 info->peer_resync_susp_dependency = device->state.aftr_isp;
2580 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2582 struct connection_info connection_info;
2583 enum drbd_notification_type flags;
2584 unsigned int peer_devices = 0;
2585 struct drbd_config_context adm_ctx;
2586 struct drbd_peer_device *peer_device;
2587 struct net_conf *old_net_conf, *new_net_conf = NULL;
2588 struct crypto crypto = { };
2589 struct drbd_resource *resource;
2590 struct drbd_connection *connection;
2591 enum drbd_ret_code retcode;
2592 int i;
2593 int err;
2595 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2597 if (!adm_ctx.reply_skb)
2599 if (retcode != NO_ERROR)
2601 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2602 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2603 retcode = ERR_INVALID_REQUEST;
2607 /* No need for _rcu here. All reconfiguration is
2608 * strictly serialized on genl_lock(). We are protected against
2609 * concurrent reconfiguration/addition/deletion */
2610 for_each_resource(resource, &drbd_resources) {
2611 for_each_connection(connection, resource) {
2612 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2613 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2614 connection->my_addr_len)) {
2615 retcode = ERR_LOCAL_ADDR;
2619 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2620 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2621 connection->peer_addr_len)) {
2622 retcode = ERR_PEER_ADDR;
2628 mutex_lock(&adm_ctx.resource->adm_mutex);
2629 connection = first_connection(adm_ctx.resource);
2630 conn_reconfig_start(connection);
2632 if (connection->cstate > C_STANDALONE) {
2633 retcode = ERR_NET_CONFIGURED;
2637 /* allocation not in the IO path, drbdsetup / netlink process context */
2638 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2639 if (!new_net_conf) {
2640 retcode = ERR_NOMEM;
2644 set_net_conf_defaults(new_net_conf);
2646 err = net_conf_from_attrs(new_net_conf, info);
2647 if (err && err != -ENOMSG) {
2648 retcode = ERR_MANDATORY_TAG;
2649 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2653 retcode = check_net_options(connection, new_net_conf);
2654 if (retcode != NO_ERROR)
2657 retcode = alloc_crypto(&crypto, new_net_conf);
2658 if (retcode != NO_ERROR)
2661 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2663 drbd_flush_workqueue(&connection->sender_work);
2665 mutex_lock(&adm_ctx.resource->conf_update);
2666 old_net_conf = connection->net_conf;
2667 if (old_net_conf) {
2668 retcode = ERR_NET_CONFIGURED;
2669 mutex_unlock(&adm_ctx.resource->conf_update);
2670 goto fail;
2671 }
2672 rcu_assign_pointer(connection->net_conf, new_net_conf);
2674 conn_free_crypto(connection);
2675 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2676 connection->integrity_tfm = crypto.integrity_tfm;
2677 connection->csums_tfm = crypto.csums_tfm;
2678 connection->verify_tfm = crypto.verify_tfm;
2680 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2681 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2682 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2683 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2685 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2686 peer_devices++;
2689 connection_to_info(&connection_info, connection);
2690 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2691 mutex_lock(&notification_mutex);
2692 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2693 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2694 struct peer_device_info peer_device_info;
2696 peer_device_to_info(&peer_device_info, peer_device);
2697 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2698 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2700 mutex_unlock(&notification_mutex);
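/* These NOTIFY_CREATE events form one multicast transaction: every
 * event except the last carries NOTIFY_CONTINUES, which is what the
 * (peer_devices--) countdown implements. */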
2701 mutex_unlock(&adm_ctx.resource->conf_update);
2704 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2705 struct drbd_device *device = peer_device->device;
2706 device->send_cnt = 0;
2707 device->recv_cnt = 0;
2711 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2713 conn_reconfig_done(connection);
2714 mutex_unlock(&adm_ctx.resource->adm_mutex);
2715 drbd_adm_finish(&adm_ctx, info, retcode);
2716 return 0;
2718 fail:
2719 free_crypto(&crypto);
2720 kfree(new_net_conf);
2722 conn_reconfig_done(connection);
2723 mutex_unlock(&adm_ctx.resource->adm_mutex);
2725 drbd_adm_finish(&adm_ctx, info, retcode);
2729 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2731 enum drbd_conns cstate;
2732 enum drbd_state_rv rv;
2734 repeat:
2735 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2736 force ? CS_HARD : 0);
2739 case SS_NOTHING_TO_DO:
2741 case SS_ALREADY_STANDALONE:
2743 case SS_PRIMARY_NOP:
2744 /* Our state checking code wants to see the peer outdated. */
2745 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2747 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2748 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2751 case SS_CW_FAILED_BY_PEER:
2752 spin_lock_irq(&connection->resource->req_lock);
2753 cstate = connection->cstate;
2754 spin_unlock_irq(&connection->resource->req_lock);
2755 if (cstate <= C_WF_CONNECTION)
2756 goto repeat;
2757 /* The peer probably wants to see us outdated. */
2758 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2759 disk, D_OUTDATED), 0);
2760 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2761 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2762 CS_VERBOSE);
2766 /* no special handling necessary */
2769 if (rv >= SS_SUCCESS) {
2770 enum drbd_state_rv rv2;
2771 /* No one else can reconfigure the network while I am here.
2772 * The state handling only uses drbd_thread_stop_nowait(),
2773 * we want to really wait here until the receiver is no more.
2774 */
2775 drbd_thread_stop(&connection->receiver);
2777 /* Race breaker. This additional state change request may be
2778 * necessary, if this was a forced disconnect during a receiver
2779 * restart. We may have "killed" the receiver thread just
2780 * after drbd_receiver() returned. Typically, we should be
2781 * C_STANDALONE already, now, and this becomes a no-op.
2782 */
2783 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2784 CS_VERBOSE | CS_HARD);
2785 if (rv2 < SS_SUCCESS)
2786 drbd_err(connection,
2787 "unexpected rv2=%d in conn_try_disconnect()\n",
2789 /* Unlike in DRBD 9, the state engine has generated
2790 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2795 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2797 struct drbd_config_context adm_ctx;
2798 struct disconnect_parms parms;
2799 struct drbd_connection *connection;
2800 enum drbd_state_rv rv;
2801 enum drbd_ret_code retcode;
2802 int err;
2804 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2805 if (!adm_ctx.reply_skb)
2807 if (retcode != NO_ERROR)
2810 connection = adm_ctx.connection;
2811 memset(&parms, 0, sizeof(parms));
2812 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2813 err = disconnect_parms_from_attrs(&parms, info);
2814 if (err) {
2815 retcode = ERR_MANDATORY_TAG;
2816 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2821 mutex_lock(&adm_ctx.resource->adm_mutex);
2822 rv = conn_try_disconnect(connection, parms.force_disconnect);
2823 if (rv < SS_SUCCESS)
2824 retcode = rv; /* FIXME: Type mismatch. */
2827 mutex_unlock(&adm_ctx.resource->adm_mutex);
2829 drbd_adm_finish(&adm_ctx, info, retcode);
2833 void resync_after_online_grow(struct drbd_device *device)
2835 int iass; /* I am sync source */
2837 drbd_info(device, "Resync of new storage after online grow\n");
2838 if (device->state.role != device->state.peer)
2839 iass = (device->state.role == R_PRIMARY);
2840 else
2841 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2843 if (iass)
2844 drbd_start_resync(device, C_SYNC_SOURCE);
2845 else
2846 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
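/* Who becomes sync source: if the roles differ, the primary has the
 * good data; with equal roles, fall back to the RESOLVE_CONFLICTS bit
 * as a tie breaker agreed on during the handshake. The sync target
 * side waits in C_WF_SYNC_UUID for the source to initiate. */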
2849 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2851 struct drbd_config_context adm_ctx;
2852 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2853 struct resize_parms rs;
2854 struct drbd_device *device;
2855 enum drbd_ret_code retcode;
2856 enum determine_dev_size dd;
2857 bool change_al_layout = false;
2858 enum dds_flags ddsf;
2859 sector_t u_size;
2860 int err;
2862 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2863 if (!adm_ctx.reply_skb)
2865 if (retcode != NO_ERROR)
2868 mutex_lock(&adm_ctx.resource->adm_mutex);
2869 device = adm_ctx.device;
2870 if (!get_ldev(device)) {
2871 retcode = ERR_NO_DISK;
2875 memset(&rs, 0, sizeof(struct resize_parms));
2876 rs.al_stripes = device->ldev->md.al_stripes;
2877 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2878 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2879 err = resize_parms_from_attrs(&rs, info);
2880 if (err) {
2881 retcode = ERR_MANDATORY_TAG;
2882 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2887 if (device->state.conn > C_CONNECTED) {
2888 retcode = ERR_RESIZE_RESYNC;
2892 if (device->state.role == R_SECONDARY &&
2893 device->state.peer == R_SECONDARY) {
2894 retcode = ERR_NO_PRIMARY;
2898 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2899 retcode = ERR_NEED_APV_93;
2904 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2906 if (u_size != (sector_t)rs.resize_size) {
2907 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2908 if (!new_disk_conf) {
2909 retcode = ERR_NOMEM;
2914 if (device->ldev->md.al_stripes != rs.al_stripes ||
2915 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2916 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2918 if (al_size_k > (16 * 1024 * 1024)) {
2919 retcode = ERR_MD_LAYOUT_TOO_BIG;
2923 if (al_size_k < MD_32kB_SECT/2) {
2924 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2928 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2929 retcode = ERR_MD_LAYOUT_CONNECTED;
2933 change_al_layout = true;
2936 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2937 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2939 if (new_disk_conf) {
2940 mutex_lock(&device->resource->conf_update);
2941 old_disk_conf = device->ldev->disk_conf;
2942 *new_disk_conf = *old_disk_conf;
2943 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2944 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2945 mutex_unlock(&device->resource->conf_update);
2946 synchronize_rcu();
2947 kfree(old_disk_conf);
2948 new_disk_conf = NULL;
2951 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2952 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2953 drbd_md_sync(device);
2955 if (dd == DS_ERROR) {
2956 retcode = ERR_NOMEM_BITMAP;
2958 } else if (dd == DS_ERROR_SPACE_MD) {
2959 retcode = ERR_MD_LAYOUT_NO_FIT;
2961 } else if (dd == DS_ERROR_SHRINK) {
2962 retcode = ERR_IMPLICIT_SHRINK;
2966 if (device->state.conn == C_CONNECTED) {
2968 set_bit(RESIZE_PENDING, &device->flags);
2970 drbd_send_uuids(first_peer_device(device));
2971 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2975 mutex_unlock(&adm_ctx.resource->adm_mutex);
2977 drbd_adm_finish(&adm_ctx, info, retcode);
2982 kfree(new_disk_conf);
2986 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2988 struct drbd_config_context adm_ctx;
2989 enum drbd_ret_code retcode;
2990 struct res_opts res_opts;
2991 int err;
2993 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2994 if (!adm_ctx.reply_skb)
2996 if (retcode != NO_ERROR)
2999 res_opts = adm_ctx.resource->res_opts;
3000 if (should_set_defaults(info))
3001 set_res_opts_defaults(&res_opts);
3003 err = res_opts_from_attrs(&res_opts, info);
3004 if (err && err != -ENOMSG) {
3005 retcode = ERR_MANDATORY_TAG;
3006 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3010 mutex_lock(&adm_ctx.resource->adm_mutex);
3011 err = set_resource_options(adm_ctx.resource, &res_opts);
3012 if (err) {
3013 retcode = ERR_INVALID_REQUEST;
3014 if (err == -ENOMEM)
3015 retcode = ERR_NOMEM;
3017 mutex_unlock(&adm_ctx.resource->adm_mutex);
3020 drbd_adm_finish(&adm_ctx, info, retcode);
3024 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
3026 struct drbd_config_context adm_ctx;
3027 struct drbd_device *device;
3028 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3030 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3031 if (!adm_ctx.reply_skb)
3033 if (retcode != NO_ERROR)
3036 device = adm_ctx.device;
3037 if (!get_ldev(device)) {
3038 retcode = ERR_NO_DISK;
3042 mutex_lock(&adm_ctx.resource->adm_mutex);
3044 /* If there is still bitmap IO pending, probably because of a previous
3045 * resync just being finished, wait for it before requesting a new resync.
3046 * Also wait for its after_state_ch(). */
3047 drbd_suspend_io(device);
3048 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3049 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3051 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
3052 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
3053 * try to start a resync handshake as sync target for full sync.
3054 */
3055 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
3056 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
3057 if (retcode >= SS_SUCCESS) {
3058 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
3059 "set_n_write from invalidate", BM_LOCKED_MASK))
3060 retcode = ERR_IO_MD_DISK;
3063 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
3064 drbd_resume_io(device);
3065 mutex_unlock(&adm_ctx.resource->adm_mutex);
3068 drbd_adm_finish(&adm_ctx, info, retcode);
3072 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
3073 union drbd_state mask, union drbd_state val)
3075 struct drbd_config_context adm_ctx;
3076 enum drbd_ret_code retcode;
3078 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3079 if (!adm_ctx.reply_skb)
3081 if (retcode != NO_ERROR)
3084 mutex_lock(&adm_ctx.resource->adm_mutex);
3085 retcode = drbd_request_state(adm_ctx.device, mask, val);
3086 mutex_unlock(&adm_ctx.resource->adm_mutex);
3088 drbd_adm_finish(&adm_ctx, info, retcode);
3092 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
3094 int rv;
3096 rv = drbd_bmio_set_n_write(device);
3097 drbd_suspend_al(device);
3098 return rv;
3101 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3103 struct drbd_config_context adm_ctx;
3104 int retcode; /* drbd_ret_code, drbd_state_rv */
3105 struct drbd_device *device;
3107 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3108 if (!adm_ctx.reply_skb)
3110 if (retcode != NO_ERROR)
3113 device = adm_ctx.device;
3114 if (!get_ldev(device)) {
3115 retcode = ERR_NO_DISK;
3119 mutex_lock(&adm_ctx.resource->adm_mutex);
3121 /* If there is still bitmap IO pending, probably because of a previous
3122 * resync just being finished, wait for it before requesting a new resync.
3123 * Also wait for its after_state_ch(). */
3124 drbd_suspend_io(device);
3125 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3126 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3128 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3129 * in the bitmap. Otherwise, try to start a resync handshake
3130 * as sync source for full sync.
3131 */
3132 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3133 /* The peer will get a resync upon connect anyways. Just make that
3134 into a full resync. */
3135 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3136 if (retcode >= SS_SUCCESS) {
3137 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3138 "set_n_write from invalidate_peer",
3139 BM_LOCKED_SET_ALLOWED))
3140 retcode = ERR_IO_MD_DISK;
3143 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3144 drbd_resume_io(device);
3145 mutex_unlock(&adm_ctx.resource->adm_mutex);
3148 drbd_adm_finish(&adm_ctx, info, retcode);
3152 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3154 struct drbd_config_context adm_ctx;
3155 enum drbd_ret_code retcode;
3157 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3158 if (!adm_ctx.reply_skb)
3160 if (retcode != NO_ERROR)
3163 mutex_lock(&adm_ctx.resource->adm_mutex);
3164 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3165 retcode = ERR_PAUSE_IS_SET;
3166 mutex_unlock(&adm_ctx.resource->adm_mutex);
3168 drbd_adm_finish(&adm_ctx, info, retcode);
3172 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3174 struct drbd_config_context adm_ctx;
3175 union drbd_dev_state s;
3176 enum drbd_ret_code retcode;
3178 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3179 if (!adm_ctx.reply_skb)
3181 if (retcode != NO_ERROR)
3184 mutex_lock(&adm_ctx.resource->adm_mutex);
3185 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3186 s = adm_ctx.device->state;
3187 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3188 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3189 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3190 } else {
3191 retcode = ERR_PAUSE_IS_CLEAR;
3194 mutex_unlock(&adm_ctx.resource->adm_mutex);
3196 drbd_adm_finish(&adm_ctx, info, retcode);
3200 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3202 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3205 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3207 struct drbd_config_context adm_ctx;
3208 struct drbd_device *device;
3209 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3211 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3212 if (!adm_ctx.reply_skb)
3214 if (retcode != NO_ERROR)
3217 mutex_lock(&adm_ctx.resource->adm_mutex);
3218 device = adm_ctx.device;
3219 if (test_bit(NEW_CUR_UUID, &device->flags)) {
3220 if (get_ldev_if_state(device, D_ATTACHING)) {
3221 drbd_uuid_new_current(device);
3224 /* This is effectively a multi-stage "forced down".
3225 * The NEW_CUR_UUID bit is supposedly only set, if we
3226 * lost the replication connection, and are configured
3227 * to freeze IO and wait for some fence-peer handler.
3228 * So we still don't have a replication connection.
3229 * And now we don't have a local disk either. After
3230 * resume, we will fail all pending and new IO, because
3231 * we don't have any data anymore. Which means we will
3232 * eventually be able to terminate all users of this
3233 * device, and then take it down. By bumping the
3234 * "effective" data uuid, we make sure that you really
3235 * need to tear down before you reconfigure, we will
3236 * then refuse to re-connect or re-attach (because no
3237 * matching real data uuid exists).
3238 */
3239 u64 val;
3240 get_random_bytes(&val, sizeof(u64));
3241 drbd_set_ed_uuid(device, val);
3242 drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3244 clear_bit(NEW_CUR_UUID, &device->flags);
3246 drbd_suspend_io(device);
3247 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3248 if (retcode == SS_SUCCESS) {
3249 if (device->state.conn < C_CONNECTED)
3250 tl_clear(first_peer_device(device)->connection);
3251 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3252 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3254 drbd_resume_io(device);
3255 mutex_unlock(&adm_ctx.resource->adm_mutex);
3257 drbd_adm_finish(&adm_ctx, info, retcode);
3261 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3263 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
3266 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3267 struct drbd_resource *resource,
3268 struct drbd_connection *connection,
3269 struct drbd_device *device)
3272 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
3273 if (!nla)
3274 goto nla_put_failure;
3275 if (device &&
3276 nla_put_u32(skb, T_ctx_volume, device->vnr))
3277 goto nla_put_failure;
3278 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3279 goto nla_put_failure;
3280 if (connection) {
3281 if (connection->my_addr_len &&
3282 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3283 goto nla_put_failure;
3284 if (connection->peer_addr_len &&
3285 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3286 goto nla_put_failure;
3288 nla_nest_end(skb, nla);
3293 nla_nest_cancel(skb, nla);
3298 * The generic netlink dump callbacks are called outside the genl_lock(), so
3299 * they cannot use the simple attribute parsing code which uses global
3302 static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3304 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3305 const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3308 nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3309 DRBD_NLA_CFG_CONTEXT);
3310 if (!nla)
3311 return NULL;
3312 return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
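/* A netlink dump callback is re-invoked until it returns 0, so all
 * iteration state has to live in cb->args[]: args[0] caches a kref'd
 * pointer to the current resource, and, depending on the dump,
 * args[1] and args[2] hold the next minor number or a connection or
 * peer_device cursor. */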
3315 static void resource_to_info(struct resource_info *, struct drbd_resource *);
3317 int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3319 struct drbd_genlmsghdr *dh;
3320 struct drbd_resource *resource;
3321 struct resource_info resource_info;
3322 struct resource_statistics resource_statistics;
3327 for_each_resource_rcu(resource, &drbd_resources)
3328 if (resource == (struct drbd_resource *)cb->args[0])
3329 goto found_resource;
3330 err = 0; /* resource was probably deleted */
3333 resource = list_entry(&drbd_resources,
3334 struct drbd_resource, resources);
3337 list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3344 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3345 cb->nlh->nlmsg_seq, &drbd_genl_family,
3346 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3351 dh->ret_code = NO_ERROR;
3352 err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3355 err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3358 resource_to_info(&resource_info, resource);
3359 err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3362 resource_statistics.res_stat_write_ordering = resource->write_ordering;
3363 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3366 cb->args[0] = (long)resource;
3367 genlmsg_end(skb, dh);
3377 static void device_to_statistics(struct device_statistics *s,
3378 struct drbd_device *device)
3380 memset(s, 0, sizeof(*s));
3381 s->dev_upper_blocked = !may_inc_ap_bio(device);
3382 if (get_ldev(device)) {
3383 struct drbd_md *md = &device->ldev->md;
3384 u64 *history_uuids = (u64 *)s->history_uuids;
3385 struct request_queue *q;
3388 spin_lock_irq(&md->uuid_lock);
3389 s->dev_current_uuid = md->uuid[UI_CURRENT];
3390 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3391 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3392 history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3393 for (; n < HISTORY_UUIDS; n++)
3394 history_uuids[n] = 0;
3395 s->history_uuids_len = HISTORY_UUIDS;
3396 spin_unlock_irq(&md->uuid_lock);
3398 s->dev_disk_flags = md->flags;
3399 q = bdev_get_queue(device->ldev->backing_bdev);
3400 s->dev_lower_blocked =
3401 bdi_congested(q->backing_dev_info,
3402 (1 << WB_async_congested) |
3403 (1 << WB_sync_congested));
3406 s->dev_size = drbd_get_capacity(device->this_bdev);
3407 s->dev_read = device->read_cnt;
3408 s->dev_write = device->writ_cnt;
3409 s->dev_al_writes = device->al_writ_cnt;
3410 s->dev_bm_writes = device->bm_writ_cnt;
3411 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3412 s->dev_lower_pending = atomic_read(&device->local_cnt);
3413 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3414 s->dev_exposed_data_uuid = device->ed_uuid;
3417 static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3420 struct drbd_resource *resource =
3421 (struct drbd_resource *)cb->args[0];
3422 kref_put(&resource->kref, drbd_destroy_resource);
3428 int drbd_adm_dump_devices_done(struct netlink_callback *cb) {
3429 return put_resource_in_arg0(cb, 7);
3432 static void device_to_info(struct device_info *, struct drbd_device *);
3434 int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3436 struct nlattr *resource_filter;
3437 struct drbd_resource *resource;
3438 struct drbd_device *uninitialized_var(device);
3439 int minor, err, retcode;
3440 struct drbd_genlmsghdr *dh;
3441 struct device_info device_info;
3442 struct device_statistics device_statistics;
3443 struct idr *idr_to_search;
3445 resource = (struct drbd_resource *)cb->args[0];
3446 if (!cb->args[0] && !cb->args[1]) {
3447 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3448 if (resource_filter) {
3449 retcode = ERR_RES_NOT_KNOWN;
3450 resource = drbd_find_resource(nla_data(resource_filter));
3451 if (!resource)
3452 goto put_result;
3453 cb->args[0] = (long)resource;
3458 minor = cb->args[1];
3459 idr_to_search = resource ? &resource->devices : &drbd_devices;
3460 device = idr_get_next(idr_to_search, &minor);
3465 idr_for_each_entry_continue(idr_to_search, device, minor) {
3467 goto put_result; /* only one iteration */
3470 goto out; /* no more devices */
3473 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3474 cb->nlh->nlmsg_seq, &drbd_genl_family,
3475 NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3479 dh->ret_code = retcode;
3481 if (retcode == NO_ERROR) {
3482 dh->minor = device->minor;
3483 err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3486 if (get_ldev(device)) {
3487 struct disk_conf *disk_conf =
3488 rcu_dereference(device->ldev->disk_conf);
3490 err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3495 device_to_info(&device_info, device);
3496 err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3500 device_to_statistics(&device_statistics, device);
3501 err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3504 cb->args[1] = minor + 1;
3506 genlmsg_end(skb, dh);
3516 int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3518 return put_resource_in_arg0(cb, 6);
3521 enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3523 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3525 struct nlattr *resource_filter;
3526 struct drbd_resource *resource = NULL, *next_resource;
3527 struct drbd_connection *uninitialized_var(connection);
3528 int err = 0, retcode;
3529 struct drbd_genlmsghdr *dh;
3530 struct connection_info connection_info;
3531 struct connection_statistics connection_statistics;
3534 resource = (struct drbd_resource *)cb->args[0];
3536 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3537 if (resource_filter) {
3538 retcode = ERR_RES_NOT_KNOWN;
3539 resource = drbd_find_resource(nla_data(resource_filter));
3542 cb->args[0] = (long)resource;
3543 cb->args[1] = SINGLE_RESOURCE;
3547 if (list_empty(&drbd_resources))
3549 resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3550 kref_get(&resource->kref);
3551 cb->args[0] = (long)resource;
3552 cb->args[1] = ITERATE_RESOURCES;
3557 mutex_lock(&resource->conf_update);
3560 for_each_connection_rcu(connection, resource)
3561 if (connection == (struct drbd_connection *)cb->args[2])
3562 goto found_connection;
3563 /* connection was probably deleted */
3564 goto no_more_connections;
3566 connection = list_entry(&resource->connections, struct drbd_connection, connections);
3569 list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3570 if (!has_net_conf(connection))
3573 goto put_result; /* only one iteration */
3576 no_more_connections:
3577 if (cb->args[1] == ITERATE_RESOURCES) {
3578 for_each_resource_rcu(next_resource, &drbd_resources) {
3579 if (next_resource == resource)
3580 goto found_resource;
3582 /* resource was probably deleted */
3587 list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3588 mutex_unlock(&resource->conf_update);
3589 kref_put(&resource->kref, drbd_destroy_resource);
3590 resource = next_resource;
3591 kref_get(&resource->kref);
3592 cb->args[0] = (long)resource;
3596 goto out; /* no more resources */
3599 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3600 cb->nlh->nlmsg_seq, &drbd_genl_family,
3601 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3605 dh->ret_code = retcode;
3607 if (retcode == NO_ERROR) {
3608 struct net_conf *net_conf;
3610 err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3613 net_conf = rcu_dereference(connection->net_conf);
3615 err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3619 connection_to_info(&connection_info, connection);
3620 err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3623 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3624 err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3627 cb->args[2] = (long)connection;
3629 genlmsg_end(skb, dh);
3635 mutex_unlock(&resource->conf_update);
3641 enum mdf_peer_flag {
3642 MDF_PEER_CONNECTED = 1 << 0,
3643 MDF_PEER_OUTDATED = 1 << 1,
3644 MDF_PEER_FENCING = 1 << 2,
3645 MDF_PEER_FULL_SYNC = 1 << 3,
3648 static void peer_device_to_statistics(struct peer_device_statistics *s,
3649 struct drbd_peer_device *peer_device)
3651 struct drbd_device *device = peer_device->device;
3653 memset(s, 0, sizeof(*s));
3654 s->peer_dev_received = device->recv_cnt;
3655 s->peer_dev_sent = device->send_cnt;
3656 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3657 atomic_read(&device->rs_pending_cnt);
3658 s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3659 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3660 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
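/* One bitmap bit covers a block of (1 << BM_BLOCK_SHIFT) bytes, so
 * shifting a bit count left by (BM_BLOCK_SHIFT - 9) converts it into
 * 512-byte sectors, the unit these statistics are reported in. */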
3661 if (get_ldev(device)) {
3662 struct drbd_md *md = &device->ldev->md;
3664 spin_lock_irq(&md->uuid_lock);
3665 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3666 spin_unlock_irq(&md->uuid_lock);
3667 s->peer_dev_flags =
3668 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3669 MDF_PEER_CONNECTED : 0) +
3670 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3671 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3672 MDF_PEER_OUTDATED : 0) +
3673 /* FIXME: MDF_PEER_FENCING? */
3674 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3675 MDF_PEER_FULL_SYNC : 0);
3680 int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3682 return put_resource_in_arg0(cb, 9);
3685 int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3687 struct nlattr *resource_filter;
3688 struct drbd_resource *resource;
3689 struct drbd_device *uninitialized_var(device);
3690 struct drbd_peer_device *peer_device = NULL;
3691 int minor, err, retcode;
3692 struct drbd_genlmsghdr *dh;
3693 struct idr *idr_to_search;
3695 resource = (struct drbd_resource *)cb->args[0];
3696 if (!cb->args[0] && !cb->args[1]) {
3697 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3698 if (resource_filter) {
3699 retcode = ERR_RES_NOT_KNOWN;
3700 resource = drbd_find_resource(nla_data(resource_filter));
3704 cb->args[0] = (long)resource;
3708 minor = cb->args[1];
3709 idr_to_search = resource ? &resource->devices : &drbd_devices;
3710 device = idr_find(idr_to_search, minor);
3715 device = idr_get_next(idr_to_search, &minor);
3722 for_each_peer_device(peer_device, device)
3723 if (peer_device == (struct drbd_peer_device *)cb->args[2])
3724 goto found_peer_device;
3725 /* peer device was probably deleted */
3728 /* Make peer_device point to the list head (not the first entry). */
3729 peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3732 list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3733 if (!has_net_conf(peer_device->connection))
3736 goto put_result; /* only one iteration */
3741 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3742 cb->nlh->nlmsg_seq, &drbd_genl_family,
3743 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3747 dh->ret_code = retcode;
3749 if (retcode == NO_ERROR) {
3750 struct peer_device_info peer_device_info;
3751 struct peer_device_statistics peer_device_statistics;
3754 err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3757 peer_device_to_info(&peer_device_info, peer_device);
3758 err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3761 peer_device_to_statistics(&peer_device_statistics, peer_device);
3762 err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3765 cb->args[1] = minor;
3766 cb->args[2] = (long)peer_device;
3768 genlmsg_end(skb, dh);
3778 * Return the connection of @resource if @resource has exactly one connection.
3780 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3782 struct list_head *connections = &resource->connections;
3784 if (list_empty(connections) || connections->next->next != connections)
3786 return list_first_entry(&resource->connections, struct drbd_connection, connections);
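/* list_empty() rules out zero connections; connections->next->next !=
 * connections rules out more than one: only for exactly one element
 * does the first entry's next pointer point back at the list head. */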
3789 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3790 const struct sib_info *sib)
3792 struct drbd_resource *resource = device->resource;
3793 struct state_info *si = NULL; /* for sizeof(si->member); */
3794 struct nlattr *nla;
3795 int got_ldev;
3796 int err = 0;
3797 int exclude_sensitive;
3799 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3800 * to. So we better exclude_sensitive information.
3802 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3803 * in the context of the requesting user process. Exclude sensitive
3804 * information, unless current has superuser.
3806 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3807 * relies on the current implementation of netlink_dump(), which
3808 * executes the dump callback successively from netlink_recvmsg(),
3809 * always in the context of the receiving process */
3810 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3812 got_ldev = get_ldev(device);
3814 /* We need to add connection name and volume number information still.
3815 * Minor number is in drbd_genlmsghdr. */
3816 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3817 goto nla_put_failure;
3819 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3820 goto nla_put_failure;
3824 struct disk_conf *disk_conf;
3826 disk_conf = rcu_dereference(device->ldev->disk_conf);
3827 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3830 struct net_conf *nc;
3832 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3834 err = net_conf_to_skb(skb, nc, exclude_sensitive);
3838 goto nla_put_failure;
3840 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
3842 goto nla_put_failure;
3843 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3844 nla_put_u32(skb, T_current_state, device->state.i) ||
3845 nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3846 nla_put_u64_0pad(skb, T_capacity,
3847 drbd_get_capacity(device->this_bdev)) ||
3848 nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3849 nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3850 nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3851 nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3852 nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3853 nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3854 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3855 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3856 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3857 goto nla_put_failure;
3862 spin_lock_irq(&device->ldev->md.uuid_lock);
3863 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3864 spin_unlock_irq(&device->ldev->md.uuid_lock);
3867 goto nla_put_failure;
3869 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3870 nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3871 nla_put_u64_0pad(skb, T_bits_oos,
3872 drbd_bm_total_weight(device)))
3873 goto nla_put_failure;
3874 if (C_SYNC_SOURCE <= device->state.conn &&
3875 C_PAUSED_SYNC_T >= device->state.conn) {
3876 if (nla_put_u64_0pad(skb, T_bits_rs_total,
3877 device->rs_total) ||
3878 nla_put_u64_0pad(skb, T_bits_rs_failed,
3879 device->rs_failed))
3880 goto nla_put_failure;
	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(device);
	return err;
}
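/* Note on the error path above: the "if (0) nla_put_failure:" construct lets
 * the success path fall past the label without touching err, while every
 * "goto nla_put_failure" lands on err = -EMSGSIZE; both paths then share the
 * put_ldev() cleanup. */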
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_device *device;
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
	struct drbd_resource *resource = NULL;
	struct drbd_resource *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
	 *	connection = "first connection of resource or undefined";
	 *	idr_for_each_entry(&resource->devices, device, i) {
	 *	  ...
	 *	}
	 * }
	 * where resource is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the device won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */
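	/* Sketch of the cb->args[] slots as used by this dump (summarizing
	 * the comment above):
	 *   args[0]: struct drbd_resource * to resume from (iterator position)
	 *   args[1]: next volume number within that resource
	 *   args[2]: non-zero = dump only this one resource ("filter" mark)
	 */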
	/* synchronize with conn_create()/drbd_destroy_connection() */
	rcu_read_lock();
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			resource = pos;
			break;
		}
		if (tmp == pos) {
			resource = pos;
			break;
		}
	}
	if (resource) {
next_resource:
		device = idr_get_next(&resource->devices, &volume);
		if (!device) {
			/* No more volumes to dump on this resource.
			 * Advance resource iterator. */
			pos = list_entry_rcu(resource->resources.next,
					     struct drbd_resource, resources);
			/* Did we dump any volume of this resource yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->resources == &drbd_resources || cb->args[2])
					goto out;
				volume = 0;
				resource = pos;
				goto next_resource;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!device) {
			/* This is a connection without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct drbd_connection *connection;

			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			connection = the_only_connection(resource);
			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
				goto cancel;
			if (connection) {
				struct net_conf *nc;

				nc = rcu_dereference(connection->net_conf);
				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
					goto cancel;
			}
			goto done;
		}

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}
out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
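/* A dump callback that returns skb->len > 0 is invoked again for the next
 * message; returning 0 ends the dump and lets the netlink core emit
 * NLMSG_DONE.  That is what the "empty skb terminates" comment relies on. */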
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_resource *resource;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
		       nlmsg_attrlen(cb->nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	if (!*resource_name)
		return -ENODEV;
	resource = drbd_find_resource(resource_name);
	if (!resource)
		return -ENODEV;

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

dump:
	return get_one_status(skb, cb);
}
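/* Illustration (userspace side, not part of this file): a client reaches this
 * handler by sending a DRBD_ADM_GET_STATUS request with NLM_F_DUMP set in
 * nlmsg_flags; optionally it nests a DRBD_NLA_CFG_CONTEXT attribute carrying
 * T_ctx_resource_name to restrict the dump to a single resource. */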
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
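	/* Illustration: with DRBD's 4 KiB bitmap granularity, BM_SECT_PER_BIT
	 * is 8 (512-byte sectors per bitmap bit), so the mask above rounds
	 * the start sector down to the first sector covered by its bit. */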
	device->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assumed to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
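	/* Net effect of the two calls above (sketch, per the inline
	 * comments): the old bitmap UUID is rotated into the history slots,
	 * and a freshly generated current UUID takes its place, with the
	 * previous current value preserved as the new bitmap UUID. */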
	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
{
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
}
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_connection *connection;
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx.resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

	if (connection) {
		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);
	} else
		retcode = ERR_NOMEM;

out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
{
	info->dev_disk_state = device->state.disk;
}
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;

		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			peer_devices++;
		}

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
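		/* NOTIFY_CONTINUES flags every event of this compound CREATE
		 * notification except the last one: the counter taken above
		 * reaches zero exactly at the final peer-device event. */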
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
				continue;
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		}
		mutex_unlock(&notification_mutex);
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		}
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	synchronize_rcu();
	drbd_free_resource(resource);
	return 0;
}
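/* Note: list_del_rcu() above only unlinks the resource; the synchronize_rcu()
 * before drbd_free_resource() makes sure no RCU reader (such as the status
 * dump's for_each_resource_rcu()) can still be traversing it. */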
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	unsigned i;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}

static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
{
	struct drbd_notification_header nh = {
		.nh_type = type,
	};

	return drbd_notification_header_to_skb(msg, &nh, true);
}
void notify_resource_state(struct sk_buff *skb,
			   unsigned int seq,
			   struct drbd_resource *resource,
			   struct resource_info *resource_info,
			   enum drbd_notification_type type)
{
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
			err, seq);
}
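/* The notify_*_state() helpers below all share this skeleton: with skb == NULL
 * they allocate a new message and multicast it; with a non-NULL skb (as used
 * by the initial-state dump) they only append to the caller's message. */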
void notify_device_state(struct sk_buff *skb,
			 unsigned int seq,
			 struct drbd_device *device,
			 struct device_info *device_info,
			 enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_connection_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_peer_device_state(struct sk_buff *skb,
			      unsigned int seq,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	err = -ENOMEM;
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
}

static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}

static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}
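/* Worked example: a resource with one connection and two volumes yields
 * 1 (resource) + 1 (connection) + 2 (devices) + 2*1 (peer devices)
 * = 6 notifications. */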
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here:  it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		notify_resource_state_change(skb, seq, state_change->resource,
					     NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return skb->len;
}
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);	/* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}
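/* Accounting sketch for cb->args[5] above: it starts at 2 plus the total
 * notification count, and get_initial_state() decrements it once per call,
 * so after all existing objects have been dumped exactly one call is left
 * over to emit the DRBD_INITIAL_STATE_DONE message. */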