// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <linux/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by bdev_open_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

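/* Finalize a prepared reply skb and send it back to the sender of @info. */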
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

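/* printf-style variant of drbd_msg_put_info(): formats into a reserved
 * T_info_text attribute, truncating at 256 bytes. */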
__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len+1);
	nlmsg_trim(skb, (char*)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = genl_info_userhdr(info);
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

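/* Counterpart to drbd_adm_prepare(): drop the references taken there,
 * fill in the return code, and send the prepared reply. */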
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}

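/* Fill the DRBD_PEER_AF and DRBD_PEER_ADDRESS entries of a helper
 * environment from the connection's peer address. */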
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

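/* Synchronously run the user mode helper for a device ("minor-<n>"),
 * broadcasting helper pre/post events to interested listeners. */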
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

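/* Like drbd_khelper(), but for a whole connection; the helper gets the
 * resource name instead of a minor number. */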
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

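/* Find the most stringent fencing policy configured on any disk of this
 * connection that is at least Consistent. */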
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

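/* Try to fence the peer by running the fence-peer helper and applying
 * the resulting state change.  Returns true if the peer disk may be
 * assumed to be Outdated (or fencing is configured as "dont-care"). */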
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

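/* Change the role of a device to Primary or Secondary, retrying a few
 * times and, if @force is set, possibly forcing the local disk UpToDate
 * or fencing the peer along the way. */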
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

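/* Netlink handler for DRBD_ADM_PRIMARY/DRBD_ADM_SECONDARY. */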
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;
	enum drbd_state_rv rv;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
	drbd_adm_finish(&adm_ctx, info, rv);
	return 0;
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
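/* Rough example, assuming internal meta data and the (still fixed) 32kB
 * activity log, i.e. al_size_sect = 64: the bitmap tracks 4kB of data
 * per bit, so one 512 byte bitmap sector covers 16MB; a 1TB backing
 * device thus needs 65536 bitmap sectors, and md_size_sect comes out at
 * roughly 65536 + 64 + 8 sectors, plus alignment. */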
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
		break;
	}
}

/* input size is expected to be in KB */
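/* e.g. ppsize(buf, 16777216) (16 GiB worth of KB) yields "16 GB" */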
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}

/*
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (get_capacity(device->vdisk) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		timer_delete(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK, NULL);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}

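/* Compute a new proposed device size: the minimum of our own and the
 * peer's capacity if both are known, otherwise falling back to the last
 * agreed size; a user configured size takes precedence if it fits. */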
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/*
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}

static unsigned int drbd_max_peer_bio_size(struct drbd_device *device)
{
	/*
	 * We may ignore peer limits if the peer is modern enough. From 8.3.8
	 * onwards the peer can use multiple BIOs for a single peer_request.
	 */
	if (device->state.conn < C_WF_REPORT_PARAMS)
		return device->peer_max_bio_size;

	if (first_peer_device(device)->connection->agreed_pro_version < 94)
		return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);

	/*
	 * Correct old drbd (up to 8.3.7) if it believes it can do more than
	 * 32KiB.
	 */
	if (first_peer_device(device)->connection->agreed_pro_version == 94)
		return DRBD_MAX_SIZE_H80_PACKET;

	/*
	 * drbd 8.3.8 onwards, before 8.4.0
	 */
	if (first_peer_device(device)->connection->agreed_pro_version < 100)
		return DRBD_MAX_BIO_SIZE_P95;
	return DRBD_MAX_BIO_SIZE;
}

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}

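/* Discards are usable only if the backing device (if any) supports them
 * and a connected peer announced the TRIM feature. */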
static bool drbd_discard_supported(struct drbd_connection *connection,
		struct drbd_backing_dev *bdev)
{
	if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
		return false;

	if (connection->cstate >= C_CONNECTED &&
	    !(connection->agreed_features & DRBD_FF_TRIM)) {
		drbd_info(connection,
			"peer DRBD too old, does not support TRIM: disabling discards\n");
		return false;
	}

	return true;
}

/* This is the workaround for "bio would need to, but cannot, be split" */
static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
{
	unsigned int max_segments;

	rcu_read_lock();
	max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
	rcu_read_unlock();

	if (!max_segments)
		return BLK_MAX_SEGMENTS;
	return max_segments;
}

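/* Re-derive the request queue limits of a DRBD device from its backing
 * device (if attached) and the features agreed with the peer, and
 * commit them atomically. */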
void drbd_reconsider_queue_parameters(struct drbd_device *device,
		struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	struct drbd_connection *connection =
		first_peer_device(device)->connection;
	struct request_queue * const q = device->rq_queue;
	unsigned int now = queue_max_hw_sectors(q) << 9;
	struct queue_limits lim;
	struct request_queue *b = NULL;
	unsigned int new;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		device->local_max_bio_size =
			queue_max_hw_sectors(b) << SECTOR_SHIFT;
	}

	/*
	 * We may later detach and re-attach on a disconnected Primary.  Avoid
	 * decreasing the value in this case.
	 *
	 * We want to store what we know the peer DRBD can handle, not what the
	 * peer IO backend can handle.
	 */
	new = min3(DRBD_MAX_BIO_SIZE, device->local_max_bio_size,
		max(drbd_max_peer_bio_size(device), device->peer_max_bio_size));
	if (new != now) {
		if (device->state.role == R_PRIMARY && new < now)
			drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n",
					new, now);
		drbd_info(device, "max BIO size = %u\n", new);
	}

	lim = queue_limits_start_update(q);
	if (bdev) {
		blk_set_stacking_limits(&lim);
		lim.max_segments = drbd_backing_dev_max_segments(device);
	} else {
		lim.max_segments = BLK_MAX_SEGMENTS;
	}

	lim.max_hw_sectors = new >> SECTOR_SHIFT;
	lim.seg_boundary_mask = PAGE_SIZE - 1;

	/*
	 * We don't care for the granularity, really.
	 *
	 * Stacking limits below should fix it for the local device. Whether or
	 * not it is a suitable granularity on the remote device is not our
	 * problem, really. If you care, you need to use devices with similar
	 * topology on all peers.
	 */
	if (drbd_discard_supported(connection, bdev)) {
		lim.discard_granularity = 512;
		lim.max_hw_discard_sectors =
			drbd_max_discard_sectors(connection);
	} else {
		lim.discard_granularity = 0;
		lim.max_hw_discard_sectors = 0;
	}

	if (bdev)
		blk_stack_limits(&lim, &b->limits, 0);

	/*
	 * If we can handle "zeroes" efficiently on the protocol, we want to do
	 * that, even if our backend does not announce max_write_zeroes_sectors
	 * itself.
	 */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		lim.max_write_zeroes_sectors = 0;

	if ((lim.discard_granularity >> SECTOR_SHIFT) >
	    lim.max_hw_discard_sectors) {
		lim.discard_granularity = 0;
		lim.max_hw_discard_sectors = 0;
	}

	if (queue_limits_commit_update(q, &lim))
		drbd_err(device, "setting new queue limits failed\n");
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = genl_info_userhdr(info);

	return 0 != (dh->flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 slot numbers of context information per
	 * transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}

static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}

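/* Clamp user supplied disk options to what this backing device can
 * actually support: al-extents into the valid range, and
 * rs-discard-granularity aligned to the backend's discard granularity
 * (or disabled entirely if the backend does not support discards). */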
static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct block_device *bdev = nbc->backing_bdev;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!bdev_max_discard_sectors(bdev)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
		unsigned int discard_granularity = bdev_discard_granularity(bdev);
		int remainder;

		if (discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = discard_granularity;

		remainder = disk_conf->rs_discard_granularity %
				discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > discard_size)
			disk_conf->rs_discard_granularity = discard_size;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}

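/* Resize the activity log if al-extents changed, but give up with
 * -EBUSY instead of blocking when IO completion is currently stuck. */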
f708bd08
LE
1470static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1471{
1472 int err = -EBUSY;
1473
1474 if (device->act_log &&
1475 device->act_log->nr_elements == dc->al_extents)
1476 return 0;
1477
1478 drbd_suspend_io(device);
1479 /* If IO completion is currently blocked, we would likely wait
1480 * "forever" for the activity log to become unused. So we don't. */
1481 if (atomic_read(&device->ap_bio_cnt))
1482 goto out;
1483
1484 wait_event(device->al_wait, lc_try_lock(device->act_log));
1485 drbd_al_shrink(device);
1486 err = drbd_check_al_size(device, dc);
1487 lc_unlock(device->act_log);
1488 wake_up(&device->al_wait);
1489out:
1490 drbd_resume_io(device);
1491 return err;
1492}
1493
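/* Netlink handler for changing the disk options of an attached device.
 * A copy of the current disk_conf is modified from the netlink attributes
 * (or reset to defaults), sanitized, and published with rcu_assign_pointer();
 * the activity log and the resync fifo are resized if the new values require
 * it, and the old disk_conf is freed after an RCU grace period. */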
f399002e
LE
1494int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1495{
a910b123 1496 struct drbd_config_context adm_ctx;
f399002e 1497 enum drbd_ret_code retcode;
b30ab791 1498 struct drbd_device *device;
daeda1cc 1499 struct disk_conf *new_disk_conf, *old_disk_conf;
813472ce 1500 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
6a365874
SK
1501 int err;
1502 unsigned int fifo_size;
f399002e 1503
a910b123 1504 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
f399002e
LE
1505 if (!adm_ctx.reply_skb)
1506 return retcode;
1507 if (retcode != NO_ERROR)
9e276872 1508 goto finish;
f399002e 1509
b30ab791 1510 device = adm_ctx.device;
9e276872 1511 mutex_lock(&adm_ctx.resource->adm_mutex);
f399002e
LE
1512
1513 /* we also need a disk
1514 * to change the options on */
b30ab791 1515 if (!get_ldev(device)) {
f399002e
LE
1516 retcode = ERR_NO_DISK;
1517 goto out;
1518 }
1519
daeda1cc 1520 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
5ecc72c3 1521 if (!new_disk_conf) {
f399002e
LE
1522 retcode = ERR_NOMEM;
1523 goto fail;
1524 }
1525
0500813f 1526 mutex_lock(&device->resource->conf_update);
b30ab791 1527 old_disk_conf = device->ldev->disk_conf;
daeda1cc 1528 *new_disk_conf = *old_disk_conf;
5979e361 1529 if (should_set_defaults(info))
b966b5dd 1530 set_disk_conf_defaults(new_disk_conf);
5979e361 1531
5ecc72c3 1532 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
c75b9b10 1533 if (err && err != -ENOMSG) {
f399002e 1534 retcode = ERR_MANDATORY_TAG;
a910b123 1535 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
8e229434 1536 goto fail_unlock;
f399002e
LE
1537 }
1538
677b3672 1539 if (!expect(device, new_disk_conf->resync_rate >= 1))
5ecc72c3 1540 new_disk_conf->resync_rate = 1;
f399002e 1541
a5ca66c4 1542 sanitize_disk_conf(device, new_disk_conf, device->ldev);
5bbcf5e6
LE
1543
1544 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1545 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
f399002e 1546
5ecc72c3 1547 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
b30ab791 1548 if (fifo_size != device->rs_plan_s->size) {
813472ce
PR
1549 new_plan = fifo_alloc(fifo_size);
1550 if (!new_plan) {
d0180171 1551 drbd_err(device, "kmalloc of fifo_buffer failed");
f399002e 1552 retcode = ERR_NOMEM;
daeda1cc 1553 goto fail_unlock;
f399002e
LE
1554 }
1555 }
1556
f708bd08 1557 err = disk_opts_check_al_size(device, new_disk_conf);
f399002e 1558 if (err) {
f708bd08
LE
1559 /* Could be just "busy". Ignore?
1560 * Introduce dedicated error code? */
1561 drbd_msg_put_info(adm_ctx.reply_skb,
1562 "Try again without changing current al-extents setting");
f399002e 1563 retcode = ERR_NOMEM;
daeda1cc 1564 goto fail_unlock;
f399002e
LE
1565 }
1566
28bc3b8c 1567 lock_all_resources();
b30ab791 1568 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
dc97b708 1569 if (retcode == NO_ERROR) {
b30ab791
AG
1570 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1571 drbd_resync_after_changed(device);
dc97b708 1572 }
28bc3b8c 1573 unlock_all_resources();
f399002e 1574
daeda1cc
PR
1575 if (retcode != NO_ERROR)
1576 goto fail_unlock;
f399002e 1577
813472ce 1578 if (new_plan) {
b30ab791
AG
1579 old_plan = device->rs_plan_s;
1580 rcu_assign_pointer(device->rs_plan_s, new_plan);
9958c857 1581 }
9958c857 1582
0500813f 1583 mutex_unlock(&device->resource->conf_update);
27eb13e9 1584
9a51ab1c 1585 if (new_disk_conf->al_updates)
b30ab791 1586 device->ldev->md.flags &= ~MDF_AL_DISABLED;
9a51ab1c 1587 else
b30ab791 1588 device->ldev->md.flags |= MDF_AL_DISABLED;
9a51ab1c 1589
691631c0 1590 if (new_disk_conf->md_flushes)
b30ab791 1591 clear_bit(MD_NO_FUA, &device->flags);
691631c0 1592 else
b30ab791 1593 set_bit(MD_NO_FUA, &device->flags);
691631c0 1594
70df7092 1595 if (write_ordering_changed(old_disk_conf, new_disk_conf))
f6ba8636 1596 drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
27eb13e9 1597
a34592ff
CH
1598 if (old_disk_conf->discard_zeroes_if_aligned !=
1599 new_disk_conf->discard_zeroes_if_aligned)
9104d31a 1600 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
dd4f699d 1601
b30ab791 1602 drbd_md_sync(device);
f399002e 1603
69a22773
AG
1604 if (device->state.conn >= C_CONNECTED) {
1605 struct drbd_peer_device *peer_device;
1606
1607 for_each_peer_device(peer_device, device)
1608 drbd_send_sync_param(peer_device);
1609 }
f399002e 1610
a77b2109 1611 kvfree_rcu_mightsleep(old_disk_conf);
813472ce 1612 kfree(old_plan);
b30ab791 1613 mod_timer(&device->request_timer, jiffies + HZ);
daeda1cc
PR
1614 goto success;
1615
1616fail_unlock:
0500813f 1617 mutex_unlock(&device->resource->conf_update);
f399002e 1618 fail:
5ecc72c3 1619 kfree(new_disk_conf);
813472ce 1620 kfree(new_plan);
daeda1cc 1621success:
b30ab791 1622 put_ldev(device);
f399002e 1623 out:
9e276872
LE
1624 mutex_unlock(&adm_ctx.resource->adm_mutex);
1625 finish:
a910b123 1626 drbd_adm_finish(&adm_ctx, info, retcode);
f399002e
LE
1627 return 0;
1628}
1629
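/* Open the block device at bdev_path for read/write, claiming it with
 * claim_ptr.  If do_bd_link is set, also register device->vdisk as a holder
 * of the opened bdev.  Returns the struct file of the opened device, or an
 * ERR_PTR() on failure. */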
20e6a8d0 1630static struct file *open_backing_dev(struct drbd_device *device,
63a7c8ad
LE
1631 const char *bdev_path, void *claim_ptr, bool do_bd_link)
1632{
20e6a8d0 1633 struct file *file;
63a7c8ad
LE
1634 int err = 0;
1635
20e6a8d0
CB
1636 file = bdev_file_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
1637 claim_ptr, NULL);
1638 if (IS_ERR(file)) {
63a7c8ad 1639 drbd_err(device, "open(\"%s\") failed with %ld\n",
20e6a8d0
CB
1640 bdev_path, PTR_ERR(file));
1641 return file;
63a7c8ad
LE
1642 }
1643
1644 if (!do_bd_link)
20e6a8d0 1645 return file;
63a7c8ad 1646
20e6a8d0 1647 err = bd_link_disk_holder(file_bdev(file), device->vdisk);
63a7c8ad 1648 if (err) {
20e6a8d0 1649 fput(file);
63a7c8ad
LE
1650 drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
1651 bdev_path, err);
20e6a8d0 1652 file = ERR_PTR(err);
63a7c8ad 1653 }
20e6a8d0 1654 return file;
63a7c8ad
LE
1655}
1656
1657static int open_backing_devices(struct drbd_device *device,
1658 struct disk_conf *new_disk_conf,
1659 struct drbd_backing_dev *nbc)
1660{
20e6a8d0 1661 struct file *file;
63a7c8ad 1662
20e6a8d0 1663 file = open_backing_dev(device, new_disk_conf->backing_dev, device,
75e27d37 1664 true);
20e6a8d0 1665 if (IS_ERR(file))
63a7c8ad 1666 return ERR_OPEN_DISK;
20e6a8d0
CB
1667 nbc->backing_bdev = file_bdev(file);
1668 nbc->backing_bdev_file = file;
63a7c8ad
LE
1669
1670 /*
1671 * meta_dev_idx >= 0: external fixed size, possibly multiple
1672 * drbd sharing one meta device. TODO in that case, paranoia
1673 * check that [md_bdev, meta_dev_idx] is not yet used by some
1674 * other drbd minor! (if you use drbd.conf + drbdadm, that
1675 * should check it for you already; but if you don't, or
1676 * someone fooled it, we need to double check here)
1677 */
20e6a8d0 1678 file = open_backing_dev(device, new_disk_conf->meta_dev,
63a7c8ad
LE
1679 /* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
1680 * if potentially shared with other drbd minors */
1681 (new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
1682 /* avoid double bd_claim_by_disk() for the same (source,target) tuple,
1683 * as would happen with internal metadata. */
1684 (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
1685 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
20e6a8d0 1686 if (IS_ERR(file))
63a7c8ad 1687 return ERR_OPEN_MD_DISK;
20e6a8d0
CB
1688 nbc->md_bdev = file_bdev(file);
1689 nbc->f_md_bdev = file;
63a7c8ad
LE
1690 return NO_ERROR;
1691}
1692
75e27d37 1693static void close_backing_dev(struct drbd_device *device,
20e6a8d0 1694 struct file *bdev_file, bool do_bd_unlink)
63a7c8ad 1695{
20e6a8d0 1696 if (!bdev_file)
63a7c8ad
LE
1697 return;
1698 if (do_bd_unlink)
20e6a8d0
CB
1699 bd_unlink_disk_holder(file_bdev(bdev_file), device->vdisk);
1700 fput(bdev_file);
63a7c8ad
LE
1701}
1702
1703void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
1704{
1705 if (ldev == NULL)
1706 return;
1707
20e6a8d0 1708 close_backing_dev(device, ldev->f_md_bdev,
2736e8ee 1709 ldev->md_bdev != ldev->backing_bdev);
20e6a8d0 1710 close_backing_dev(device, ldev->backing_bdev_file, true);
63a7c8ad
LE
1711
1712 kfree(ldev->disk_conf);
1713 kfree(ldev);
1714}
1715
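/* Netlink handler for attaching a backing device: allocate and fill the new
 * drbd_backing_dev and disk_conf, open the backing and meta-data devices,
 * read the meta-data superblock, move through D_ATTACHING and finally set
 * the new disk state (D_INCONSISTENT, D_OUTDATED, D_CONSISTENT or
 * D_UP_TO_DATE, or D_NEGOTIATING while connected) based on the md flags. */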
3b98c0c2 1716int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
b411b363 1717{
a910b123 1718 struct drbd_config_context adm_ctx;
b30ab791 1719 struct drbd_device *device;
44a4d551
LE
1720 struct drbd_peer_device *peer_device;
1721 struct drbd_connection *connection;
3b98c0c2 1722 int err;
116676ca 1723 enum drbd_ret_code retcode;
b411b363
PR
1724 enum determine_dev_size dd;
1725 sector_t max_possible_sectors;
1726 sector_t min_md_device_sectors;
1727 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
daeda1cc 1728 struct disk_conf *new_disk_conf = NULL;
b411b363 1729 struct lru_cache *resync_lru = NULL;
9958c857 1730 struct fifo_buffer *new_plan = NULL;
b411b363 1731 union drbd_state ns, os;
f2024e7c 1732 enum drbd_state_rv rv;
44ed167d 1733 struct net_conf *nc;
b411b363 1734
a910b123 1735 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
1736 if (!adm_ctx.reply_skb)
1737 return retcode;
1738 if (retcode != NO_ERROR)
40cbf085 1739 goto finish;
b411b363 1740
b30ab791 1741 device = adm_ctx.device;
9e276872 1742 mutex_lock(&adm_ctx.resource->adm_mutex);
44a4d551 1743 peer_device = first_peer_device(device);
3b8a44f8 1744 connection = peer_device->connection;
44a4d551 1745 conn_reconfig_start(connection);
b411b363
PR
1746
1747 /* if you want to reconfigure, please tear down first */
b30ab791 1748 if (device->state.disk > D_DISKLESS) {
b411b363
PR
1749 retcode = ERR_DISK_CONFIGURED;
1750 goto fail;
1751 }
82f59cc6
LE
1752 /* It may just now have detached because of IO error. Make sure
1753 * drbd_ldev_destroy is done already, we may end up here very fast,
1754 * e.g. if someone calls attach from the on-io-error handler,
1755 * to realize a "hot spare" feature (not that I'd recommend that) */
e334f550 1756 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
b411b363 1757
383606e0 1758 /* make sure there is no leftover from previous force-detach attempts */
b30ab791
AG
1759 clear_bit(FORCE_DETACH, &device->flags);
1760 clear_bit(WAS_IO_ERROR, &device->flags);
1761 clear_bit(WAS_READ_ERROR, &device->flags);
383606e0 1762
0029d624 1763 /* and no leftover from previously aborted resync or verify, either */
b30ab791
AG
1764 device->rs_total = 0;
1765 device->rs_failed = 0;
1766 atomic_set(&device->rs_pending_cnt, 0);
0029d624 1767
3b98c0c2 1768 /* allocation not in the IO path, drbdsetup context */
b411b363
PR
1769 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1770 if (!nbc) {
1771 retcode = ERR_NOMEM;
1772 goto fail;
1773 }
9f2247bb
PR
1774 spin_lock_init(&nbc->md.uuid_lock);
1775
daeda1cc
PR
1776 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1777 if (!new_disk_conf) {
1778 retcode = ERR_NOMEM;
b411b363
PR
1779 goto fail;
1780 }
daeda1cc 1781 nbc->disk_conf = new_disk_conf;
b411b363 1782
daeda1cc
PR
1783 set_disk_conf_defaults(new_disk_conf);
1784 err = disk_conf_from_attrs(new_disk_conf, info);
3b98c0c2 1785 if (err) {
b411b363 1786 retcode = ERR_MANDATORY_TAG;
a910b123 1787 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
b411b363
PR
1788 goto fail;
1789 }
1790
5bbcf5e6
LE
1791 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1792 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
d589a21e 1793
9958c857
PR
1794 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1795 if (!new_plan) {
1796 retcode = ERR_NOMEM;
1797 goto fail;
1798 }
1799
daeda1cc 1800 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
b411b363
PR
1801 retcode = ERR_MD_IDX_INVALID;
1802 goto fail;
1803 }
1804
44ed167d 1805 rcu_read_lock();
44a4d551 1806 nc = rcu_dereference(connection->net_conf);
44ed167d 1807 if (nc) {
daeda1cc 1808 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
44ed167d 1809 rcu_read_unlock();
47ff2d0a 1810 retcode = ERR_STONITH_AND_PROT_A;
28bc3b8c 1811 goto fail;
47ff2d0a
PR
1812 }
1813 }
44ed167d 1814 rcu_read_unlock();
47ff2d0a 1815
63a7c8ad
LE
1816 retcode = open_backing_devices(device, new_disk_conf, nbc);
1817 if (retcode != NO_ERROR)
28bc3b8c 1818 goto fail;
b411b363 1819
e525fd89 1820 if ((nbc->backing_bdev == nbc->md_bdev) !=
daeda1cc
PR
1821 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1822 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
e525fd89 1823 retcode = ERR_MD_IDX_INVALID;
28bc3b8c 1824 goto fail;
b411b363
PR
1825 }
1826
1827 resync_lru = lc_create("resync", drbd_bm_ext_cache,
46a15bc3 1828 1, 61, sizeof(struct bm_extent),
b411b363
PR
1829 offsetof(struct bm_extent, lce));
1830 if (!resync_lru) {
1831 retcode = ERR_NOMEM;
28bc3b8c 1832 goto fail;
b411b363
PR
1833 }
1834
c04ccaa6
LE
1835 /* Read our meta data super block early.
1836 * This also sets other on-disk offsets. */
b30ab791 1837 retcode = drbd_md_read(device, nbc);
c04ccaa6 1838 if (retcode != NO_ERROR)
28bc3b8c 1839 goto fail;
b411b363 1840
a5ca66c4 1841 sanitize_disk_conf(device, new_disk_conf, nbc);
5bbcf5e6 1842
daeda1cc 1843 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
d0180171 1844 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
b411b363 1845 (unsigned long long) drbd_get_max_capacity(nbc),
daeda1cc 1846 (unsigned long long) new_disk_conf->disk_size);
7948bcdc 1847 retcode = ERR_DISK_TOO_SMALL;
28bc3b8c 1848 goto fail;
b411b363
PR
1849 }
1850
daeda1cc 1851 if (new_disk_conf->meta_dev_idx < 0) {
b411b363
PR
1852 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1853 /* at least one MB, otherwise it does not make sense */
1854 min_md_device_sectors = (2<<10);
1855 } else {
1856 max_possible_sectors = DRBD_MAX_SECTORS;
ae8bf312 1857 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
b411b363
PR
1858 }
1859
b411b363 1860 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
7948bcdc 1861 retcode = ERR_MD_DISK_TOO_SMALL;
d0180171 1862 drbd_warn(device, "refusing attach: md-device too small, "
b411b363
PR
1863 "at least %llu sectors needed for this meta-disk type\n",
1864 (unsigned long long) min_md_device_sectors);
28bc3b8c 1865 goto fail;
b411b363
PR
1866 }
1867
1868 /* Make sure the new disk is big enough
1869 * (we may currently be R_PRIMARY with no local disk...) */
155bd9d1 1870 if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
7948bcdc 1871 retcode = ERR_DISK_TOO_SMALL;
28bc3b8c 1872 goto fail;
b411b363
PR
1873 }
1874
1875 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1876
1352994b 1877 if (nbc->known_size > max_possible_sectors) {
d0180171 1878 drbd_warn(device, "==> truncating very big lower level device "
1352994b
LE
1879 "to currently maximum possible %llu sectors <==\n",
1880 (unsigned long long) max_possible_sectors);
daeda1cc 1881 if (new_disk_conf->meta_dev_idx >= 0)
d0180171 1882 drbd_warn(device, "==>> using internal or flexible "
1352994b
LE
1883 "meta data may help <<==\n");
1884 }
1885
b30ab791 1886 drbd_suspend_io(device);
b411b363 1887 /* also wait for the last barrier ack. */
b6dd1a89
LE
1888 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1889 * We need a way to either ignore barrier acks for barriers sent before a device
1890 * was attached, or a way to wait for all pending barrier acks to come in.
1891 * As barriers are counted per resource,
1892 * we'd need to suspend io on all devices of a resource.
1893 */
b30ab791 1894 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
b411b363 1895 /* and for any other previously queued work */
44a4d551 1896 drbd_flush_workqueue(&connection->sender_work);
b411b363 1897
b30ab791 1898 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1f1e87b4 1899 retcode = (enum drbd_ret_code)rv;
b30ab791 1900 drbd_resume_io(device);
f2024e7c 1901 if (rv < SS_SUCCESS)
28bc3b8c 1902 goto fail;
b411b363 1903
b30ab791 1904 if (!get_ldev_if_state(device, D_ATTACHING))
b411b363
PR
1905 goto force_diskless;
1906
b30ab791
AG
1907 if (!device->bitmap) {
1908 if (drbd_bm_init(device)) {
b411b363
PR
1909 retcode = ERR_NOMEM;
1910 goto force_diskless_dec;
1911 }
1912 }
1913
fe43ed97
LE
1914 if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1915 (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1916 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
d0180171 1917 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
b30ab791 1918 (unsigned long long)device->ed_uuid);
b411b363
PR
1919 retcode = ERR_DATA_NOT_CURRENT;
1920 goto force_diskless_dec;
1921 }
1922
1923 /* Since we are diskless, fix the activity log first... */
b30ab791 1924 if (drbd_check_al_size(device, new_disk_conf)) {
b411b363
PR
1925 retcode = ERR_NOMEM;
1926 goto force_diskless_dec;
1927 }
1928
1929 /* Prevent shrinking of consistent devices ! */
4ef2a4f4
LE
1930 {
1931 unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
1932 unsigned long long eff = nbc->md.la_size_sect;
1933 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
1934 if (nsz == nbc->disk_conf->disk_size) {
1935 drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
1936 } else {
1937 drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
1938 drbd_msg_sprintf_info(adm_ctx.reply_skb,
1939 "To-be-attached device has last effective > current size, and is consistent\n"
1940 "(%llu > %llu sectors). Refusing to attach.", eff, nsz);
1941 retcode = ERR_IMPLICIT_SHRINK;
1942 goto force_diskless_dec;
1943 }
1944 }
b411b363
PR
1945 }
1946
28bc3b8c
AG
1947 lock_all_resources();
1948 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1949 if (retcode != NO_ERROR) {
1950 unlock_all_resources();
1951 goto force_diskless_dec;
1952 }
1953
b411b363
PR
1954 /* Reset the "barriers don't work" bits here, then force meta data to
1955 * be written, to ensure we determine if barriers are supported. */
e544046a 1956 if (new_disk_conf->md_flushes)
b30ab791 1957 clear_bit(MD_NO_FUA, &device->flags);
b411b363 1958 else
b30ab791 1959 set_bit(MD_NO_FUA, &device->flags);
b411b363
PR
1960
1961 /* Point of no return reached.
1962 * Devices and memory are no longer released by error cleanup below.
b30ab791 1963 * now device takes over responsibility, and the state engine should
b411b363 1964 * clean it up somewhere. */
0b0ba1ef 1965 D_ASSERT(device, device->ldev == NULL);
b30ab791
AG
1966 device->ldev = nbc;
1967 device->resync = resync_lru;
1968 device->rs_plan_s = new_plan;
b411b363
PR
1969 nbc = NULL;
1970 resync_lru = NULL;
daeda1cc 1971 new_disk_conf = NULL;
9958c857 1972 new_plan = NULL;
b411b363 1973
1ec317d3 1974 drbd_resync_after_changed(device);
f6ba8636 1975 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
28bc3b8c 1976 unlock_all_resources();
b411b363 1977
b30ab791
AG
1978 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1979 set_bit(CRASHED_PRIMARY, &device->flags);
b411b363 1980 else
b30ab791 1981 clear_bit(CRASHED_PRIMARY, &device->flags);
b411b363 1982
b30ab791 1983 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
6bbf53ca 1984 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
b30ab791 1985 set_bit(CRASHED_PRIMARY, &device->flags);
b411b363 1986
b30ab791
AG
1987 device->send_cnt = 0;
1988 device->recv_cnt = 0;
1989 device->read_cnt = 0;
1990 device->writ_cnt = 0;
b411b363 1991
9104d31a 1992 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
b411b363
PR
1993
1994 /* If I am currently not R_PRIMARY,
1995 * but meta data primary indicator is set,
1996 * I just now recover from a hard crash,
1997 * and have been R_PRIMARY before that crash.
1998 *
1999 * Now, if I had no connection before that crash
2000 * (have been degraded R_PRIMARY), chances are that
2001 * I won't find my peer now either.
2002 *
2003 * In that case, and _only_ in that case,
2004 * we use the degr-wfc-timeout instead of the default,
2005 * so we can automatically recover from a crash of a
2006 * degraded but active "cluster" after a certain timeout.
2007 */
b30ab791
AG
2008 clear_bit(USE_DEGR_WFC_T, &device->flags);
2009 if (device->state.role != R_PRIMARY &&
2010 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2011 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2012 set_bit(USE_DEGR_WFC_T, &device->flags);
b411b363 2013
b30ab791 2014 dd = drbd_determine_dev_size(device, 0, NULL);
d752b269 2015 if (dd <= DS_ERROR) {
b411b363
PR
2016 retcode = ERR_NOMEM_BITMAP;
2017 goto force_diskless_dec;
e96c9633 2018 } else if (dd == DS_GREW)
b30ab791 2019 set_bit(RESYNC_AFTER_NEG, &device->flags);
b411b363 2020
b30ab791
AG
2021 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2022 (test_bit(CRASHED_PRIMARY, &device->flags) &&
2023 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
d0180171 2024 drbd_info(device, "Assuming that all blocks are out of sync "
b411b363 2025 "(aka FullSync)\n");
b30ab791 2026 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
8164dd6c
AG
2027 "set_n_write from attaching", BM_LOCKED_MASK,
2028 NULL)) {
b411b363
PR
2029 retcode = ERR_IO_MD_DISK;
2030 goto force_diskless_dec;
2031 }
2032 } else {
b30ab791 2033 if (drbd_bitmap_io(device, &drbd_bm_read,
8164dd6c
AG
2034 "read from attaching", BM_LOCKED_MASK,
2035 NULL)) {
19f843aa
LE
2036 retcode = ERR_IO_MD_DISK;
2037 goto force_diskless_dec;
2038 }
b411b363
PR
2039 }
2040
b30ab791
AG
2041 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2042 drbd_suspend_al(device); /* IO is still suspended here... */
0778286a 2043
0500813f 2044 spin_lock_irq(&device->resource->req_lock);
b30ab791 2045 os = drbd_read_state(device);
78bae59b 2046 ns = os;
b411b363
PR
2047 /* If MDF_CONSISTENT is not set go into inconsistent state,
2048 otherwise investigate MDF_WAS_UP_TO_DATE...
2049 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2050 otherwise into D_CONSISTENT state.
2051 */
b30ab791
AG
2052 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2053 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
b411b363
PR
2054 ns.disk = D_CONSISTENT;
2055 else
2056 ns.disk = D_OUTDATED;
2057 } else {
2058 ns.disk = D_INCONSISTENT;
2059 }
2060
b30ab791 2061 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
b411b363
PR
2062 ns.pdsk = D_OUTDATED;
2063
daeda1cc
PR
2064 rcu_read_lock();
2065 if (ns.disk == D_CONSISTENT &&
b30ab791 2066 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
b411b363
PR
2067 ns.disk = D_UP_TO_DATE;
2068
2069 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2070 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2071 this point, because drbd_request_state() modifies these
2072 flags. */
2073
b30ab791
AG
2074 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2075 device->ldev->md.flags &= ~MDF_AL_DISABLED;
9a51ab1c 2076 else
b30ab791 2077 device->ldev->md.flags |= MDF_AL_DISABLED;
9a51ab1c
PR
2078
2079 rcu_read_unlock();
2080
b411b363
PR
2081 /* In case we are C_CONNECTED postpone any decision on the new disk
2082 state after the negotiation phase. */
b30ab791
AG
2083 if (device->state.conn == C_CONNECTED) {
2084 device->new_state_tmp.i = ns.i;
b411b363
PR
2085 ns.i = os.i;
2086 ns.disk = D_NEGOTIATING;
dc66c74d
PR
2087
2088 /* We expect to receive up-to-date UUIDs soon.
2089 To avoid a race in receive_state, free p_uuid while
2090 holding req_lock. I.e. atomic with the state change */
b30ab791
AG
2091 kfree(device->p_uuid);
2092 device->p_uuid = NULL;
b411b363
PR
2093 }
2094
b30ab791 2095 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
0500813f 2096 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
2097
2098 if (rv < SS_SUCCESS)
2099 goto force_diskless_dec;
2100
b30ab791 2101 mod_timer(&device->request_timer, jiffies + HZ);
cdfda633 2102
b30ab791
AG
2103 if (device->state.role == R_PRIMARY)
2104 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
b411b363 2105 else
b30ab791 2106 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
b411b363 2107
b30ab791
AG
2108 drbd_md_mark_dirty(device);
2109 drbd_md_sync(device);
b411b363 2110
b30ab791
AG
2111 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2112 put_ldev(device);
44a4d551 2113 conn_reconfig_done(connection);
9e276872 2114 mutex_unlock(&adm_ctx.resource->adm_mutex);
a910b123 2115 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2116 return 0;
2117
2118 force_diskless_dec:
b30ab791 2119 put_ldev(device);
b411b363 2120 force_diskless:
b30ab791
AG
2121 drbd_force_state(device, NS(disk, D_DISKLESS));
2122 drbd_md_sync(device);
b411b363 2123 fail:
44a4d551 2124 conn_reconfig_done(connection);
b411b363 2125 if (nbc) {
20e6a8d0 2126 close_backing_dev(device, nbc->f_md_bdev,
2736e8ee 2127 nbc->md_bdev != nbc->backing_bdev);
20e6a8d0 2128 close_backing_dev(device, nbc->backing_bdev_file, true);
b411b363
PR
2129 kfree(nbc);
2130 }
daeda1cc 2131 kfree(new_disk_conf);
b411b363 2132 lc_destroy(resync_lru);
9958c857 2133 kfree(new_plan);
9e276872 2134 mutex_unlock(&adm_ctx.resource->adm_mutex);
40cbf085 2135 finish:
a910b123 2136 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2137 return 0;
2138}
2139
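/* With force, set FORCE_DETACH and immediately force the disk to D_FAILED;
 * otherwise perform a graceful, interruptible detach. */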
b30ab791 2140static int adm_detach(struct drbd_device *device, int force)
b411b363 2141{
cdfda633 2142 if (force) {
b30ab791
AG
2143 set_bit(FORCE_DETACH, &device->flags);
2144 drbd_force_state(device, NS(disk, D_FAILED));
33d32fa7 2145 return SS_SUCCESS;
02ee8f95
PR
2146 }
2147
33d32fa7 2148 return drbd_request_detach_interruptible(device);
b411b363
PR
2149}
2150
82f59cc6
LE
2151/* Detaching the disk is a process in multiple stages. First we need to lock
2152 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2153 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2154 * internal references as well.
2155 * Only then have we finally detached. */
3b98c0c2 2156int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
b411b363 2157{
a910b123 2158 struct drbd_config_context adm_ctx;
116676ca 2159 enum drbd_ret_code retcode;
cdfda633
PR
2160 struct detach_parms parms = { };
2161 int err;
b411b363 2162
a910b123 2163 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
2164 if (!adm_ctx.reply_skb)
2165 return retcode;
2166 if (retcode != NO_ERROR)
2167 goto out;
b411b363 2168
cdfda633
PR
2169 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2170 err = detach_parms_from_attrs(&parms, info);
2171 if (err) {
2172 retcode = ERR_MANDATORY_TAG;
a910b123 2173 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
cdfda633
PR
2174 goto out;
2175 }
b411b363
PR
2176 }
2177
9e276872 2178 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791 2179 retcode = adm_detach(adm_ctx.device, parms.force_detach);
9e276872 2180 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 2181out:
a910b123 2182 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2183 return 0;
2184}
b411b363 2185
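/* true if any volume of this connection is currently sync source or sync
 * target, including the paused variants */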
bde89a9e 2186static bool conn_resync_running(struct drbd_connection *connection)
f399002e 2187{
c06ece6b 2188 struct drbd_peer_device *peer_device;
695d08fa 2189 bool rv = false;
f399002e
LE
2190 int vnr;
2191
695d08fa 2192 rcu_read_lock();
c06ece6b
AG
2193 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2194 struct drbd_device *device = peer_device->device;
b30ab791
AG
2195 if (device->state.conn == C_SYNC_SOURCE ||
2196 device->state.conn == C_SYNC_TARGET ||
2197 device->state.conn == C_PAUSED_SYNC_S ||
2198 device->state.conn == C_PAUSED_SYNC_T) {
695d08fa
PR
2199 rv = true;
2200 break;
2201 }
b411b363 2202 }
695d08fa 2203 rcu_read_unlock();
b411b363 2204
695d08fa 2205 return rv;
f399002e 2206}
47ff2d0a 2207
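/* true if any volume of this connection is currently running online verify */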
bde89a9e 2208static bool conn_ov_running(struct drbd_connection *connection)
f399002e 2209{
c06ece6b 2210 struct drbd_peer_device *peer_device;
695d08fa 2211 bool rv = false;
f399002e
LE
2212 int vnr;
2213
695d08fa 2214 rcu_read_lock();
c06ece6b
AG
2215 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2216 struct drbd_device *device = peer_device->device;
b30ab791
AG
2217 if (device->state.conn == C_VERIFY_S ||
2218 device->state.conn == C_VERIFY_T) {
695d08fa
PR
2219 rv = true;
2220 break;
47ff2d0a
PR
2221 }
2222 }
695d08fa 2223 rcu_read_unlock();
b411b363 2224
695d08fa 2225 return rv;
f399002e 2226}
422028b1 2227
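/* Validate new_net_conf against the current connection and device states:
 * changing the wire protocol, the two-primaries setting or the integrity
 * algorithm on an established connection needs agreed_pro_version >= 100,
 * two primaries require protocol C, FP_STONITH fencing must not be combined
 * with protocol A, discard_my_data is refused while we are primary, and an
 * on_congestion policy other than OC_BLOCK requires protocol A. */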
cd64397c 2228static enum drbd_ret_code
270eb5c9 2229_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
cd64397c 2230{
c06ece6b 2231 struct drbd_peer_device *peer_device;
cd64397c 2232 int i;
b411b363 2233
270eb5c9
AG
2234 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2235 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
dcb20d1a 2236 return ERR_NEED_APV_100;
b411b363 2237
270eb5c9 2238 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
dcb20d1a
PR
2239 return ERR_NEED_APV_100;
2240
270eb5c9 2241 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
dcb20d1a 2242 return ERR_NEED_APV_100;
b411b363
PR
2243 }
2244
270eb5c9 2245 if (!new_net_conf->two_primaries &&
bde89a9e
AG
2246 conn_highest_role(connection) == R_PRIMARY &&
2247 conn_highest_peer(connection) == R_PRIMARY)
dcb20d1a 2248 return ERR_NEED_ALLOW_TWO_PRI;
b411b363 2249
270eb5c9
AG
2250 if (new_net_conf->two_primaries &&
2251 (new_net_conf->wire_protocol != DRBD_PROT_C))
cd64397c
PR
2252 return ERR_NOT_PROTO_C;
2253
c06ece6b
AG
2254 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2255 struct drbd_device *device = peer_device->device;
b30ab791
AG
2256 if (get_ldev(device)) {
2257 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2258 put_ldev(device);
270eb5c9 2259 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
cd64397c 2260 return ERR_STONITH_AND_PROT_A;
b411b363 2261 }
270eb5c9 2262 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
eb12010e 2263 return ERR_DISCARD_IMPOSSIBLE;
b411b363
PR
2264 }
2265
270eb5c9 2266 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
cd64397c 2267 return ERR_CONG_NOT_PROTO_A;
b411b363 2268
cd64397c
PR
2269 return NO_ERROR;
2270}
b411b363 2271
44ed167d 2272static enum drbd_ret_code
270eb5c9 2273check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
44ed167d 2274{
e9d5d4a0 2275 enum drbd_ret_code rv;
c06ece6b 2276 struct drbd_peer_device *peer_device;
44ed167d 2277 int i;
b411b363 2278
44ed167d 2279 rcu_read_lock();
270eb5c9 2280 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
44ed167d 2281 rcu_read_unlock();
b411b363 2282
179e20b8 2283 /* connection->peer_devices protected by genl_lock() here */
c06ece6b
AG
2284 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2285 struct drbd_device *device = peer_device->device;
b30ab791
AG
2286 if (!device->bitmap) {
2287 if (drbd_bm_init(device))
44ed167d 2288 return ERR_NOMEM;
b411b363
PR
2289 }
2290 }
2291
44ed167d
PR
2292 return rv;
2293}
b411b363 2294
0fd0ea06 2295struct crypto {
3d0e6375
KC
2296 struct crypto_shash *verify_tfm;
2297 struct crypto_shash *csums_tfm;
9534d671 2298 struct crypto_shash *cram_hmac_tfm;
3d0e6375 2299 struct crypto_shash *integrity_tfm;
0fd0ea06 2300};
b411b363 2301
0fd0ea06 2302static int
9534d671 2303alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
0fd0ea06
PR
2304{
2305 if (!tfm_name[0])
2306 return NO_ERROR;
b411b363 2307
9534d671
HX
2308 *tfm = crypto_alloc_shash(tfm_name, 0, 0);
2309 if (IS_ERR(*tfm)) {
2310 *tfm = NULL;
2311 return err_alg;
2312 }
2313
2314 return NO_ERROR;
2315}
2316
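/* Allocate the shash transforms named in new_net_conf: csums_alg, verify_alg,
 * integrity_alg, and "hmac(<cram_hmac_alg>)" for peer authentication.
 * Empty algorithm names are skipped; if an allocation fails, the
 * corresponding ERR_*_ALG code is returned. */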
0fd0ea06 2317static enum drbd_ret_code
270eb5c9 2318alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
0fd0ea06
PR
2319{
2320 char hmac_name[CRYPTO_MAX_ALG_NAME];
2321 enum drbd_ret_code rv;
0fd0ea06 2322
3d0e6375 2323 rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
9534d671 2324 ERR_CSUMS_ALG);
0fd0ea06
PR
2325 if (rv != NO_ERROR)
2326 return rv;
3d0e6375 2327 rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
9534d671 2328 ERR_VERIFY_ALG);
0fd0ea06
PR
2329 if (rv != NO_ERROR)
2330 return rv;
3d0e6375 2331 rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
9534d671 2332 ERR_INTEGRITY_ALG);
0fd0ea06
PR
2333 if (rv != NO_ERROR)
2334 return rv;
270eb5c9 2335 if (new_net_conf->cram_hmac_alg[0] != 0) {
0fd0ea06 2336 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
270eb5c9 2337 new_net_conf->cram_hmac_alg);
b411b363 2338
9534d671
HX
2339 rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2340 ERR_AUTH_ALG);
b411b363
PR
2341 }
2342
0fd0ea06
PR
2343 return rv;
2344}
b411b363 2345
0fd0ea06
PR
2346static void free_crypto(struct crypto *crypto)
2347{
9534d671 2348 crypto_free_shash(crypto->cram_hmac_tfm);
3d0e6375
KC
2349 crypto_free_shash(crypto->integrity_tfm);
2350 crypto_free_shash(crypto->csums_tfm);
2351 crypto_free_shash(crypto->verify_tfm);
0fd0ea06 2352}
b411b363 2353
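/* Netlink handler for changing the options of an existing connection: copy
 * the current net_conf, apply the attributes (or defaults), validate them,
 * refuse to change the csums/verify algorithm while a resync or online
 * verify is running, allocate the new crypto transforms, publish the new
 * net_conf via rcu_assign_pointer() and free the old one after an RCU
 * grace period. */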
f399002e
LE
2354int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2355{
a910b123 2356 struct drbd_config_context adm_ctx;
f399002e 2357 enum drbd_ret_code retcode;
bde89a9e 2358 struct drbd_connection *connection;
270eb5c9 2359 struct net_conf *old_net_conf, *new_net_conf = NULL;
f399002e
LE
2360 int err;
2361 int ovr; /* online verify running */
2362 int rsr; /* re-sync running */
0fd0ea06 2363 struct crypto crypto = { };
b411b363 2364
a910b123 2365 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
f399002e
LE
2366 if (!adm_ctx.reply_skb)
2367 return retcode;
2368 if (retcode != NO_ERROR)
9e276872 2369 goto finish;
b411b363 2370
bde89a9e 2371 connection = adm_ctx.connection;
9e276872 2372 mutex_lock(&adm_ctx.resource->adm_mutex);
b411b363 2373
270eb5c9
AG
2374 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2375 if (!new_net_conf) {
f399002e
LE
2376 retcode = ERR_NOMEM;
2377 goto out;
2378 }
b411b363 2379
bde89a9e 2380 conn_reconfig_start(connection);
b411b363 2381
bde89a9e 2382 mutex_lock(&connection->data.mutex);
0500813f 2383 mutex_lock(&connection->resource->conf_update);
270eb5c9 2384 old_net_conf = connection->net_conf;
2561b9c1 2385
270eb5c9 2386 if (!old_net_conf) {
a910b123 2387 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
f399002e 2388 retcode = ERR_INVALID_REQUEST;
2561b9c1
PR
2389 goto fail;
2390 }
2391
270eb5c9 2392 *new_net_conf = *old_net_conf;
5979e361 2393 if (should_set_defaults(info))
270eb5c9 2394 set_net_conf_defaults(new_net_conf);
f399002e 2395
270eb5c9 2396 err = net_conf_from_attrs_for_change(new_net_conf, info);
c75b9b10 2397 if (err && err != -ENOMSG) {
f399002e 2398 retcode = ERR_MANDATORY_TAG;
a910b123 2399 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
f399002e 2400 goto fail;
2561b9c1 2401 }
b411b363 2402
270eb5c9 2403 retcode = check_net_options(connection, new_net_conf);
cd64397c
PR
2404 if (retcode != NO_ERROR)
2405 goto fail;
b411b363 2406
f399002e 2407 /* re-sync running */
bde89a9e 2408 rsr = conn_resync_running(connection);
270eb5c9 2409 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
f399002e 2410 retcode = ERR_CSUMS_RESYNC_RUNNING;
91fd4dad 2411 goto fail;
b411b363
PR
2412 }
2413
f399002e 2414 /* online verify running */
bde89a9e 2415 ovr = conn_ov_running(connection);
270eb5c9 2416 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
0fd0ea06 2417 retcode = ERR_VERIFY_RUNNING;
b411b363 2418 goto fail;
f399002e 2419 }
b411b363 2420
270eb5c9 2421 retcode = alloc_crypto(&crypto, new_net_conf);
0fd0ea06 2422 if (retcode != NO_ERROR)
b411b363 2423 goto fail;
f399002e 2424
270eb5c9 2425 rcu_assign_pointer(connection->net_conf, new_net_conf);
f399002e
LE
2426
2427 if (!rsr) {
3d0e6375 2428 crypto_free_shash(connection->csums_tfm);
bde89a9e 2429 connection->csums_tfm = crypto.csums_tfm;
0fd0ea06 2430 crypto.csums_tfm = NULL;
f399002e
LE
2431 }
2432 if (!ovr) {
3d0e6375 2433 crypto_free_shash(connection->verify_tfm);
bde89a9e 2434 connection->verify_tfm = crypto.verify_tfm;
0fd0ea06 2435 crypto.verify_tfm = NULL;
b411b363
PR
2436 }
2437
3d0e6375 2438 crypto_free_shash(connection->integrity_tfm);
bde89a9e
AG
2439 connection->integrity_tfm = crypto.integrity_tfm;
2440 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2441 /* Do this without trying to take connection->data.mutex again. */
2442 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
0fd0ea06 2443
9534d671 2444 crypto_free_shash(connection->cram_hmac_tfm);
bde89a9e 2445 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
0fd0ea06 2446
0500813f 2447 mutex_unlock(&connection->resource->conf_update);
bde89a9e 2448 mutex_unlock(&connection->data.mutex);
a77b2109 2449 kvfree_rcu_mightsleep(old_net_conf);
91fd4dad 2450
69a22773
AG
2451 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2452 struct drbd_peer_device *peer_device;
2453 int vnr;
2454
2455 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2456 drbd_send_sync_param(peer_device);
2457 }
f399002e 2458
91fd4dad
PR
2459 goto done;
2460
b411b363 2461 fail:
0500813f 2462 mutex_unlock(&connection->resource->conf_update);
bde89a9e 2463 mutex_unlock(&connection->data.mutex);
0fd0ea06 2464 free_crypto(&crypto);
270eb5c9 2465 kfree(new_net_conf);
91fd4dad 2466 done:
bde89a9e 2467 conn_reconfig_done(connection);
f399002e 2468 out:
9e276872
LE
2469 mutex_unlock(&adm_ctx.resource->adm_mutex);
2470 finish:
a910b123 2471 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2472 return 0;
2473}
2474
a2972846
AG
2475static void connection_to_info(struct connection_info *info,
2476 struct drbd_connection *connection)
2477{
2478 info->conn_connection_state = connection->cstate;
2479 info->conn_role = conn_highest_peer(connection);
2480}
2481
2482static void peer_device_to_info(struct peer_device_info *info,
2483 struct drbd_peer_device *peer_device)
2484{
2485 struct drbd_device *device = peer_device->device;
2486
2487 info->peer_repl_state =
2488 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2489 info->peer_disk_state = device->state.pdsk;
2490 info->peer_resync_susp_user = device->state.user_isp;
2491 info->peer_resync_susp_peer = device->state.peer_isp;
2492 info->peer_resync_susp_dependency = device->state.aftr_isp;
2493}
2494
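/* Netlink handler for establishing a connection: reject local/peer addresses
 * already used by another connection, install the new net_conf and crypto
 * transforms, remember both endpoints, send NOTIFY_CREATE events for the
 * connection and its peer devices, and request the C_UNCONNECTED state. */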
3b98c0c2 2495int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
b411b363 2496{
a2972846
AG
2497 struct connection_info connection_info;
2498 enum drbd_notification_type flags;
2499 unsigned int peer_devices = 0;
a910b123 2500 struct drbd_config_context adm_ctx;
c06ece6b 2501 struct drbd_peer_device *peer_device;
270eb5c9 2502 struct net_conf *old_net_conf, *new_net_conf = NULL;
0fd0ea06 2503 struct crypto crypto = { };
77c556f6 2504 struct drbd_resource *resource;
bde89a9e 2505 struct drbd_connection *connection;
3b98c0c2 2506 enum drbd_ret_code retcode;
4b28f3b4 2507 enum drbd_state_rv rv;
3b98c0c2
LE
2508 int i;
2509 int err;
b411b363 2510
a910b123 2511 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
b411b363 2512
3b98c0c2
LE
2513 if (!adm_ctx.reply_skb)
2514 return retcode;
2515 if (retcode != NO_ERROR)
2516 goto out;
089c075d 2517 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
a910b123 2518 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
089c075d
AG
2519 retcode = ERR_INVALID_REQUEST;
2520 goto out;
2521 }
b411b363 2522
089c075d
AG
2523 /* No need for _rcu here. All reconfiguration is
2524 * strictly serialized on genl_lock(). We are protected against
2525 * concurrent reconfiguration/addition/deletion */
77c556f6
AG
2526 for_each_resource(resource, &drbd_resources) {
2527 for_each_connection(connection, resource) {
2528 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2529 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2530 connection->my_addr_len)) {
2531 retcode = ERR_LOCAL_ADDR;
2532 goto out;
2533 }
b411b363 2534
77c556f6
AG
2535 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2536 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2537 connection->peer_addr_len)) {
2538 retcode = ERR_PEER_ADDR;
2539 goto out;
2540 }
089c075d 2541 }
b411b363
PR
2542 }
2543
9e276872 2544 mutex_lock(&adm_ctx.resource->adm_mutex);
3ab706fe 2545 connection = first_connection(adm_ctx.resource);
bde89a9e 2546 conn_reconfig_start(connection);
b411b363 2547
bde89a9e 2548 if (connection->cstate > C_STANDALONE) {
b411b363 2549 retcode = ERR_NET_CONFIGURED;
b411b363
PR
2550 goto fail;
2551 }
2552
a209b4ae 2553 /* allocation not in the IO path, drbdsetup / netlink process context */
270eb5c9
AG
2554 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2555 if (!new_net_conf) {
b411b363 2556 retcode = ERR_NOMEM;
b411b363
PR
2557 goto fail;
2558 }
2559
270eb5c9 2560 set_net_conf_defaults(new_net_conf);
b411b363 2561
270eb5c9 2562 err = net_conf_from_attrs(new_net_conf, info);
25e40932 2563 if (err && err != -ENOMSG) {
b411b363 2564 retcode = ERR_MANDATORY_TAG;
a910b123 2565 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
b411b363
PR
2566 goto fail;
2567 }
2568
270eb5c9 2569 retcode = check_net_options(connection, new_net_conf);
cd64397c 2570 if (retcode != NO_ERROR)
422028b1 2571 goto fail;
b411b363 2572
270eb5c9 2573 retcode = alloc_crypto(&crypto, new_net_conf);
0fd0ea06
PR
2574 if (retcode != NO_ERROR)
2575 goto fail;
b411b363 2576
270eb5c9 2577 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
7b4e4d31 2578
b5043c5e 2579 drbd_flush_workqueue(&connection->sender_work);
b411b363 2580
0500813f 2581 mutex_lock(&adm_ctx.resource->conf_update);
270eb5c9
AG
2582 old_net_conf = connection->net_conf;
2583 if (old_net_conf) {
b411b363 2584 retcode = ERR_NET_CONFIGURED;
0500813f 2585 mutex_unlock(&adm_ctx.resource->conf_update);
b411b363
PR
2586 goto fail;
2587 }
270eb5c9 2588 rcu_assign_pointer(connection->net_conf, new_net_conf);
b411b363 2589
bde89a9e
AG
2590 conn_free_crypto(connection);
2591 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2592 connection->integrity_tfm = crypto.integrity_tfm;
2593 connection->csums_tfm = crypto.csums_tfm;
2594 connection->verify_tfm = crypto.verify_tfm;
b411b363 2595
bde89a9e
AG
2596 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2597 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2598 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2599 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
b411b363 2600
a2972846
AG
2601 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2602 peer_devices++;
2603 }
2604
2605 connection_to_info(&connection_info, connection);
2606 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2607 mutex_lock(&notification_mutex);
2608 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2609 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2610 struct peer_device_info peer_device_info;
2611
2612 peer_device_to_info(&peer_device_info, peer_device);
2613 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2614 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2615 }
2616 mutex_unlock(&notification_mutex);
0500813f 2617 mutex_unlock(&adm_ctx.resource->conf_update);
b411b363 2618
695d08fa 2619 rcu_read_lock();
c06ece6b
AG
2620 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2621 struct drbd_device *device = peer_device->device;
b30ab791
AG
2622 device->send_cnt = 0;
2623 device->recv_cnt = 0;
b411b363 2624 }
695d08fa 2625 rcu_read_unlock();
b411b363 2626
4b28f3b4 2627 rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
b411b363 2628
bde89a9e 2629 conn_reconfig_done(connection);
9e276872 2630 mutex_unlock(&adm_ctx.resource->adm_mutex);
4b28f3b4 2631 drbd_adm_finish(&adm_ctx, info, rv);
b411b363 2632 return 0;
b411b363 2633
b411b363 2634fail:
0fd0ea06 2635 free_crypto(&crypto);
270eb5c9 2636 kfree(new_net_conf);
b411b363 2637
bde89a9e 2638 conn_reconfig_done(connection);
9e276872 2639 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 2640out:
a910b123 2641 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2642 return 0;
2643}
2644
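/* Try to take the connection down to C_DISCONNECTING (CS_HARD when forced).
 * SS_ALREADY_STANDALONE counts as success; SS_PRIMARY_NOP retries with the
 * peer's disk marked outdated, SS_CW_FAILED_BY_PEER with our own.  On
 * success, wait for the receiver thread to really stop and force
 * C_STANDALONE as a race breaker. */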
bde89a9e 2645static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
85f75dd7 2646{
be80ff88 2647 enum drbd_conns cstate;
85f75dd7 2648 enum drbd_state_rv rv;
85f75dd7 2649
be80ff88 2650repeat:
bde89a9e 2651 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
f3dfa40a 2652 force ? CS_HARD : 0);
85f75dd7
LE
2653
2654 switch (rv) {
2655 case SS_NOTHING_TO_DO:
f3dfa40a 2656 break;
85f75dd7
LE
2657 case SS_ALREADY_STANDALONE:
2658 return SS_SUCCESS;
2659 case SS_PRIMARY_NOP:
2660 /* Our state checking code wants to see the peer outdated. */
bde89a9e 2661 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2bd5ed5d
PR
2662
2663 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
bde89a9e 2664 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2bd5ed5d 2665
85f75dd7
LE
2666 break;
2667 case SS_CW_FAILED_BY_PEER:
be80ff88
LE
2668 spin_lock_irq(&connection->resource->req_lock);
2669 cstate = connection->cstate;
2670 spin_unlock_irq(&connection->resource->req_lock);
2671 if (cstate <= C_WF_CONNECTION)
2672 goto repeat;
85f75dd7 2673 /* The peer probably wants to see us outdated. */
bde89a9e 2674 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
85f75dd7
LE
2675 disk, D_OUTDATED), 0);
2676 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
bde89a9e 2677 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
f3dfa40a 2678 CS_HARD);
b411b363 2679 }
85f75dd7
LE
2680 break;
2681 default:;
2682 /* no special handling necessary */
2683 }
2684
f3dfa40a
LE
2685 if (rv >= SS_SUCCESS) {
2686 enum drbd_state_rv rv2;
2687 /* No one else can reconfigure the network while I am here.
2688 * The state handling only uses drbd_thread_stop_nowait(),
2689 * we want to really wait here until the receiver is no more.
2690 */
9693da23 2691 drbd_thread_stop(&connection->receiver);
f3dfa40a
LE
2692
2693 /* Race breaker. This additional state change request may be
2694 * necessary, if this was a forced disconnect during a receiver
2695 * restart. We may have "killed" the receiver thread just
8fe60551 2696 * after drbd_receiver() returned. Typically, we should be
f3dfa40a
LE
2697 * C_STANDALONE already, now, and this becomes a no-op.
2698 */
bde89a9e 2699 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
f3dfa40a
LE
2700 CS_VERBOSE | CS_HARD);
2701 if (rv2 < SS_SUCCESS)
1ec861eb 2702 drbd_err(connection,
f3dfa40a
LE
2703 "unexpected rv2=%d in conn_try_disconnect()\n",
2704 rv2);
a2972846
AG
2705 /* Unlike in DRBD 9, the state engine has generated
2706 * NOTIFY_DESTROY events before clearing connection->net_conf. */
b411b363 2707 }
85f75dd7
LE
2708 return rv;
2709}
b411b363 2710
3b98c0c2 2711int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
b411b363 2712{
a910b123 2713 struct drbd_config_context adm_ctx;
3b98c0c2 2714 struct disconnect_parms parms;
bde89a9e 2715 struct drbd_connection *connection;
85f75dd7 2716 enum drbd_state_rv rv;
3b98c0c2
LE
2717 enum drbd_ret_code retcode;
2718 int err;
2561b9c1 2719
a910b123 2720 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
3b98c0c2
LE
2721 if (!adm_ctx.reply_skb)
2722 return retcode;
2723 if (retcode != NO_ERROR)
2561b9c1 2724 goto fail;
b411b363 2725
bde89a9e 2726 connection = adm_ctx.connection;
3b98c0c2
LE
2727 memset(&parms, 0, sizeof(parms));
2728 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
f399002e 2729 err = disconnect_parms_from_attrs(&parms, info);
3b98c0c2
LE
2730 if (err) {
2731 retcode = ERR_MANDATORY_TAG;
a910b123 2732 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
b411b363
PR
2733 goto fail;
2734 }
2735 }
2736
9e276872 2737 mutex_lock(&adm_ctx.resource->adm_mutex);
bde89a9e 2738 rv = conn_try_disconnect(connection, parms.force_disconnect);
9e276872 2739 mutex_unlock(&adm_ctx.resource->adm_mutex);
4b28f3b4
AB
2740 if (rv < SS_SUCCESS) {
2741 drbd_adm_finish(&adm_ctx, info, rv);
2742 return 0;
2743 }
2744 retcode = NO_ERROR;
b411b363 2745 fail:
a910b123 2746 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2747 return 0;
2748}
2749
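/* After an online grow, decide which side resyncs the new area: the current
 * primary becomes sync source if the roles differ, otherwise the
 * RESOLVE_CONFLICTS flag breaks the tie; then either start the resync or
 * wait for the peer's sync UUID. */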
b30ab791 2750void resync_after_online_grow(struct drbd_device *device)
b411b363
PR
2751{
2752 int iass; /* I am sync source */
2753
d0180171 2754 drbd_info(device, "Resync of new storage after online grow\n");
b30ab791
AG
2755 if (device->state.role != device->state.peer)
2756 iass = (device->state.role == R_PRIMARY);
b411b363 2757 else
a6b32bc3 2758 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
b411b363
PR
2759
2760 if (iass)
b30ab791 2761 drbd_start_resync(device, C_SYNC_SOURCE);
b411b363 2762 else
b30ab791 2763 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
b411b363
PR
2764}
2765
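/* Netlink handler for resize: requires an attached disk, no resync in
 * progress and at least one primary; optionally installs a new disk_size
 * and/or a new activity log layout, re-runs drbd_determine_dev_size() and,
 * while connected, sends updated UUIDs and sizes to the peer. */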
3b98c0c2 2766int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
b411b363 2767{
a910b123 2768 struct drbd_config_context adm_ctx;
daeda1cc 2769 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3b98c0c2 2770 struct resize_parms rs;
b30ab791 2771 struct drbd_device *device;
3b98c0c2 2772 enum drbd_ret_code retcode;
b411b363 2773 enum determine_dev_size dd;
d752b269 2774 bool change_al_layout = false;
6495d2c6 2775 enum dds_flags ddsf;
daeda1cc 2776 sector_t u_size;
3b98c0c2 2777 int err;
b411b363 2778
a910b123 2779 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
2780 if (!adm_ctx.reply_skb)
2781 return retcode;
2782 if (retcode != NO_ERROR)
9e276872 2783 goto finish;
3b98c0c2 2784
9e276872 2785 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791
AG
2786 device = adm_ctx.device;
2787 if (!get_ldev(device)) {
d752b269
PR
2788 retcode = ERR_NO_DISK;
2789 goto fail;
2790 }
2791
3b98c0c2 2792 memset(&rs, 0, sizeof(struct resize_parms));
b30ab791
AG
2793 rs.al_stripes = device->ldev->md.al_stripes;
2794 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
3b98c0c2 2795 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
f399002e 2796 err = resize_parms_from_attrs(&rs, info);
b411b363 2797 if (err) {
3b98c0c2 2798 retcode = ERR_MANDATORY_TAG;
a910b123 2799 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
d752b269 2800 goto fail_ldev;
b411b363
PR
2801 }
2802 }
2803
b30ab791 2804 if (device->state.conn > C_CONNECTED) {
b411b363 2805 retcode = ERR_RESIZE_RESYNC;
d752b269 2806 goto fail_ldev;
b411b363 2807 }
b411b363 2808
b30ab791
AG
2809 if (device->state.role == R_SECONDARY &&
2810 device->state.peer == R_SECONDARY) {
b411b363 2811 retcode = ERR_NO_PRIMARY;
d752b269 2812 goto fail_ldev;
b411b363 2813 }
b411b363 2814
a6b32bc3 2815 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
6495d2c6 2816 retcode = ERR_NEED_APV_93;
9bcd2521 2817 goto fail_ldev;
6495d2c6
PR
2818 }
2819
daeda1cc 2820 rcu_read_lock();
b30ab791 2821 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
daeda1cc
PR
2822 rcu_read_unlock();
2823 if (u_size != (sector_t)rs.resize_size) {
2824 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2825 if (!new_disk_conf) {
778f271d 2826 retcode = ERR_NOMEM;
9bcd2521 2827 goto fail_ldev;
778f271d
PR
2828 }
2829 }
2830
b30ab791
AG
2831 if (device->ldev->md.al_stripes != rs.al_stripes ||
2832 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
d752b269
PR
2833 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2834
2835 if (al_size_k > (16 * 1024 * 1024)) {
2836 retcode = ERR_MD_LAYOUT_TOO_BIG;
2837 goto fail_ldev;
2838 }
2839
2840 if (al_size_k < MD_32kB_SECT/2) {
2841 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2842 goto fail_ldev;
2843 }
2844
cdc6af8d 2845 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
d752b269
PR
2846 retcode = ERR_MD_LAYOUT_CONNECTED;
2847 goto fail_ldev;
2848 }
2849
2850 change_al_layout = true;
2851 }
2852
b30ab791
AG
2853 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2854 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
b411b363 2855
daeda1cc 2856 if (new_disk_conf) {
0500813f 2857 mutex_lock(&device->resource->conf_update);
b30ab791 2858 old_disk_conf = device->ldev->disk_conf;
daeda1cc
PR
2859 *new_disk_conf = *old_disk_conf;
2860 new_disk_conf->disk_size = (sector_t)rs.resize_size;
b30ab791 2861 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
0500813f 2862 mutex_unlock(&device->resource->conf_update);
a77b2109 2863 kvfree_rcu_mightsleep(old_disk_conf);
70644786 2864 new_disk_conf = NULL;
b411b363
PR
2865 }
2866
6495d2c6 2867 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
b30ab791
AG
2868 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2869 drbd_md_sync(device);
2870 put_ldev(device);
e96c9633 2871 if (dd == DS_ERROR) {
b411b363
PR
2872 retcode = ERR_NOMEM_BITMAP;
2873 goto fail;
d752b269
PR
2874 } else if (dd == DS_ERROR_SPACE_MD) {
2875 retcode = ERR_MD_LAYOUT_NO_FIT;
2876 goto fail;
2877 } else if (dd == DS_ERROR_SHRINK) {
2878 retcode = ERR_IMPLICIT_SHRINK;
2879 goto fail;
b411b363 2880 }
778f271d 2881
b30ab791 2882 if (device->state.conn == C_CONNECTED) {
e96c9633 2883 if (dd == DS_GREW)
b30ab791 2884 set_bit(RESIZE_PENDING, &device->flags);
b411b363 2885
69a22773
AG
2886 drbd_send_uuids(first_peer_device(device));
2887 drbd_send_sizes(first_peer_device(device), 1, ddsf);
778f271d
PR
2888 }
2889
b411b363 2890 fail:
9e276872
LE
2891 mutex_unlock(&adm_ctx.resource->adm_mutex);
2892 finish:
a910b123 2893 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363 2894 return 0;
b411b363 2895
9bcd2521 2896 fail_ldev:
b30ab791 2897 put_ldev(device);
70644786 2898 kfree(new_disk_conf);
9bcd2521 2899 goto fail;
b411b363 2900}
b411b363 2901
f399002e 2902int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
b411b363 2903{
a910b123 2904 struct drbd_config_context adm_ctx;
3b98c0c2 2905 enum drbd_ret_code retcode;
b57a1e27 2906 struct res_opts res_opts;
f399002e 2907 int err;
b411b363 2908
a910b123 2909 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3b98c0c2
LE
2910 if (!adm_ctx.reply_skb)
2911 return retcode;
2912 if (retcode != NO_ERROR)
2913 goto fail;
b411b363 2914
eb6bea67 2915 res_opts = adm_ctx.resource->res_opts;
5979e361 2916 if (should_set_defaults(info))
b966b5dd 2917 set_res_opts_defaults(&res_opts);
b411b363 2918
b57a1e27 2919 err = res_opts_from_attrs(&res_opts, info);
c75b9b10 2920 if (err && err != -ENOMSG) {
b411b363 2921 retcode = ERR_MANDATORY_TAG;
a910b123 2922 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
b411b363
PR
2923 goto fail;
2924 }
2925
9e276872 2926 mutex_lock(&adm_ctx.resource->adm_mutex);
eb6bea67 2927 err = set_resource_options(adm_ctx.resource, &res_opts);
afbbfa88
AG
2928 if (err) {
2929 retcode = ERR_INVALID_REQUEST;
2930 if (err == -ENOMEM)
2931 retcode = ERR_NOMEM;
b411b363 2932 }
9e276872 2933 mutex_unlock(&adm_ctx.resource->adm_mutex);
b411b363 2934
b411b363 2935fail:
a910b123 2936 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2937 return 0;
2938}
2939
3b98c0c2 2940int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
b411b363 2941{
a910b123 2942 struct drbd_config_context adm_ctx;
b30ab791 2943 struct drbd_device *device;
3b98c0c2
LE
2944 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2945
a910b123 2946 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
2947 if (!adm_ctx.reply_skb)
2948 return retcode;
2949 if (retcode != NO_ERROR)
2950 goto out;
2951
b30ab791 2952 device = adm_ctx.device;
8fe39aac
PR
2953 if (!get_ldev(device)) {
2954 retcode = ERR_NO_DISK;
2955 goto out;
2956 }
2957
2958 mutex_lock(&adm_ctx.resource->adm_mutex);
b411b363 2959
194bfb32 2960 /* If there is still bitmap IO pending, probably because of a previous
7ee1fb93
LE
2961 * resync just being finished, wait for it before requesting a new resync.
 2962 * Also wait for its after_state_ch(). */
b30ab791
AG
2963 drbd_suspend_io(device);
2964 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
b5043c5e 2965 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
194bfb32 2966
0b2dafcd
PR
2967 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2968 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2969 * try to start a resync handshake as sync target for full sync.
9376d9f8 2970 */
b30ab791
AG
2971 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2972 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
0b2dafcd 2973 if (retcode >= SS_SUCCESS) {
b30ab791 2974 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
8164dd6c 2975 "set_n_write from invalidate", BM_LOCKED_MASK, NULL))
0b2dafcd
PR
2976 retcode = ERR_IO_MD_DISK;
2977 }
2978 } else
b30ab791
AG
2979 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2980 drbd_resume_io(device);
9e276872 2981 mutex_unlock(&adm_ctx.resource->adm_mutex);
8fe39aac 2982 put_ldev(device);
3b98c0c2 2983out:
a910b123 2984 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
2985 return 0;
2986}
2987
3b98c0c2
LE
2988static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2989 union drbd_state mask, union drbd_state val)
b411b363 2990{
a910b123 2991 struct drbd_config_context adm_ctx;
3b98c0c2 2992 enum drbd_ret_code retcode;
194bfb32 2993
a910b123 2994 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
2995 if (!adm_ctx.reply_skb)
2996 return retcode;
2997 if (retcode != NO_ERROR)
2998 goto out;
b411b363 2999
9e276872 3000 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791 3001 retcode = drbd_request_state(adm_ctx.device, mask, val);
9e276872 3002 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 3003out:
a910b123 3004 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
3005 return 0;
3006}
3007
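/* Set all bits in the bitmap, then suspend updates to the activity log;
 * used as the bitmap I/O operation of drbd_adm_invalidate_peer() below. */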
8164dd6c
AG
3008static int drbd_bmio_set_susp_al(struct drbd_device *device,
3009 struct drbd_peer_device *peer_device) __must_hold(local)
0778286a
PR
3010{
3011 int rv;
3012
8164dd6c 3013 rv = drbd_bmio_set_n_write(device, peer_device);
b30ab791 3014 drbd_suspend_al(device);
0778286a
PR
3015 return rv;
3016}
3017
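/* Invalidate the peer's disk: mark it D_INCONSISTENT and request a full
 * resync with this node as sync source.  While not connected, set all
 * bits in the bitmap and suspend the activity log instead. */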
3b98c0c2 3018int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
b411b363 3019{
a910b123 3020 struct drbd_config_context adm_ctx;
25b0d6c8 3021 int retcode; /* drbd_ret_code, drbd_state_rv */
b30ab791 3022 struct drbd_device *device;
25b0d6c8 3023
a910b123 3024 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
25b0d6c8
PR
3025 if (!adm_ctx.reply_skb)
3026 return retcode;
3027 if (retcode != NO_ERROR)
3028 goto out;
3029
b30ab791 3030 device = adm_ctx.device;
8fe39aac
PR
3031 if (!get_ldev(device)) {
3032 retcode = ERR_NO_DISK;
3033 goto out;
3034 }
3035
3036 mutex_lock(&adm_ctx.resource->adm_mutex);
b411b363 3037
194bfb32 3038 /* If there is still bitmap IO pending, probably because of a previous
7ee1fb93
LE
3039 * resync just being finished, wait for it before requesting a new resync.
 3040 * Also wait for its after_state_ch(). */
b30ab791
AG
3041 drbd_suspend_io(device);
3042 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
b5043c5e 3043 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
194bfb32 3044
0b2dafcd
PR
3045 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3046 * in the bitmap. Otherwise, try to start a resync handshake
3047 * as sync source for full sync.
3048 */
b30ab791 3049 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
0b2dafcd
PR
 3050 /* The peer will get a resync upon connect anyway. Just make that
3051 into a full resync. */
b30ab791 3052 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
0b2dafcd 3053 if (retcode >= SS_SUCCESS) {
b30ab791 3054 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
0b2dafcd 3055 "set_n_write from invalidate_peer",
8164dd6c 3056 BM_LOCKED_SET_ALLOWED, NULL))
0b2dafcd
PR
3057 retcode = ERR_IO_MD_DISK;
3058 }
3059 } else
b30ab791
AG
3060 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3061 drbd_resume_io(device);
9e276872 3062 mutex_unlock(&adm_ctx.resource->adm_mutex);
8fe39aac 3063 put_ldev(device);
25b0d6c8 3064out:
a910b123 3065 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
3066 return 0;
3067}
3068
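/* Pause resynchronisation by setting the user "sync paused" flag;
 * reports ERR_PAUSE_IS_SET if that flag was already set. */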
3b98c0c2 3069int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
b411b363 3070{
a910b123 3071 struct drbd_config_context adm_ctx;
3b98c0c2 3072 enum drbd_ret_code retcode;
b411b363 3073
a910b123 3074 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
3075 if (!adm_ctx.reply_skb)
3076 return retcode;
3077 if (retcode != NO_ERROR)
3078 goto out;
b411b363 3079
9e276872 3080 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791 3081 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3b98c0c2 3082 retcode = ERR_PAUSE_IS_SET;
9e276872 3083 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 3084out:
a910b123 3085 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
3086 return 0;
3087}
3088
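/* Clear the user "sync paused" flag.  If the resync stays paused
 * regardless, report whether a dependency or the peer keeps it paused. */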
3b98c0c2 3089int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
b411b363 3090{
a910b123 3091 struct drbd_config_context adm_ctx;
da9fbc27 3092 union drbd_dev_state s;
3b98c0c2
LE
3093 enum drbd_ret_code retcode;
3094
a910b123 3095 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
3096 if (!adm_ctx.reply_skb)
3097 return retcode;
3098 if (retcode != NO_ERROR)
3099 goto out;
b411b363 3100
9e276872 3101 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791
AG
3102 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3103 s = adm_ctx.device->state;
cd88d030
PR
3104 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3105 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3106 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3107 } else {
3108 retcode = ERR_PAUSE_IS_CLEAR;
3109 }
3110 }
9e276872 3111 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 3112out:
a910b123 3113 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
3114 return 0;
3115}
3116
3b98c0c2 3117int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
b411b363 3118{
3b98c0c2 3119 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
b411b363
PR
3120}
3121
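/* Resume I/O that was frozen due to a lost connection or lost disk:
 * generate a new current UUID if one is due, then fail any frozen
 * requests that can no longer be completed. */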
3b98c0c2 3122int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
b411b363 3123{
a910b123 3124 struct drbd_config_context adm_ctx;
b30ab791 3125 struct drbd_device *device;
3b98c0c2
LE
3126 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3127
a910b123 3128 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
3129 if (!adm_ctx.reply_skb)
3130 return retcode;
3131 if (retcode != NO_ERROR)
3132 goto out;
3133
9e276872 3134 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791
AG
3135 device = adm_ctx.device;
3136 if (test_bit(NEW_CUR_UUID, &device->flags)) {
9fa48269
LE
3137 if (get_ldev_if_state(device, D_ATTACHING)) {
3138 drbd_uuid_new_current(device);
3139 put_ldev(device);
3140 } else {
3141 /* This is effectively a multi-stage "forced down".
 3142 * The NEW_CUR_UUID bit is supposedly only set if we
3143 * lost the replication connection, and are configured
3144 * to freeze IO and wait for some fence-peer handler.
3145 * So we still don't have a replication connection.
3146 * And now we don't have a local disk either. After
3147 * resume, we will fail all pending and new IO, because
3148 * we don't have any data anymore. Which means we will
3149 * eventually be able to terminate all users of this
3150 * device, and then take it down. By bumping the
3151 * "effective" data uuid, we make sure that you really
 3152 * need to tear down before you reconfigure; we will
 3153 * then refuse to re-connect or re-attach (because no
3154 * matching real data uuid exists).
3155 */
3156 u64 val;
3157 get_random_bytes(&val, sizeof(u64));
3158 drbd_set_ed_uuid(device, val);
3159 drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3160 }
b30ab791 3161 clear_bit(NEW_CUR_UUID, &device->flags);
43a5182c 3162 }
b30ab791
AG
3163 drbd_suspend_io(device);
3164 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3b98c0c2 3165 if (retcode == SS_SUCCESS) {
b30ab791 3166 if (device->state.conn < C_CONNECTED)
a6b32bc3 3167 tl_clear(first_peer_device(device)->connection);
b30ab791 3168 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
a6b32bc3 3169 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
265be2d0 3170 }
b30ab791 3171 drbd_resume_io(device);
9e276872 3172 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 3173out:
a910b123 3174 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
3175 return 0;
3176}
3177
3b98c0c2 3178int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
b411b363 3179{
3b98c0c2 3180 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
b411b363
PR
3181}
3182
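/* Nest a DRBD_NLA_CFG_CONTEXT attribute (resource name, and optionally
 * the volume number and the connection addresses) into @skb. */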
251b8f8e
AG
3183static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3184 struct drbd_resource *resource,
3185 struct drbd_connection *connection,
3186 struct drbd_device *device)
b411b363 3187{
543cc10b 3188 struct nlattr *nla;
ae0be8de 3189 nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
543cc10b
LE
3190 if (!nla)
3191 goto nla_put_failure;
251b8f8e
AG
3192 if (device &&
3193 nla_put_u32(skb, T_ctx_volume, device->vnr))
26ec9287 3194 goto nla_put_failure;
f597f6b8 3195 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
26ec9287 3196 goto nla_put_failure;
251b8f8e
AG
3197 if (connection) {
3198 if (connection->my_addr_len &&
3199 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3200 goto nla_put_failure;
3201 if (connection->peer_addr_len &&
3202 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3203 goto nla_put_failure;
3204 }
543cc10b
LE
3205 nla_nest_end(skb, nla);
3206 return 0;
b411b363 3207
543cc10b
LE
3208nla_put_failure:
3209 if (nla)
3210 nla_nest_cancel(skb, nla);
3211 return -EMSGSIZE;
3212}
b411b363 3213
a55bbd37
AG
3214/*
3215 * The generic netlink dump callbacks are called outside the genl_lock(), so
3216 * they cannot use the simple attribute parsing code which uses global
3217 * attribute tables.
3218 */
3219static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3220{
3221 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3222 const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3223 struct nlattr *nla;
3224
3225 nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3226 DRBD_NLA_CFG_CONTEXT);
3227 if (!nla)
3228 return NULL;
3229 return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3230}
3231
3232static void resource_to_info(struct resource_info *, struct drbd_resource *);
3233
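/* Dump one resource per dump callback invocation; cb->args[0] holds the
 * last resource dumped and is revalidated against drbd_resources under
 * rcu_read_lock() on the next call. */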
3234int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3235{
3236 struct drbd_genlmsghdr *dh;
3237 struct drbd_resource *resource;
3238 struct resource_info resource_info;
3239 struct resource_statistics resource_statistics;
3240 int err;
3241
3242 rcu_read_lock();
3243 if (cb->args[0]) {
3244 for_each_resource_rcu(resource, &drbd_resources)
3245 if (resource == (struct drbd_resource *)cb->args[0])
3246 goto found_resource;
3247 err = 0; /* resource was probably deleted */
3248 goto out;
3249 }
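/* Make resource point to the list head (not the first entry). */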
3250 resource = list_entry(&drbd_resources,
3251 struct drbd_resource, resources);
3252
3253found_resource:
3254 list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3255 goto put_result;
3256 }
3257 err = 0;
3258 goto out;
3259
3260put_result:
3261 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3262 cb->nlh->nlmsg_seq, &drbd_genl_family,
3263 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3264 err = -ENOMEM;
3265 if (!dh)
3266 goto out;
3267 dh->minor = -1U;
3268 dh->ret_code = NO_ERROR;
3269 err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3270 if (err)
3271 goto out;
3272 err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3273 if (err)
3274 goto out;
3275 resource_to_info(&resource_info, resource);
3276 err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3277 if (err)
3278 goto out;
3279 resource_statistics.res_stat_write_ordering = resource->write_ordering;
3280 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3281 if (err)
3282 goto out;
3283 cb->args[0] = (long)resource;
3284 genlmsg_end(skb, dh);
3285 err = 0;
3286
3287out:
3288 rcu_read_unlock();
3289 if (err)
3290 return err;
3291 return skb->len;
3292}
3293
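/* Fill @s with the current counters of @device; the UUID history and
 * disk flags are only included while the local disk can be referenced. */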
3294static void device_to_statistics(struct device_statistics *s,
3295 struct drbd_device *device)
3296{
3297 memset(s, 0, sizeof(*s));
3298 s->dev_upper_blocked = !may_inc_ap_bio(device);
3299 if (get_ldev(device)) {
3300 struct drbd_md *md = &device->ldev->md;
3301 u64 *history_uuids = (u64 *)s->history_uuids;
a55bbd37
AG
3302 int n;
3303
3304 spin_lock_irq(&md->uuid_lock);
3305 s->dev_current_uuid = md->uuid[UI_CURRENT];
3306 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3307 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3308 history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3309 for (; n < HISTORY_UUIDS; n++)
3310 history_uuids[n] = 0;
3311 s->history_uuids_len = HISTORY_UUIDS;
3312 spin_unlock_irq(&md->uuid_lock);
3313
3314 s->dev_disk_flags = md->flags;
a55bbd37
AG
3315 put_ldev(device);
3316 }
155bd9d1 3317 s->dev_size = get_capacity(device->vdisk);
a55bbd37
AG
3318 s->dev_read = device->read_cnt;
3319 s->dev_write = device->writ_cnt;
3320 s->dev_al_writes = device->al_writ_cnt;
3321 s->dev_bm_writes = device->bm_writ_cnt;
3322 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3323 s->dev_lower_pending = atomic_read(&device->local_cnt);
3324 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3325 s->dev_exposed_data_uuid = device->ed_uuid;
3326}
3327
3328static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3329{
3330 if (cb->args[0]) {
3331 struct drbd_resource *resource =
3332 (struct drbd_resource *)cb->args[0];
3333 kref_put(&resource->kref, drbd_destroy_resource);
3334 }
3335
3336 return 0;
3337}
3338
 3339 int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
 3340 return put_resource_in_arg0(cb, 7);
 3341 }
3342
3343static void device_to_info(struct device_info *, struct drbd_device *);
3344
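/* Dump one device per call; cb->args[0] optionally pins a single
 * resource to iterate, cb->args[1] is the next minor to look at. */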
3345int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3346{
3347 struct nlattr *resource_filter;
3348 struct drbd_resource *resource;
3f649ab7 3349 struct drbd_device *device;
a55bbd37
AG
3350 int minor, err, retcode;
3351 struct drbd_genlmsghdr *dh;
3352 struct device_info device_info;
3353 struct device_statistics device_statistics;
3354 struct idr *idr_to_search;
3355
3356 resource = (struct drbd_resource *)cb->args[0];
3357 if (!cb->args[0] && !cb->args[1]) {
3358 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3359 if (resource_filter) {
3360 retcode = ERR_RES_NOT_KNOWN;
3361 resource = drbd_find_resource(nla_data(resource_filter));
3362 if (!resource)
3363 goto put_result;
3364 cb->args[0] = (long)resource;
3365 }
3366 }
3367
3368 rcu_read_lock();
3369 minor = cb->args[1];
3370 idr_to_search = resource ? &resource->devices : &drbd_devices;
3371 device = idr_get_next(idr_to_search, &minor);
3372 if (!device) {
3373 err = 0;
3374 goto out;
3375 }
3376 idr_for_each_entry_continue(idr_to_search, device, minor) {
3377 retcode = NO_ERROR;
3378 goto put_result; /* only one iteration */
3379 }
3380 err = 0;
3381 goto out; /* no more devices */
3382
3383put_result:
3384 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3385 cb->nlh->nlmsg_seq, &drbd_genl_family,
3386 NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3387 err = -ENOMEM;
3388 if (!dh)
3389 goto out;
3390 dh->ret_code = retcode;
3391 dh->minor = -1U;
3392 if (retcode == NO_ERROR) {
3393 dh->minor = device->minor;
3394 err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3395 if (err)
3396 goto out;
3397 if (get_ldev(device)) {
3398 struct disk_conf *disk_conf =
3399 rcu_dereference(device->ldev->disk_conf);
3400
3401 err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3402 put_ldev(device);
3403 if (err)
3404 goto out;
3405 }
3406 device_to_info(&device_info, device);
3407 err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3408 if (err)
3409 goto out;
3410
3411 device_to_statistics(&device_statistics, device);
3412 err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3413 if (err)
3414 goto out;
3415 cb->args[1] = minor + 1;
3416 }
3417 genlmsg_end(skb, dh);
3418 err = 0;
3419
3420out:
3421 rcu_read_unlock();
3422 if (err)
3423 return err;
3424 return skb->len;
3425}
3426
3427int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3428{
3429 return put_resource_in_arg0(cb, 6);
3430}
3431
3432enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3433
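/* Dump one connection per call.  cb->args[0] holds the current resource
 * (with a kref), cb->args[1] selects single-resource vs. iterate-all
 * mode, and cb->args[2] holds the last connection dumped. */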
3434int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3435{
3436 struct nlattr *resource_filter;
3437 struct drbd_resource *resource = NULL, *next_resource;
3f649ab7 3438 struct drbd_connection *connection;
a55bbd37
AG
3439 int err = 0, retcode;
3440 struct drbd_genlmsghdr *dh;
3441 struct connection_info connection_info;
3442 struct connection_statistics connection_statistics;
3443
3444 rcu_read_lock();
3445 resource = (struct drbd_resource *)cb->args[0];
3446 if (!cb->args[0]) {
3447 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3448 if (resource_filter) {
3449 retcode = ERR_RES_NOT_KNOWN;
3450 resource = drbd_find_resource(nla_data(resource_filter));
3451 if (!resource)
3452 goto put_result;
3453 cb->args[0] = (long)resource;
3454 cb->args[1] = SINGLE_RESOURCE;
3455 }
3456 }
3457 if (!resource) {
3458 if (list_empty(&drbd_resources))
3459 goto out;
3460 resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3461 kref_get(&resource->kref);
3462 cb->args[0] = (long)resource;
3463 cb->args[1] = ITERATE_RESOURCES;
3464 }
3465
3466 next_resource:
3467 rcu_read_unlock();
3468 mutex_lock(&resource->conf_update);
3469 rcu_read_lock();
3470 if (cb->args[2]) {
3471 for_each_connection_rcu(connection, resource)
3472 if (connection == (struct drbd_connection *)cb->args[2])
3473 goto found_connection;
3474 /* connection was probably deleted */
3475 goto no_more_connections;
3476 }
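/* Make connection point to the list head (not the first entry). */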
3477 connection = list_entry(&resource->connections, struct drbd_connection, connections);
3478
3479found_connection:
3480 list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3481 if (!has_net_conf(connection))
3482 continue;
3483 retcode = NO_ERROR;
3484 goto put_result; /* only one iteration */
3485 }
3486
3487no_more_connections:
3488 if (cb->args[1] == ITERATE_RESOURCES) {
3489 for_each_resource_rcu(next_resource, &drbd_resources) {
3490 if (next_resource == resource)
3491 goto found_resource;
3492 }
3493 /* resource was probably deleted */
3494 }
3495 goto out;
3496
3497found_resource:
3498 list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3499 mutex_unlock(&resource->conf_update);
3500 kref_put(&resource->kref, drbd_destroy_resource);
3501 resource = next_resource;
3502 kref_get(&resource->kref);
3503 cb->args[0] = (long)resource;
3504 cb->args[2] = 0;
3505 goto next_resource;
3506 }
3507 goto out; /* no more resources */
3508
3509put_result:
3510 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3511 cb->nlh->nlmsg_seq, &drbd_genl_family,
3512 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3513 err = -ENOMEM;
3514 if (!dh)
3515 goto out;
3516 dh->ret_code = retcode;
3517 dh->minor = -1U;
3518 if (retcode == NO_ERROR) {
3519 struct net_conf *net_conf;
3520
3521 err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3522 if (err)
3523 goto out;
3524 net_conf = rcu_dereference(connection->net_conf);
3525 if (net_conf) {
3526 err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3527 if (err)
3528 goto out;
3529 }
3530 connection_to_info(&connection_info, connection);
3531 err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3532 if (err)
3533 goto out;
3534 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3535 err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3536 if (err)
3537 goto out;
3538 cb->args[2] = (long)connection;
3539 }
3540 genlmsg_end(skb, dh);
3541 err = 0;
3542
3543out:
3544 rcu_read_unlock();
3545 if (resource)
3546 mutex_unlock(&resource->conf_update);
3547 if (err)
3548 return err;
3549 return skb->len;
3550}
3551
3552enum mdf_peer_flag {
3553 MDF_PEER_CONNECTED = 1 << 0,
3554 MDF_PEER_OUTDATED = 1 << 1,
3555 MDF_PEER_FENCING = 1 << 2,
3556 MDF_PEER_FULL_SYNC = 1 << 3,
3557};
3558
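/* Collect replication statistics for @peer_device; the MDF_PEER_* flags
 * are derived from the metadata flags of the local disk. */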
3559static void peer_device_to_statistics(struct peer_device_statistics *s,
3560 struct drbd_peer_device *peer_device)
3561{
3562 struct drbd_device *device = peer_device->device;
3563
3564 memset(s, 0, sizeof(*s));
3565 s->peer_dev_received = device->recv_cnt;
3566 s->peer_dev_sent = device->send_cnt;
3567 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3568 atomic_read(&device->rs_pending_cnt);
3569 s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3570 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3571 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3572 if (get_ldev(device)) {
3573 struct drbd_md *md = &device->ldev->md;
3574
3575 spin_lock_irq(&md->uuid_lock);
3576 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3577 spin_unlock_irq(&md->uuid_lock);
3578 s->peer_dev_flags =
3579 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3580 MDF_PEER_CONNECTED : 0) +
3581 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3582 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3583 MDF_PEER_OUTDATED : 0) +
3584 /* FIXME: MDF_PEER_FENCING? */
3585 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3586 MDF_PEER_FULL_SYNC : 0);
3587 put_ldev(device);
3588 }
3589}
3590
3591int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3592{
3593 return put_resource_in_arg0(cb, 9);
3594}
3595
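/* Dump one peer device per call; cb->args[1] is the minor of the
 * current device, cb->args[2] the last peer device dumped. */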
3596int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3597{
3598 struct nlattr *resource_filter;
3599 struct drbd_resource *resource;
3f649ab7 3600 struct drbd_device *device;
a55bbd37
AG
3601 struct drbd_peer_device *peer_device = NULL;
3602 int minor, err, retcode;
3603 struct drbd_genlmsghdr *dh;
3604 struct idr *idr_to_search;
3605
3606 resource = (struct drbd_resource *)cb->args[0];
3607 if (!cb->args[0] && !cb->args[1]) {
3608 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3609 if (resource_filter) {
3610 retcode = ERR_RES_NOT_KNOWN;
3611 resource = drbd_find_resource(nla_data(resource_filter));
3612 if (!resource)
3613 goto put_result;
3614 }
3615 cb->args[0] = (long)resource;
3616 }
3617
3618 rcu_read_lock();
3619 minor = cb->args[1];
3620 idr_to_search = resource ? &resource->devices : &drbd_devices;
3621 device = idr_find(idr_to_search, minor);
3622 if (!device) {
3623next_device:
3624 minor++;
3625 cb->args[2] = 0;
3626 device = idr_get_next(idr_to_search, &minor);
3627 if (!device) {
3628 err = 0;
3629 goto out;
3630 }
3631 }
3632 if (cb->args[2]) {
3633 for_each_peer_device(peer_device, device)
3634 if (peer_device == (struct drbd_peer_device *)cb->args[2])
3635 goto found_peer_device;
3636 /* peer device was probably deleted */
3637 goto next_device;
3638 }
3639 /* Make peer_device point to the list head (not the first entry). */
3640 peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3641
3642found_peer_device:
3643 list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3644 if (!has_net_conf(peer_device->connection))
3645 continue;
3646 retcode = NO_ERROR;
3647 goto put_result; /* only one iteration */
3648 }
3649 goto next_device;
3650
3651put_result:
3652 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3653 cb->nlh->nlmsg_seq, &drbd_genl_family,
3654 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3655 err = -ENOMEM;
3656 if (!dh)
3657 goto out;
3658 dh->ret_code = retcode;
3659 dh->minor = -1U;
3660 if (retcode == NO_ERROR) {
3661 struct peer_device_info peer_device_info;
3662 struct peer_device_statistics peer_device_statistics;
3663
3664 dh->minor = minor;
3665 err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3666 if (err)
3667 goto out;
3668 peer_device_to_info(&peer_device_info, peer_device);
3669 err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3670 if (err)
3671 goto out;
3672 peer_device_to_statistics(&peer_device_statistics, peer_device);
3673 err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3674 if (err)
3675 goto out;
3676 cb->args[1] = minor;
3677 cb->args[2] = (long)peer_device;
3678 }
3679 genlmsg_end(skb, dh);
3680 err = 0;
3681
3682out:
3683 rcu_read_unlock();
3684 if (err)
3685 return err;
3686 return skb->len;
3687}
251b8f8e
AG
3688/*
3689 * Return the connection of @resource if @resource has exactly one connection.
3690 */
3691static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3692{
3693 struct list_head *connections = &resource->connections;
3694
3695 if (list_empty(connections) || connections->next->next != connections)
3696 return NULL;
3697 return list_first_entry(&resource->connections, struct drbd_connection, connections);
3698}
3699
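/* Put the status of @device onto @skb: configuration context, options,
 * counters, UUIDs, and, for broadcasts, the state change info in @sib. */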
8ce953aa 3700static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3b98c0c2 3701 const struct sib_info *sib)
b411b363 3702{
251b8f8e 3703 struct drbd_resource *resource = device->resource;
3b98c0c2
LE
3704 struct state_info *si = NULL; /* for sizeof(si->member); */
3705 struct nlattr *nla;
3706 int got_ldev;
3b98c0c2
LE
3707 int err = 0;
3708 int exclude_sensitive;
3709
3710 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
 3711 * to. So we had better exclude sensitive information.
3712 *
3713 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3714 * in the context of the requesting user process. Exclude sensitive
3715 * information, unless current has superuser.
3716 *
3717 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3718 * relies on the current implementation of netlink_dump(), which
3719 * executes the dump callback successively from netlink_recvmsg(),
3720 * always in the context of the receiving process */
3721 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3722
b30ab791 3723 got_ldev = get_ldev(device);
3b98c0c2
LE
3724
 3725 /* We still need to add connection name and volume number information.
3726 * Minor number is in drbd_genlmsghdr. */
251b8f8e 3727 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3b98c0c2 3728 goto nla_put_failure;
3b98c0c2 3729
eb6bea67 3730 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
f399002e
LE
3731 goto nla_put_failure;
3732
daeda1cc 3733 rcu_read_lock();
f9eb7bf4
AG
3734 if (got_ldev) {
3735 struct disk_conf *disk_conf;
44ed167d 3736
b30ab791 3737 disk_conf = rcu_dereference(device->ldev->disk_conf);
f9eb7bf4
AG
3738 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3739 }
3740 if (!err) {
3741 struct net_conf *nc;
3742
a6b32bc3 3743 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
f9eb7bf4
AG
3744 if (nc)
3745 err = net_conf_to_skb(skb, nc, exclude_sensitive);
3746 }
44ed167d
PR
3747 rcu_read_unlock();
3748 if (err)
3749 goto nla_put_failure;
3b98c0c2 3750
ae0be8de 3751 nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
3b98c0c2
LE
3752 if (!nla)
3753 goto nla_put_failure;
26ec9287 3754 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
b30ab791 3755 nla_put_u32(skb, T_current_state, device->state.i) ||
1dee3f59 3756 nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
155bd9d1 3757 nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
1dee3f59
ND
3758 nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3759 nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3760 nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3761 nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3762 nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3763 nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
b30ab791
AG
3764 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3765 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3766 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
26ec9287 3767 goto nla_put_failure;
3b98c0c2
LE
3768
3769 if (got_ldev) {
39a1aa7f 3770 int err;
b411b363 3771
b30ab791
AG
3772 spin_lock_irq(&device->ldev->md.uuid_lock);
3773 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3774 spin_unlock_irq(&device->ldev->md.uuid_lock);
39a1aa7f
PR
3775
3776 if (err)
3777 goto nla_put_failure;
3778
b30ab791 3779 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
1dee3f59
ND
3780 nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3781 nla_put_u64_0pad(skb, T_bits_oos,
3782 drbd_bm_total_weight(device)))
26ec9287 3783 goto nla_put_failure;
b30ab791
AG
3784 if (C_SYNC_SOURCE <= device->state.conn &&
3785 C_PAUSED_SYNC_T >= device->state.conn) {
1dee3f59
ND
3786 if (nla_put_u64_0pad(skb, T_bits_rs_total,
3787 device->rs_total) ||
3788 nla_put_u64_0pad(skb, T_bits_rs_failed,
3789 device->rs_failed))
26ec9287 3790 goto nla_put_failure;
3b98c0c2 3791 }
b411b363 3792 }
b411b363 3793
3b98c0c2
LE
3794 if (sib) {
3795 switch(sib->sib_reason) {
3796 case SIB_SYNC_PROGRESS:
3797 case SIB_GET_STATUS_REPLY:
3798 break;
3799 case SIB_STATE_CHANGE:
26ec9287
AG
3800 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3801 nla_put_u32(skb, T_new_state, sib->ns.i))
3802 goto nla_put_failure;
3b98c0c2
LE
3803 break;
3804 case SIB_HELPER_POST:
26ec9287
AG
3805 if (nla_put_u32(skb, T_helper_exit_code,
3806 sib->helper_exit_code))
3807 goto nla_put_failure;
df561f66 3808 fallthrough;
3b98c0c2 3809 case SIB_HELPER_PRE:
26ec9287
AG
3810 if (nla_put_string(skb, T_helper, sib->helper_name))
3811 goto nla_put_failure;
3b98c0c2
LE
3812 break;
3813 }
b411b363 3814 }
3b98c0c2 3815 nla_nest_end(skb, nla);
b411b363 3816
3b98c0c2
LE
3817 if (0)
3818nla_put_failure:
3819 err = -EMSGSIZE;
3820 if (got_ldev)
b30ab791 3821 put_ldev(device);
3b98c0c2 3822 return err;
b411b363
PR
3823}
3824
3b98c0c2 3825int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
b411b363 3826{
a910b123 3827 struct drbd_config_context adm_ctx;
3b98c0c2
LE
3828 enum drbd_ret_code retcode;
3829 int err;
b411b363 3830
a910b123 3831 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
3832 if (!adm_ctx.reply_skb)
3833 return retcode;
3834 if (retcode != NO_ERROR)
3835 goto out;
b411b363 3836
b30ab791 3837 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3b98c0c2
LE
3838 if (err) {
3839 nlmsg_free(adm_ctx.reply_skb);
3840 return err;
b411b363 3841 }
3b98c0c2 3842out:
a910b123 3843 drbd_adm_finish(&adm_ctx, info, retcode);
3b98c0c2 3844 return 0;
b411b363
PR
3845}
3846
4b7a530f 3847static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
b411b363 3848{
b30ab791 3849 struct drbd_device *device;
3b98c0c2 3850 struct drbd_genlmsghdr *dh;
77c556f6
AG
3851 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3852 struct drbd_resource *resource = NULL;
77c556f6 3853 struct drbd_resource *tmp;
543cc10b
LE
3854 unsigned volume = cb->args[1];
3855
 3856 /* Open-coded, deferred iteration:
77c556f6 3857 * for_each_resource_safe(resource, tmp, &drbd_resources) {
251b8f8e
AG
3858 * connection = "first connection of resource or undefined";
3859 * idr_for_each_entry(&resource->devices, device, i) {
543cc10b
LE
3860 * ...
3861 * }
3862 * }
77c556f6 3863 * where resource is cb->args[0];
543cc10b
LE
3864 * and i is cb->args[1];
3865 *
71932efc
LE
3866 * cb->args[2] indicates if we shall loop over all resources,
3867 * or just dump all volumes of a single resource.
3868 *
3b98c0c2
LE
3869 * This may miss entries inserted after this dump started,
3870 * or entries deleted before they are reached.
543cc10b 3871 *
b30ab791 3872 * We need to make sure the device won't disappear while
543cc10b
LE
3873 * we are looking at it, and revalidate our iterators
3874 * on each iteration.
3875 */
b411b363 3876
05a10ec7 3877 /* synchronize with conn_create()/drbd_destroy_connection() */
c141ebda 3878 rcu_read_lock();
543cc10b 3879 /* revalidate iterator position */
77c556f6 3880 for_each_resource_rcu(tmp, &drbd_resources) {
543cc10b
LE
3881 if (pos == NULL) {
3882 /* first iteration */
3883 pos = tmp;
77c556f6 3884 resource = pos;
543cc10b
LE
3885 break;
3886 }
3887 if (tmp == pos) {
77c556f6 3888 resource = pos;
543cc10b
LE
3889 break;
3890 }
b411b363 3891 }
77c556f6
AG
3892 if (resource) {
3893next_resource:
251b8f8e
AG
3894 device = idr_get_next(&resource->devices, &volume);
3895 if (!device) {
77c556f6
AG
3896 /* No more volumes to dump on this resource.
3897 * Advance resource iterator. */
3898 pos = list_entry_rcu(resource->resources.next,
3899 struct drbd_resource, resources);
3900 /* Did we dump any volume of this resource yet? */
543cc10b 3901 if (volume != 0) {
71932efc
LE
3902 /* If we reached the end of the list,
3903 * or only a single resource dump was requested,
3904 * we are done. */
77c556f6 3905 if (&pos->resources == &drbd_resources || cb->args[2])
71932efc 3906 goto out;
543cc10b 3907 volume = 0;
77c556f6
AG
3908 resource = pos;
3909 goto next_resource;
543cc10b
LE
3910 }
3911 }
3912
98683650 3913 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3b98c0c2
LE
3914 cb->nlh->nlmsg_seq, &drbd_genl_family,
3915 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3916 if (!dh)
543cc10b
LE
3917 goto out;
3918
251b8f8e 3919 if (!device) {
bde89a9e 3920 /* This is a connection without a single volume.
367d675d
LE
 3921 * Surprisingly enough, it may have a network
3922 * configuration. */
251b8f8e
AG
3923 struct drbd_connection *connection;
3924
543cc10b
LE
3925 dh->minor = -1U;
3926 dh->ret_code = NO_ERROR;
251b8f8e
AG
3927 connection = the_only_connection(resource);
3928 if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
367d675d 3929 goto cancel;
251b8f8e
AG
3930 if (connection) {
3931 struct net_conf *nc;
3932
3933 nc = rcu_dereference(connection->net_conf);
3934 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3935 goto cancel;
3936 }
367d675d 3937 goto done;
543cc10b 3938 }
b411b363 3939
0b0ba1ef 3940 D_ASSERT(device, device->vnr == volume);
251b8f8e 3941 D_ASSERT(device, device->resource == resource);
3b98c0c2 3942
b30ab791 3943 dh->minor = device_to_minor(device);
3b98c0c2
LE
3944 dh->ret_code = NO_ERROR;
3945
b30ab791 3946 if (nla_put_status_info(skb, device, NULL)) {
367d675d 3947cancel:
3b98c0c2 3948 genlmsg_cancel(skb, dh);
543cc10b 3949 goto out;
3b98c0c2 3950 }
367d675d 3951done:
3b98c0c2 3952 genlmsg_end(skb, dh);
bde89a9e 3953 }
b411b363 3954
543cc10b 3955out:
c141ebda 3956 rcu_read_unlock();
543cc10b 3957 /* where to start the next iteration */
bde89a9e 3958 cb->args[0] = (long)pos;
77c556f6 3959 cb->args[1] = (pos == resource) ? volume + 1 : 0;
b411b363 3960
77c556f6 3961 /* Finding no more resources/volumes/minors results in an empty skb,
543cc10b 3962 * which will terminate the dump. */
3b98c0c2 3963 return skb->len;
b411b363
PR
3964}
3965
71932efc
LE
3966/*
3967 * Request status of all resources, or of all volumes within a single resource.
3968 *
3969 * This is a dump, as the answer may not fit in a single reply skb otherwise.
3970 * Which means we cannot use the family->attrbuf or other such members, because
3971 * dump is NOT protected by the genl_lock(). During dump, we only have access
3972 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
3973 *
3974 * Once things are setup properly, we call into get_one_status().
b411b363 3975 */
71932efc 3976int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
b411b363 3977{
71932efc
LE
3978 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3979 struct nlattr *nla;
7c3063cc 3980 const char *resource_name;
4bc76048 3981 struct drbd_resource *resource;
7c3063cc 3982 int maxtype;
71932efc
LE
3983
3984 /* Is this a followup call? */
3985 if (cb->args[0]) {
3986 /* ... of a single resource dump,
3987 * and the resource iterator has been advanced already? */
3988 if (cb->args[2] && cb->args[2] != cb->args[0])
3989 return 0; /* DONE. */
3990 goto dump;
3991 }
3992
3993 /* First call (from netlink_dump_start). We need to figure out
3994 * which resource(s) the user wants us to dump. */
3995 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
3996 nlmsg_attrlen(cb->nlh, hdrlen),
3997 DRBD_NLA_CFG_CONTEXT);
3998
3999 /* No explicit context given. Dump all. */
4000 if (!nla)
4001 goto dump;
7c3063cc
AG
4002 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
4003 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
4004 if (IS_ERR(nla))
4005 return PTR_ERR(nla);
71932efc
LE
4006 /* context given, but no name present? */
4007 if (!nla)
4008 return -EINVAL;
7c3063cc 4009 resource_name = nla_data(nla);
4bc76048
AG
4010 if (!*resource_name)
4011 return -ENODEV;
4012 resource = drbd_find_resource(resource_name);
4013 if (!resource)
71932efc
LE
4014 return -ENODEV;
4015
4bc76048 4016 kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
0ace9dfa 4017
71932efc 4018 /* prime iterators, and set "filter" mode mark:
bde89a9e 4019 * only dump this connection. */
4bc76048 4020 cb->args[0] = (long)resource;
71932efc 4021 /* cb->args[1] = 0; passed in this way. */
4bc76048 4022 cb->args[2] = (long)resource;
71932efc
LE
4023
4024dump:
4025 return get_one_status(skb, cb);
4026}
b411b363 4027
3b98c0c2 4028int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
b411b363 4029{
a910b123 4030 struct drbd_config_context adm_ctx;
3b98c0c2
LE
4031 enum drbd_ret_code retcode;
4032 struct timeout_parms tp;
4033 int err;
b411b363 4034
a910b123 4035 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
4036 if (!adm_ctx.reply_skb)
4037 return retcode;
4038 if (retcode != NO_ERROR)
4039 goto out;
b411b363 4040
3b98c0c2 4041 tp.timeout_type =
b30ab791
AG
4042 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4043 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
3b98c0c2 4044 UT_DEFAULT;
b411b363 4045
3b98c0c2
LE
4046 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4047 if (err) {
4048 nlmsg_free(adm_ctx.reply_skb);
4049 return err;
4050 }
4051out:
a910b123 4052 drbd_adm_finish(&adm_ctx, info, retcode);
3b98c0c2 4053 return 0;
b411b363
PR
4054}
4055
3b98c0c2 4056int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
b411b363 4057{
a910b123 4058 struct drbd_config_context adm_ctx;
b30ab791 4059 struct drbd_device *device;
3b98c0c2 4060 enum drbd_ret_code retcode;
58ffa580 4061 struct start_ov_parms parms;
b411b363 4062
a910b123 4063 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
4064 if (!adm_ctx.reply_skb)
4065 return retcode;
4066 if (retcode != NO_ERROR)
4067 goto out;
873b0d5f 4068
b30ab791 4069 device = adm_ctx.device;
58ffa580
LE
4070
4071 /* resume from last known position, if possible */
b30ab791 4072 parms.ov_start_sector = device->ov_start_sector;
58ffa580 4073 parms.ov_stop_sector = ULLONG_MAX;
3b98c0c2 4074 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
f399002e 4075 int err = start_ov_parms_from_attrs(&parms, info);
3b98c0c2
LE
4076 if (err) {
4077 retcode = ERR_MANDATORY_TAG;
a910b123 4078 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3b98c0c2
LE
4079 goto out;
4080 }
b411b363 4081 }
9e276872
LE
4082 mutex_lock(&adm_ctx.resource->adm_mutex);
4083
58ffa580 4084 /* w_make_ov_request expects position to be aligned */
b30ab791
AG
4085 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
4086 device->ov_stop_sector = parms.ov_stop_sector;
873b0d5f
LE
4087
4088 /* If there is still bitmap IO pending, e.g. previous resync or verify
4089 * just being finished, wait for it before requesting a new resync. */
b30ab791
AG
4090 drbd_suspend_io(device);
4091 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4092 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
4093 drbd_resume_io(device);
9e276872
LE
4094
4095 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 4096out:
a910b123 4097 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
4098 return 0;
4099}
4100
4101
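/* Rotate the UUIDs and generate a new current UUID.  With clear-bitmap
 * on a just created, connected device, this skips the initial full sync
 * by declaring both sides D_UP_TO_DATE. */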
3b98c0c2 4102int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
b411b363 4103{
a910b123 4104 struct drbd_config_context adm_ctx;
b30ab791 4105 struct drbd_device *device;
3b98c0c2 4106 enum drbd_ret_code retcode;
b411b363
PR
4107 int skip_initial_sync = 0;
4108 int err;
3b98c0c2 4109 struct new_c_uuid_parms args;
b411b363 4110
a910b123 4111 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
4112 if (!adm_ctx.reply_skb)
4113 return retcode;
4114 if (retcode != NO_ERROR)
4115 goto out_nolock;
b411b363 4116
b30ab791 4117 device = adm_ctx.device;
3b98c0c2
LE
4118 memset(&args, 0, sizeof(args));
4119 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
f399002e 4120 err = new_c_uuid_parms_from_attrs(&args, info);
3b98c0c2
LE
4121 if (err) {
4122 retcode = ERR_MANDATORY_TAG;
a910b123 4123 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3b98c0c2
LE
4124 goto out_nolock;
4125 }
b411b363
PR
4126 }
4127
9e276872 4128 mutex_lock(&adm_ctx.resource->adm_mutex);
b30ab791 4129 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
b411b363 4130
b30ab791 4131 if (!get_ldev(device)) {
b411b363
PR
4132 retcode = ERR_NO_DISK;
4133 goto out;
4134 }
4135
 4136 /* this is "skip initial sync", assumed to be clean */
a6b32bc3
AG
4137 if (device->state.conn == C_CONNECTED &&
4138 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
b30ab791 4139 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
d0180171 4140 drbd_info(device, "Preparing to skip initial sync\n");
b411b363 4141 skip_initial_sync = 1;
b30ab791 4142 } else if (device->state.conn != C_STANDALONE) {
b411b363
PR
4143 retcode = ERR_CONNECTED;
4144 goto out_dec;
4145 }
4146
b30ab791
AG
4147 drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
4148 drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
b411b363
PR
4149
4150 if (args.clear_bm) {
b30ab791 4151 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
8164dd6c 4152 "clear_n_write from new_c_uuid", BM_LOCKED_MASK, NULL);
b411b363 4153 if (err) {
d0180171 4154 drbd_err(device, "Writing bitmap failed with %d\n", err);
b411b363
PR
4155 retcode = ERR_IO_MD_DISK;
4156 }
4157 if (skip_initial_sync) {
69a22773 4158 drbd_send_uuids_skip_initial_sync(first_peer_device(device));
b30ab791
AG
4159 _drbd_uuid_set(device, UI_BITMAP, 0);
4160 drbd_print_uuids(device, "cleared bitmap UUID");
0500813f 4161 spin_lock_irq(&device->resource->req_lock);
b30ab791 4162 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
b411b363 4163 CS_VERBOSE, NULL);
0500813f 4164 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
4165 }
4166 }
4167
b30ab791 4168 drbd_md_sync(device);
b411b363 4169out_dec:
b30ab791 4170 put_ldev(device);
b411b363 4171out:
b30ab791 4172 mutex_unlock(device->state_mutex);
9e276872 4173 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 4174out_nolock:
a910b123 4175 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
4176 return 0;
4177}
4178
3b98c0c2 4179static enum drbd_ret_code
a910b123 4180drbd_check_resource_name(struct drbd_config_context *adm_ctx)
b411b363 4181{
a910b123 4182 const char *name = adm_ctx->resource_name;
3b98c0c2 4183 if (!name || !name[0]) {
a910b123 4184 drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
3b98c0c2 4185 return ERR_MANDATORY_TAG;
b411b363 4186 }
3b98c0c2
LE
4187 /* if we want to use these in sysfs/configfs/debugfs some day,
4188 * we must not allow slashes */
4189 if (strchr(name, '/')) {
a910b123 4190 drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
3b98c0c2 4191 return ERR_INVALID_REQUEST;
b411b363 4192 }
3b98c0c2 4193 return NO_ERROR;
774b3055 4194}
b411b363 4195
a2972846
AG
4196static void resource_to_info(struct resource_info *info,
4197 struct drbd_resource *resource)
4198{
4199 info->res_role = conn_highest_role(first_connection(resource));
4200 info->res_susp = resource->susp;
4201 info->res_susp_nod = resource->susp_nod;
4202 info->res_susp_fen = resource->susp_fen;
4203}
4204
789c1b62 4205int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
b411b363 4206{
a2972846 4207 struct drbd_connection *connection;
a910b123 4208 struct drbd_config_context adm_ctx;
3b98c0c2 4209 enum drbd_ret_code retcode;
afbbfa88
AG
4210 struct res_opts res_opts;
4211 int err;
b411b363 4212
a910b123 4213 retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
3b98c0c2
LE
4214 if (!adm_ctx.reply_skb)
4215 return retcode;
4216 if (retcode != NO_ERROR)
4217 goto out;
b411b363 4218
afbbfa88
AG
4219 set_res_opts_defaults(&res_opts);
4220 err = res_opts_from_attrs(&res_opts, info);
4221 if (err && err != -ENOMSG) {
4222 retcode = ERR_MANDATORY_TAG;
a910b123 4223 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
afbbfa88 4224 goto out;
b411b363
PR
4225 }
4226
a910b123 4227 retcode = drbd_check_resource_name(&adm_ctx);
3b98c0c2
LE
4228 if (retcode != NO_ERROR)
4229 goto out;
b411b363 4230
5c661042 4231 if (adm_ctx.resource) {
38f19616
LE
4232 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
4233 retcode = ERR_INVALID_REQUEST;
a910b123 4234 drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
38f19616
LE
4235 }
4236 /* else: still NO_ERROR */
3b98c0c2 4237 goto out;
b411b363 4238 }
b411b363 4239
9e276872 4240 /* not yet safe for genl_family.parallel_ops */
28bc3b8c 4241 mutex_lock(&resources_mutex);
a2972846 4242 connection = conn_create(adm_ctx.resource_name, &res_opts);
28bc3b8c 4243 mutex_unlock(&resources_mutex);
a2972846
AG
4244
4245 if (connection) {
4246 struct resource_info resource_info;
4247
4248 mutex_lock(&notification_mutex);
4249 resource_to_info(&resource_info, connection->resource);
4250 notify_resource_state(NULL, 0, connection->resource,
4251 &resource_info, NOTIFY_CREATE);
4252 mutex_unlock(&notification_mutex);
4253 } else
4254 retcode = ERR_NOMEM;
4255
3b98c0c2 4256out:
a910b123 4257 drbd_adm_finish(&adm_ctx, info, retcode);
3b98c0c2 4258 return 0;
b411b363
PR
4259}
4260
a2972846
AG
4261static void device_to_info(struct device_info *info,
4262 struct drbd_device *device)
4263{
4264 info->dev_disk_state = device->state.disk;
4265}
4266
4267
05a10ec7 4268int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
b411b363 4269{
a910b123 4270 struct drbd_config_context adm_ctx;
bffcc688 4271 struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
3b98c0c2 4272 enum drbd_ret_code retcode;
b411b363 4273
a910b123 4274 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3b98c0c2
LE
4275 if (!adm_ctx.reply_skb)
4276 return retcode;
4277 if (retcode != NO_ERROR)
4278 goto out;
b411b363 4279
f2257a56 4280 if (dh->minor > MINORMASK) {
a910b123 4281 drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
3b98c0c2
LE
4282 retcode = ERR_INVALID_REQUEST;
4283 goto out;
b411b363 4284 }
0c8e36d9 4285 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
a910b123 4286 drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
3b98c0c2
LE
4287 retcode = ERR_INVALID_REQUEST;
4288 goto out;
b411b363 4289 }
b411b363 4290
38f19616 4291 /* drbd_adm_prepare made sure already
a6b32bc3 4292 * that first_peer_device(device)->connection and device->vnr match the request. */
b30ab791 4293 if (adm_ctx.device) {
38f19616 4294 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
179e20b8 4295 retcode = ERR_MINOR_OR_VOLUME_EXISTS;
38f19616
LE
4296 /* else: still NO_ERROR */
4297 goto out;
b411b363 4298 }
38f19616 4299
9e276872 4300 mutex_lock(&adm_ctx.resource->adm_mutex);
a910b123 4301 retcode = drbd_create_device(&adm_ctx, dh->minor);
a2972846
AG
4302 if (retcode == NO_ERROR) {
4303 struct drbd_device *device;
4304 struct drbd_peer_device *peer_device;
4305 struct device_info info;
4306 unsigned int peer_devices = 0;
4307 enum drbd_notification_type flags;
4308
4309 device = minor_to_device(dh->minor);
4310 for_each_peer_device(peer_device, device) {
4311 if (!has_net_conf(peer_device->connection))
4312 continue;
4313 peer_devices++;
4314 }
4315
4316 device_to_info(&info, device);
4317 mutex_lock(&notification_mutex);
4318 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4319 notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
4320 for_each_peer_device(peer_device, device) {
4321 struct peer_device_info peer_device_info;
4322
4323 if (!has_net_conf(peer_device->connection))
4324 continue;
4325 peer_device_to_info(&peer_device_info, peer_device);
4326 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4327 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
4328 NOTIFY_CREATE | flags);
4329 }
4330 mutex_unlock(&notification_mutex);
4331 }
9e276872 4332 mutex_unlock(&adm_ctx.resource->adm_mutex);
3b98c0c2 4333out:
a910b123 4334 drbd_adm_finish(&adm_ctx, info, retcode);
3b98c0c2 4335 return 0;
b411b363
PR
4336}
4337
05a10ec7 4338static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
b411b363 4339{
a2972846
AG
4340 struct drbd_peer_device *peer_device;
4341
b30ab791
AG
4342 if (device->state.disk == D_DISKLESS &&
4343 /* no need to be device->state.conn == C_STANDALONE &&
85f75dd7
LE
4344 * we may want to delete a minor from a live replication group.
4345 */
b30ab791 4346 device->state.role == R_SECONDARY) {
a2972846
AG
4347 struct drbd_connection *connection =
4348 first_connection(device->resource);
4349
b30ab791 4350 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
369bea63 4351 CS_VERBOSE + CS_WAIT_COMPLETE);
a2972846
AG
4352
4353 /* If the state engine hasn't stopped the sender thread yet, we
4354 * need to flush the sender work queue before generating the
4355 * DESTROY events here. */
4356 if (get_t_state(&connection->worker) == RUNNING)
4357 drbd_flush_workqueue(&connection->sender_work);
4358
4359 mutex_lock(&notification_mutex);
4360 for_each_peer_device(peer_device, device) {
4361 if (!has_net_conf(peer_device->connection))
4362 continue;
4363 notify_peer_device_state(NULL, 0, peer_device, NULL,
4364 NOTIFY_DESTROY | NOTIFY_CONTINUES);
4365 }
4366 notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
4367 mutex_unlock(&notification_mutex);
4368
f82795d6 4369 drbd_delete_device(device);
85f75dd7
LE
4370 return NO_ERROR;
4371 } else
4372 return ERR_MINOR_CONFIGURED;
b411b363
PR
4373}
4374
05a10ec7 4375int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
b411b363 4376{
a910b123 4377 struct drbd_config_context adm_ctx;
3b98c0c2 4378 enum drbd_ret_code retcode;
b411b363 4379
a910b123 4380 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3b98c0c2
LE
4381 if (!adm_ctx.reply_skb)
4382 return retcode;
4383 if (retcode != NO_ERROR)
4384 goto out;
b411b363 4385
9e276872 4386 mutex_lock(&adm_ctx.resource->adm_mutex);
05a10ec7 4387 retcode = adm_del_minor(adm_ctx.device);
9e276872 4388 mutex_unlock(&adm_ctx.resource->adm_mutex);
85f75dd7 4389out:
a910b123 4390 drbd_adm_finish(&adm_ctx, info, retcode);
85f75dd7 4391 return 0;
b411b363
PR
4392}
4393
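/* Remove @resource from the global list and free it, but only if all
 * its connections are C_STANDALONE and it has no volumes left. */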
179e20b8
AG
4394static int adm_del_resource(struct drbd_resource *resource)
4395{
4396 struct drbd_connection *connection;
4397
4398 for_each_connection(connection, resource) {
4399 if (connection->cstate > C_STANDALONE)
4400 return ERR_NET_CONFIGURED;
4401 }
4402 if (!idr_is_empty(&resource->devices))
4403 return ERR_RES_IN_USE;
4404
a2972846
AG
4405 /* The state engine has stopped the sender thread, so we don't
4406 * need to flush the sender work queue before generating the
4407 * DESTROY event here. */
4408 mutex_lock(&notification_mutex);
4409 notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
4410 mutex_unlock(&notification_mutex);
4411
28bc3b8c 4412 mutex_lock(&resources_mutex);
179e20b8 4413 list_del_rcu(&resource->resources);
28bc3b8c 4414 mutex_unlock(&resources_mutex);
179e20b8
AG
4415 /* Make sure all threads have actually stopped: state handling only
4416 * does drbd_thread_stop_nowait(). */
4417 list_for_each_entry(connection, &resource->connections, connections)
4418 drbd_thread_stop(&connection->worker);
4419 synchronize_rcu();
4420 drbd_free_resource(resource);
4421 return NO_ERROR;
4422}
4423
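/* Take a whole resource down: demote all volumes to secondary,
 * disconnect all connections, detach and delete all volumes, and
 * finally delete the resource itself. */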
85f75dd7 4424int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
b411b363 4425{
a910b123 4426 struct drbd_config_context adm_ctx;
b6f85ef9
AG
4427 struct drbd_resource *resource;
4428 struct drbd_connection *connection;
4429 struct drbd_device *device;
f3dfa40a 4430 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
85f75dd7 4431 unsigned i;
b411b363 4432
a910b123 4433 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
85f75dd7
LE
4434 if (!adm_ctx.reply_skb)
4435 return retcode;
4436 if (retcode != NO_ERROR)
9e276872 4437 goto finish;
b411b363 4438
b6f85ef9 4439 resource = adm_ctx.resource;
9e276872 4440 mutex_lock(&resource->adm_mutex);
85f75dd7 4441 /* demote */
b6f85ef9
AG
4442 for_each_connection(connection, resource) {
4443 struct drbd_peer_device *peer_device;
4444
4445 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
4446 retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
4447 if (retcode < SS_SUCCESS) {
a910b123 4448 drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
b6f85ef9
AG
4449 goto out;
4450 }
4451 }
4452
4453 retcode = conn_try_disconnect(connection, 0);
85f75dd7 4454 if (retcode < SS_SUCCESS) {
a910b123 4455 drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
c141ebda 4456 goto out;
85f75dd7 4457 }
b411b363 4458 }
b411b363 4459
85f75dd7 4460 /* detach */
b6f85ef9
AG
4461 idr_for_each_entry(&resource->devices, device, i) {
4462 retcode = adm_detach(device, 0);
27012382 4463 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
a910b123 4464 drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
c141ebda 4465 goto out;
85f75dd7
LE
4466 }
4467 }
b411b363 4468
85f75dd7 4469 /* delete volumes */
b6f85ef9
AG
4470 idr_for_each_entry(&resource->devices, device, i) {
4471 retcode = adm_del_minor(device);
85f75dd7
LE
4472 if (retcode != NO_ERROR) {
4473 /* "can not happen" */
a910b123 4474 drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
ef356262 4475 goto out;
85f75dd7
LE
4476 }
4477 }
b411b363 4478
179e20b8 4479 retcode = adm_del_resource(resource);
3b98c0c2 4480out:
9e276872
LE
4481 mutex_unlock(&resource->adm_mutex);
4482finish:
a910b123 4483 drbd_adm_finish(&adm_ctx, info, retcode);
3b98c0c2 4484 return 0;
b411b363
PR
4485}
4486
789c1b62 4487int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
b411b363 4488{
a910b123 4489 struct drbd_config_context adm_ctx;
77c556f6 4490 struct drbd_resource *resource;
3b98c0c2 4491 enum drbd_ret_code retcode;
b411b363 4492
a910b123 4493 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3b98c0c2
LE
4494 if (!adm_ctx.reply_skb)
4495 return retcode;
4496 if (retcode != NO_ERROR)
9e276872 4497 goto finish;
77c556f6 4498 resource = adm_ctx.resource;
b411b363 4499
179e20b8
AG
4500 mutex_lock(&resource->adm_mutex);
4501 retcode = adm_del_resource(resource);
9e276872
LE
4502 mutex_unlock(&resource->adm_mutex);
4503finish:
a910b123 4504 drbd_adm_finish(&adm_ctx, info, retcode);
b411b363
PR
4505 return 0;
4506}
4507
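/* Broadcast a state change (or other state info) of @device to the
 * multicast group of listening user space processes. */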
b30ab791 4508void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
b411b363 4509{
3b98c0c2
LE
4510 struct sk_buff *msg;
4511 struct drbd_genlmsghdr *d_out;
4512 unsigned seq;
4513 int err = -ENOMEM;
4514
4515 seq = atomic_inc_return(&drbd_genl_seq);
4516 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4517 if (!msg)
4518 goto failed;
4519
4520 err = -EMSGSIZE;
4521 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
4522	if (!d_out) /* cannot happen, but anyway. */
4523 goto nla_put_failure;
b30ab791 4524 d_out->minor = device_to_minor(device);
6f9b5f84 4525 d_out->ret_code = NO_ERROR;
3b98c0c2 4526
b30ab791 4527 if (nla_put_status_info(msg, device, sib))
3b98c0c2 4528		goto nla_put_failure;
4529 genlmsg_end(msg, d_out);
d38f8612 4530 err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
3b98c0c2
LE
4531 /* msg has been consumed or freed in netlink_broadcast() */
4532 if (err && err != -ESRCH)
4533 goto failed;
b411b363 4534
3b98c0c2 4535 return;
b411b363 4536
3b98c0c2 4537nla_put_failure:
4538 nlmsg_free(msg);
4539failed:
d0180171 4540 drbd_err(device, "Error %d while broadcasting event. "
3b98c0c2 4541		 "Event seq:%u sib_reason:%u\n",
4542 err, seq, sib->sib_reason);
b411b363 4543}
a2972846 4544
a2972846 4545static int nla_put_notification_header(struct sk_buff *msg,
4546 enum drbd_notification_type type)
4547{
4548 struct drbd_notification_header nh = {
4549 .nh_type = type,
4550 };
4551
4552 return drbd_notification_header_to_skb(msg, &nh, true);
4553}
4554
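/* The notify_*_state() helpers below share one pattern: when called with
 * skb == NULL they allocate a fresh message and multicast it on the events
 * group; when a dump callback passes in its skb, the event is appended to
 * that reply instead. */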
aadb22ba 4555int notify_resource_state(struct sk_buff *skb,
a2972846 4556			  unsigned int seq,
4557 struct drbd_resource *resource,
4558 struct resource_info *resource_info,
4559 enum drbd_notification_type type)
4560{
4561 struct resource_statistics resource_statistics;
4562 struct drbd_genlmsghdr *dh;
4563 bool multicast = false;
4564 int err;
4565
4566 if (!skb) {
4567 seq = atomic_inc_return(&notify_genl_seq);
4568 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4569 err = -ENOMEM;
4570 if (!skb)
4571 goto failed;
4572 multicast = true;
4573 }
4574
4575 err = -EMSGSIZE;
4576 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
4577 if (!dh)
4578 goto nla_put_failure;
4579 dh->minor = -1U;
4580 dh->ret_code = NO_ERROR;
4581 if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
4582 nla_put_notification_header(skb, type) ||
4583 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4584 resource_info_to_skb(skb, resource_info, true)))
4585 goto nla_put_failure;
4586 resource_statistics.res_stat_write_ordering = resource->write_ordering;
4587 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
4588 if (err)
4589 goto nla_put_failure;
4590 genlmsg_end(skb, dh);
4591 if (multicast) {
d38f8612 4592 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
a2972846 4593		/* skb has been consumed or freed in netlink_broadcast() */
4594 if (err && err != -ESRCH)
4595 goto failed;
4596 }
aadb22ba 4597 return 0;
a2972846 4598
4599nla_put_failure:
4600 nlmsg_free(skb);
4601failed:
4602 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4603 err, seq);
aadb22ba 4604 return err;
a2972846 4605}
4606
aadb22ba 4607int notify_device_state(struct sk_buff *skb,
a2972846 4608			unsigned int seq,
4609 struct drbd_device *device,
4610 struct device_info *device_info,
4611 enum drbd_notification_type type)
4612{
4613 struct device_statistics device_statistics;
4614 struct drbd_genlmsghdr *dh;
4615 bool multicast = false;
4616 int err;
4617
4618 if (!skb) {
4619 seq = atomic_inc_return(&notify_genl_seq);
4620 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4621 err = -ENOMEM;
4622 if (!skb)
4623 goto failed;
4624 multicast = true;
4625 }
4626
4627 err = -EMSGSIZE;
4628 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
4629 if (!dh)
4630 goto nla_put_failure;
4631 dh->minor = device->minor;
4632 dh->ret_code = NO_ERROR;
4633 if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
4634 nla_put_notification_header(skb, type) ||
4635 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4636 device_info_to_skb(skb, device_info, true)))
4637 goto nla_put_failure;
4638 device_to_statistics(&device_statistics, device);
4639 device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
4640 genlmsg_end(skb, dh);
4641 if (multicast) {
d38f8612 4642 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
a2972846 4643		/* skb has been consumed or freed in netlink_broadcast() */
4644 if (err && err != -ESRCH)
4645 goto failed;
4646 }
aadb22ba 4647 return 0;
a2972846 4648
4649nla_put_failure:
4650 nlmsg_free(skb);
4651failed:
4652 drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
4653 err, seq);
aadb22ba 4654 return err;
a2972846 4655}
4656
aadb22ba 4657int notify_connection_state(struct sk_buff *skb,
a2972846 4658			unsigned int seq,
4659 struct drbd_connection *connection,
4660 struct connection_info *connection_info,
4661 enum drbd_notification_type type)
4662{
4663 struct connection_statistics connection_statistics;
4664 struct drbd_genlmsghdr *dh;
4665 bool multicast = false;
4666 int err;
4667
4668 if (!skb) {
4669 seq = atomic_inc_return(&notify_genl_seq);
4670 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4671 err = -ENOMEM;
4672 if (!skb)
4673 goto failed;
4674 multicast = true;
4675 }
4676
4677 err = -EMSGSIZE;
4678 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
4679 if (!dh)
4680 goto nla_put_failure;
4681 dh->minor = -1U;
4682 dh->ret_code = NO_ERROR;
4683 if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
4684 nla_put_notification_header(skb, type) ||
4685 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4686 connection_info_to_skb(skb, connection_info, true)))
4687 goto nla_put_failure;
4688 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
4689 connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
4690 genlmsg_end(skb, dh);
4691 if (multicast) {
d38f8612 4692 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
a2972846
AG
4693 /* skb has been consumed or freed in netlink_broadcast() */
4694 if (err && err != -ESRCH)
4695 goto failed;
4696 }
aadb22ba 4697 return 0;
a2972846 4698
4699nla_put_failure:
4700 nlmsg_free(skb);
4701failed:
4702 drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
4703 err, seq);
aadb22ba 4704 return err;
a2972846 4705}
4706
aadb22ba 4707int notify_peer_device_state(struct sk_buff *skb,
a2972846 4708			unsigned int seq,
4709 struct drbd_peer_device *peer_device,
4710 struct peer_device_info *peer_device_info,
4711 enum drbd_notification_type type)
4712{
4713 struct peer_device_statistics peer_device_statistics;
4714 struct drbd_resource *resource = peer_device->device->resource;
4715 struct drbd_genlmsghdr *dh;
4716 bool multicast = false;
4717 int err;
4718
4719 if (!skb) {
4720 seq = atomic_inc_return(&notify_genl_seq);
4721 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4722 err = -ENOMEM;
4723 if (!skb)
4724 goto failed;
4725 multicast = true;
4726 }
4727
4728 err = -EMSGSIZE;
4729 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
4730 if (!dh)
4731 goto nla_put_failure;
4732 dh->minor = -1U;
4733 dh->ret_code = NO_ERROR;
4734 if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
4735 nla_put_notification_header(skb, type) ||
4736 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4737 peer_device_info_to_skb(skb, peer_device_info, true)))
4738 goto nla_put_failure;
4739 peer_device_to_statistics(&peer_device_statistics, peer_device);
4740 peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
4741 genlmsg_end(skb, dh);
4742 if (multicast) {
d38f8612 4743 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
a2972846 4744		/* skb has been consumed or freed in netlink_broadcast() */
4745 if (err && err != -ESRCH)
4746 goto failed;
4747 }
aadb22ba 4748 return 0;
a2972846 4749
4750nla_put_failure:
4751 nlmsg_free(skb);
4752failed:
4753 drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
4754 err, seq);
aadb22ba 4755 return err;
a2972846 4756}
4757
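/* Broadcast a DRBD_HELPER event carrying the helper name and its status
 * for either @device or @connection, serialized via notification_mutex. */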
4758void notify_helper(enum drbd_notification_type type,
4759 struct drbd_device *device, struct drbd_connection *connection,
4760 const char *name, int status)
4761{
4762 struct drbd_resource *resource = device ? device->resource : connection->resource;
4763 struct drbd_helper_info helper_info;
4764 unsigned int seq = atomic_inc_return(&notify_genl_seq);
4765 struct sk_buff *skb = NULL;
4766 struct drbd_genlmsghdr *dh;
4767 int err;
4768
e55e1b48 4769 strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
a2972846 4770	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
4771 helper_info.helper_status = status;
4772
4773 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4774 err = -ENOMEM;
4775 if (!skb)
4776 goto fail;
4777
4778 err = -EMSGSIZE;
4779 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
4780 if (!dh)
4781 goto fail;
4782 dh->minor = device ? device->minor : -1;
4783 dh->ret_code = NO_ERROR;
4784 mutex_lock(&notification_mutex);
4785 if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
4786 nla_put_notification_header(skb, type) ||
4787 drbd_helper_info_to_skb(skb, &helper_info, true))
4788 goto unlock_fail;
4789 genlmsg_end(skb, dh);
d38f8612 4790 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
a2972846 4791	skb = NULL;
4792 /* skb has been consumed or freed in netlink_broadcast() */
4793 if (err && err != -ESRCH)
4794 goto unlock_fail;
4795 mutex_unlock(&notification_mutex);
4796 return;
4797
4798unlock_fail:
4799 mutex_unlock(&notification_mutex);
4800fail:
4801 nlmsg_free(skb);
4802 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4803 err, seq);
4804}
4805
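/* Emit the DRBD_INITIAL_STATE_DONE marker that terminates the initial
 * state dump started by drbd_adm_get_initial_state(). */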
aadb22ba 4806static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
a2972846 4807{
4808 struct drbd_genlmsghdr *dh;
4809 int err;
4810
4811 err = -EMSGSIZE;
4812 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
4813 if (!dh)
4814 goto nla_put_failure;
4815 dh->minor = -1U;
4816 dh->ret_code = NO_ERROR;
4817 if (nla_put_notification_header(skb, NOTIFY_EXISTS))
4818 goto nla_put_failure;
4819 genlmsg_end(skb, dh);
aadb22ba 4820 return 0;
a2972846 4821
4822nla_put_failure:
4823 nlmsg_free(skb);
4824 pr_err("Error %d sending event. Event seq:%u\n", err, seq);
aadb22ba 4825 return err;
a2972846 4826}
4827
4828static void free_state_changes(struct list_head *list)
4829{
4830 while (!list_empty(list)) {
4831 struct drbd_state_change *state_change =
4832 list_first_entry(list, struct drbd_state_change, list);
4833 list_del(&state_change->list);
4834 forget_state_change(state_change);
4835 }
4836}
4837
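/* One notification for the resource itself, one per connection, one per
 * device, and one per (device, connection) pair, i.e. per peer device. */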
4838static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
4839{
4840 return 1 +
4841 state_change->n_connections +
4842 state_change->n_devices +
4843 state_change->n_devices * state_change->n_connections;
4844}
4845
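/* Dump continuation state lives in cb->args: [0] current state_change,
 * [2] netlink sequence number, [3] notifications in the current
 * state_change, [4] position within it, [5] countdown of remaining steps;
 * once it reaches 1, the DRBD_INITIAL_STATE_DONE marker is sent. */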
4846static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4847{
4848 struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
4849 unsigned int seq = cb->args[2];
4850 unsigned int n;
4851 enum drbd_notification_type flags = 0;
aadb22ba 4852 int err = 0;
a2972846 4853
4854 /* There is no need for taking notification_mutex here: it doesn't
4855	   matter if the initial state events mix with later state change
4856 events; we can always tell the events apart by the NOTIFY_EXISTS
4857 flag. */
4858
4859 cb->args[5]--;
4860 if (cb->args[5] == 1) {
aadb22ba 4861 err = notify_initial_state_done(skb, seq);
a2972846 4862		goto out;
4863 }
4864 n = cb->args[4]++;
4865 if (cb->args[4] < cb->args[3])
4866 flags |= NOTIFY_CONTINUES;
4867 if (n < 1) {
aadb22ba 4868 err = notify_resource_state_change(skb, seq, state_change->resource,
a2972846 4869						  NOTIFY_EXISTS | flags);
4870 goto next;
4871 }
4872 n--;
4873 if (n < state_change->n_connections) {
aadb22ba 4874 err = notify_connection_state_change(skb, seq, &state_change->connections[n],
a2972846 4875						      NOTIFY_EXISTS | flags);
4876 goto next;
4877 }
4878 n -= state_change->n_connections;
4879 if (n < state_change->n_devices) {
aadb22ba 4880 err = notify_device_state_change(skb, seq, &state_change->devices[n],
a2972846 4881						  NOTIFY_EXISTS | flags);
4882 goto next;
4883 }
4884 n -= state_change->n_devices;
4885 if (n < state_change->n_devices * state_change->n_connections) {
aadb22ba 4886 err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
a2972846 4887						       NOTIFY_EXISTS | flags);
4888 goto next;
4889 }
4890
4891next:
4892 if (cb->args[4] == cb->args[3]) {
4893 struct drbd_state_change *next_state_change =
4894 list_entry(state_change->list.next,
4895 struct drbd_state_change, list);
4896 cb->args[0] = (long)next_state_change;
4897 cb->args[3] = notifications_for_state_change(next_state_change);
4898 cb->args[4] = 0;
4899 }
4900out:
aadb22ba 4901	if (err)
4902 return err;
4903 else
4904 return skb->len;
a2972846 4905}
4906
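/* On the first dump call, snapshot the state of all resources under
 * resources_mutex into a list of drbd_state_change objects; subsequent
 * calls replay them one notification at a time as NOTIFY_EXISTS events. */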
4907int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4908{
4909 struct drbd_resource *resource;
4910 LIST_HEAD(head);
4911
4912 if (cb->args[5] >= 1) {
4913 if (cb->args[5] > 1)
4914 return get_initial_state(skb, cb);
4915 if (cb->args[0]) {
4916 struct drbd_state_change *state_change =
4917 (struct drbd_state_change *)cb->args[0];
4918
4919 /* connect list to head */
4920 list_add(&head, &state_change->list);
4921 free_state_changes(&head);
4922 }
4923 return 0;
4924 }
4925
4926 cb->args[5] = 2; /* number of iterations */
4927 mutex_lock(&resources_mutex);
4928 for_each_resource(resource, &drbd_resources) {
4929 struct drbd_state_change *state_change;
4930
4931 state_change = remember_old_state(resource, GFP_KERNEL);
4932 if (!state_change) {
4933 if (!list_empty(&head))
4934 free_state_changes(&head);
4935 mutex_unlock(&resources_mutex);
4936 return -ENOMEM;
4937 }
4938 copy_old_to_new_state_change(state_change);
4939 list_add_tail(&state_change->list, &head);
4940 cb->args[5] += notifications_for_state_change(state_change);
4941 }
4942 mutex_unlock(&resources_mutex);
4943
4944 if (!list_empty(&head)) {
4945 struct drbd_state_change *state_change =
4946 list_entry(head.next, struct drbd_state_change, list);
4947 cb->args[0] = (long)state_change;
4948 cb->args[3] = notifications_for_state_change(state_change);
4949 list_del(&head); /* detach list from head */
4950 }
4951
4952 cb->args[2] = cb->nlh->nlmsg_seq;
4953 return get_initial_state(skb, cb);
4954}