drbd: Also need to check for DRBD_GENLA_F_MANDATORY flags before nla_find_nested()
[linux-block.git] / drivers / block / drbd / drbd_nl.c
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
52 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74 /* .dumpit */
75 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77 #include <linux/drbd_genl_api.h>
78 #include <linux/genl_magic_func.h>
79
80 /* used blkdev_get_by_path, to claim our meta data device(s) */
81 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
82
83 /* Configuration is strictly serialized, because generic netlink message
84  * processing is strictly serialized by the genl_lock().
85  * Which means we can use one static global drbd_config_context struct.
86  */
87 static struct drbd_config_context {
88         /* assigned from drbd_genlmsghdr */
89         unsigned int minor;
90         /* assigned from request attributes, if present */
91         unsigned int volume;
92 #define VOLUME_UNSPECIFIED              (-1U)
93         /* pointer into the request skb,
94          * limited lifetime! */
95         char *resource_name;
96
97         /* reply buffer */
98         struct sk_buff *reply_skb;
99         /* pointer into reply buffer */
100         struct drbd_genlmsghdr *reply_dh;
101         /* resolved from attributes, if possible */
102         struct drbd_conf *mdev;
103         struct drbd_tconn *tconn;
104 } adm_ctx;
105
106 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
107 {
108         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
109         if (genlmsg_reply(skb, info))
110                 printk(KERN_ERR "drbd: error sending genl reply\n");
111 }
112
113 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
114  * reason it could fail was no space in skb, and there are 4k available. */
115 int drbd_msg_put_info(const char *info)
116 {
117         struct sk_buff *skb = adm_ctx.reply_skb;
118         struct nlattr *nla;
119         int err = -EMSGSIZE;
120
121         if (!info || !info[0])
122                 return 0;
123
124         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
125         if (!nla)
126                 return err;
127
128         err = nla_put_string(skb, T_info_text, info);
129         if (err) {
130                 nla_nest_cancel(skb, nla);
131                 return err;
132         } else
133                 nla_nest_end(skb, nla);
134         return 0;
135 }
136
137 /* This would be a good candidate for a "pre_doit" hook,
138  * and per-family private info->pointers.
139  * But we need to stay compatible with older kernels.
140  * If it returns successfully, adm_ctx members are valid.
141  */
142 #define DRBD_ADM_NEED_MINOR     1
143 #define DRBD_ADM_NEED_CONN      2
144 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
145                 unsigned flags)
146 {
147         struct drbd_genlmsghdr *d_in = info->userhdr;
148         const u8 cmd = info->genlhdr->cmd;
149         int err;
150
151         memset(&adm_ctx, 0, sizeof(adm_ctx));
152
153         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
154         if (cmd != DRBD_ADM_GET_STATUS
155         && security_netlink_recv(skb, CAP_SYS_ADMIN))
156                return -EPERM;
157
158         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
159         if (!adm_ctx.reply_skb) {
160                 err = -ENOMEM;
161                 goto fail;
162         }
163
164         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
165                                         info, &drbd_genl_family, 0, cmd);
166         /* put of a few bytes into a fresh skb of >= 4k will always succeed.
167          * but anyways */
168         if (!adm_ctx.reply_dh) {
169                 err = -ENOMEM;
170                 goto fail;
171         }
172
173         adm_ctx.reply_dh->minor = d_in->minor;
174         adm_ctx.reply_dh->ret_code = NO_ERROR;
175
176         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
177                 struct nlattr *nla;
178                 /* parse and validate only */
179                 err = drbd_cfg_context_from_attrs(NULL, info);
180                 if (err)
181                         goto fail;
182
183                 /* It was present, and valid,
184                  * copy it over to the reply skb. */
185                 err = nla_put_nohdr(adm_ctx.reply_skb,
186                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
187                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
188                 if (err)
189                         goto fail;
190
191                 /* and assign stuff to the global adm_ctx */
192                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
193                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
194                 nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
195                 if (nla)
196                         adm_ctx.resource_name = nla_data(nla);
197         } else
198                 adm_ctx.volume = VOLUME_UNSPECIFIED;
199
200         adm_ctx.minor = d_in->minor;
201         adm_ctx.mdev = minor_to_mdev(d_in->minor);
202         adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
203
204         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
205                 drbd_msg_put_info("unknown minor");
206                 return ERR_MINOR_INVALID;
207         }
208         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
209                 drbd_msg_put_info("unknown connection");
210                 return ERR_INVALID_REQUEST;
211         }
212
213         /* some more paranoia, if the request was over-determined */
214         if (adm_ctx.mdev && adm_ctx.tconn &&
215             adm_ctx.mdev->tconn != adm_ctx.tconn) {
216                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
217                                 adm_ctx.minor, adm_ctx.resource_name,
218                                 adm_ctx.mdev->tconn->name);
219                 drbd_msg_put_info("minor exists in different connection");
220                 return ERR_INVALID_REQUEST;
221         }
222         if (adm_ctx.mdev &&
223             adm_ctx.volume != VOLUME_UNSPECIFIED &&
224             adm_ctx.volume != adm_ctx.mdev->vnr) {
225                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
226                                 adm_ctx.minor, adm_ctx.volume,
227                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
228                 drbd_msg_put_info("minor exists as different volume");
229                 return ERR_INVALID_REQUEST;
230         }
231
232         return NO_ERROR;
233
234 fail:
235         nlmsg_free(adm_ctx.reply_skb);
236         adm_ctx.reply_skb = NULL;
237         return err;
238 }
239
240 static int drbd_adm_finish(struct genl_info *info, int retcode)
241 {
242         struct nlattr *nla;
243         const char *resource_name = NULL;
244
245         if (adm_ctx.tconn) {
246                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
247                 adm_ctx.tconn = NULL;
248         }
249
250         if (!adm_ctx.reply_skb)
251                 return -ENOMEM;
252
253         adm_ctx.reply_dh->ret_code = retcode;
254
255         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
256         if (nla) {
257                 int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
258                 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
259                 if (nla && !IS_ERR(nla))
260                         resource_name = nla_data(nla);
261         }
262
263         drbd_adm_send_reply(adm_ctx.reply_skb, info);
264         return 0;
265 }
266
267 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
268 {
269         char *afs;
270         struct net_conf *nc;
271
272         rcu_read_lock();
273         nc = rcu_dereference(tconn->net_conf);
274         if (nc) {
275                 switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
276                 case AF_INET6:
277                         afs = "ipv6";
278                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
279                                  &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
280                         break;
281                 case AF_INET:
282                         afs = "ipv4";
283                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
284                                  &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
285                         break;
286                 default:
287                         afs = "ssocks";
288                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
289                                  &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
290                 }
291                 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
292         }
293         rcu_read_unlock();
294 }
295
296 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
297 {
298         char *envp[] = { "HOME=/",
299                         "TERM=linux",
300                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
301                          (char[20]) { }, /* address family */
302                          (char[60]) { }, /* address */
303                         NULL };
304         char mb[12];
305         char *argv[] = {usermode_helper, cmd, mb, NULL };
306         struct sib_info sib;
307         int ret;
308
309         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
310         setup_khelper_env(mdev->tconn, envp);
311
312         /* The helper may take some time.
313          * write out any unsynced meta data changes now */
314         drbd_md_sync(mdev);
315
316         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
317         sib.sib_reason = SIB_HELPER_PRE;
318         sib.helper_name = cmd;
319         drbd_bcast_event(mdev, &sib);
320         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
321         if (ret)
322                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
323                                 usermode_helper, cmd, mb,
324                                 (ret >> 8) & 0xff, ret);
325         else
326                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
327                                 usermode_helper, cmd, mb,
328                                 (ret >> 8) & 0xff, ret);
329         sib.sib_reason = SIB_HELPER_POST;
330         sib.helper_exit_code = ret;
331         drbd_bcast_event(mdev, &sib);
332
333         if (ret < 0) /* Ignore any ERRNOs we got. */
334                 ret = 0;
335
336         return ret;
337 }
338
339 static void conn_md_sync(struct drbd_tconn *tconn)
340 {
341         struct drbd_conf *mdev;
342         int vnr;
343
344         rcu_read_lock();
345         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
346                 kref_get(&mdev->kref);
347                 rcu_read_unlock();
348                 drbd_md_sync(mdev);
349                 kref_put(&mdev->kref, &drbd_minor_destroy);
350                 rcu_read_lock();
351         }
352         rcu_read_unlock();
353 }
354
355 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
356 {
357         char *envp[] = { "HOME=/",
358                         "TERM=linux",
359                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
360                          (char[20]) { }, /* address family */
361                          (char[60]) { }, /* address */
362                         NULL };
363         char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
364         int ret;
365
366         setup_khelper_env(tconn, envp);
367         conn_md_sync(tconn);
368
369         conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
370         /* TODO: conn_bcast_event() ?? */
371
372         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
373         if (ret)
374                 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
375                           usermode_helper, cmd, tconn->name,
376                           (ret >> 8) & 0xff, ret);
377         else
378                 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
379                           usermode_helper, cmd, tconn->name,
380                           (ret >> 8) & 0xff, ret);
381         /* TODO: conn_bcast_event() ?? */
382
383         if (ret < 0) /* Ignore any ERRNOs we got. */
384                 ret = 0;
385
386         return ret;
387 }
388
389 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
390 {
391         enum drbd_fencing_p fp = FP_NOT_AVAIL;
392         struct drbd_conf *mdev;
393         int vnr;
394
395         rcu_read_lock();
396         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
397                 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
398                         fp = max_t(enum drbd_fencing_p, fp,
399                                    rcu_dereference(mdev->ldev->disk_conf)->fencing);
400                         put_ldev(mdev);
401                 }
402         }
403         rcu_read_unlock();
404
405         return fp;
406 }
407
408 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
409 {
410         union drbd_state mask = { };
411         union drbd_state val = { };
412         enum drbd_fencing_p fp;
413         char *ex_to_string;
414         int r;
415
416         if (tconn->cstate >= C_WF_REPORT_PARAMS) {
417                 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
418                 return false;
419         }
420
421         fp = highest_fencing_policy(tconn);
422         switch (fp) {
423         case FP_NOT_AVAIL:
424                 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
425                 goto out;
426         case FP_DONT_CARE:
427                 return true;
428         default: ;
429         }
430
431         r = conn_khelper(tconn, "fence-peer");
432
433         switch ((r>>8) & 0xff) {
434         case 3: /* peer is inconsistent */
435                 ex_to_string = "peer is inconsistent or worse";
436                 mask.pdsk = D_MASK;
437                 val.pdsk = D_INCONSISTENT;
438                 break;
439         case 4: /* peer got outdated, or was already outdated */
440                 ex_to_string = "peer was fenced";
441                 mask.pdsk = D_MASK;
442                 val.pdsk = D_OUTDATED;
443                 break;
444         case 5: /* peer was down */
445                 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
446                         /* we will(have) create(d) a new UUID anyways... */
447                         ex_to_string = "peer is unreachable, assumed to be dead";
448                         mask.pdsk = D_MASK;
449                         val.pdsk = D_OUTDATED;
450                 } else {
451                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
452                 }
453                 break;
454         case 6: /* Peer is primary, voluntarily outdate myself.
455                  * This is useful when an unconnected R_SECONDARY is asked to
456                  * become R_PRIMARY, but finds the other peer being active. */
457                 ex_to_string = "peer is active";
458                 conn_warn(tconn, "Peer is primary, outdating myself.\n");
459                 mask.disk = D_MASK;
460                 val.disk = D_OUTDATED;
461                 break;
462         case 7:
463                 if (fp != FP_STONITH)
464                         conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
465                 ex_to_string = "peer was stonithed";
466                 mask.pdsk = D_MASK;
467                 val.pdsk = D_OUTDATED;
468                 break;
469         default:
470                 /* The script is broken ... */
471                 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
472                 return false; /* Eventually leave IO frozen */
473         }
474
475         conn_info(tconn, "fence-peer helper returned %d (%s)\n",
476                   (r>>8) & 0xff, ex_to_string);
477
478  out:
479
480         /* Not using
481            conn_request_state(tconn, mask, val, CS_VERBOSE);
482            here, because we might were able to re-establish the connection in the
483            meantime. */
484         spin_lock_irq(&tconn->req_lock);
485         if (tconn->cstate < C_WF_REPORT_PARAMS)
486                 _conn_request_state(tconn, mask, val, CS_VERBOSE);
487         spin_unlock_irq(&tconn->req_lock);
488
489         return conn_highest_pdsk(tconn) <= D_OUTDATED;
490 }
491
492 static int _try_outdate_peer_async(void *data)
493 {
494         struct drbd_tconn *tconn = (struct drbd_tconn *)data;
495
496         conn_try_outdate_peer(tconn);
497
498         kref_put(&tconn->kref, &conn_destroy);
499         return 0;
500 }
501
502 void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
503 {
504         struct task_struct *opa;
505
506         kref_get(&tconn->kref);
507         opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
508         if (IS_ERR(opa)) {
509                 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
510                 kref_put(&tconn->kref, &conn_destroy);
511         }
512 }
513
514 enum drbd_state_rv
515 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
516 {
517         const int max_tries = 4;
518         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
519         struct net_conf *nc;
520         int try = 0;
521         int forced = 0;
522         union drbd_state mask, val;
523
524         if (new_role == R_PRIMARY)
525                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
526
527         mutex_lock(mdev->state_mutex);
528
529         mask.i = 0; mask.role = R_MASK;
530         val.i  = 0; val.role  = new_role;
531
532         while (try++ < max_tries) {
533                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
534
535                 /* in case we first succeeded to outdate,
536                  * but now suddenly could establish a connection */
537                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
538                         val.pdsk = 0;
539                         mask.pdsk = 0;
540                         continue;
541                 }
542
543                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
544                     (mdev->state.disk < D_UP_TO_DATE &&
545                      mdev->state.disk >= D_INCONSISTENT)) {
546                         mask.disk = D_MASK;
547                         val.disk  = D_UP_TO_DATE;
548                         forced = 1;
549                         continue;
550                 }
551
552                 if (rv == SS_NO_UP_TO_DATE_DISK &&
553                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
554                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
555
556                         if (conn_try_outdate_peer(mdev->tconn)) {
557                                 val.disk = D_UP_TO_DATE;
558                                 mask.disk = D_MASK;
559                         }
560                         continue;
561                 }
562
563                 if (rv == SS_NOTHING_TO_DO)
564                         goto out;
565                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
566                         if (!conn_try_outdate_peer(mdev->tconn) && force) {
567                                 dev_warn(DEV, "Forced into split brain situation!\n");
568                                 mask.pdsk = D_MASK;
569                                 val.pdsk  = D_OUTDATED;
570
571                         }
572                         continue;
573                 }
574                 if (rv == SS_TWO_PRIMARIES) {
575                         /* Maybe the peer is detected as dead very soon...
576                            retry at most once more in this case. */
577                         int timeo;
578                         rcu_read_lock();
579                         nc = rcu_dereference(mdev->tconn->net_conf);
580                         timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
581                         rcu_read_unlock();
582                         schedule_timeout_interruptible(timeo);
583                         if (try < max_tries)
584                                 try = max_tries - 1;
585                         continue;
586                 }
587                 if (rv < SS_SUCCESS) {
588                         rv = _drbd_request_state(mdev, mask, val,
589                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
590                         if (rv < SS_SUCCESS)
591                                 goto out;
592                 }
593                 break;
594         }
595
596         if (rv < SS_SUCCESS)
597                 goto out;
598
599         if (forced)
600                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
601
602         /* Wait until nothing is on the fly :) */
603         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
604
605         if (new_role == R_SECONDARY) {
606                 set_disk_ro(mdev->vdisk, true);
607                 if (get_ldev(mdev)) {
608                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
609                         put_ldev(mdev);
610                 }
611         } else {
612                 mutex_lock(&mdev->tconn->conf_update);
613                 nc = mdev->tconn->net_conf;
614                 if (nc)
615                         nc->discard_my_data = 0; /* without copy; single bit op is atomic */
616                 mutex_unlock(&mdev->tconn->conf_update);
617
618                 set_disk_ro(mdev->vdisk, false);
619                 if (get_ldev(mdev)) {
620                         if (((mdev->state.conn < C_CONNECTED ||
621                                mdev->state.pdsk <= D_FAILED)
622                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
623                                 drbd_uuid_new_current(mdev);
624
625                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
626                         put_ldev(mdev);
627                 }
628         }
629
630         /* writeout of activity log covered areas of the bitmap
631          * to stable storage done in after state change already */
632
633         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
634                 /* if this was forced, we should consider sync */
635                 if (forced)
636                         drbd_send_uuids(mdev);
637                 drbd_send_state(mdev);
638         }
639
640         drbd_md_sync(mdev);
641
642         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
643 out:
644         mutex_unlock(mdev->state_mutex);
645         return rv;
646 }
647
648 static const char *from_attrs_err_to_txt(int err)
649 {
650         return  err == -ENOMSG ? "required attribute missing" :
651                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
652                 err == -EEXIST ? "can not change invariant setting" :
653                 "invalid attribute value";
654 }
655
656 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
657 {
658         struct set_role_parms parms;
659         int err;
660         enum drbd_ret_code retcode;
661
662         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
663         if (!adm_ctx.reply_skb)
664                 return retcode;
665         if (retcode != NO_ERROR)
666                 goto out;
667
668         memset(&parms, 0, sizeof(parms));
669         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
670                 err = set_role_parms_from_attrs(&parms, info);
671                 if (err) {
672                         retcode = ERR_MANDATORY_TAG;
673                         drbd_msg_put_info(from_attrs_err_to_txt(err));
674                         goto out;
675                 }
676         }
677
678         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
679                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
680         else
681                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
682 out:
683         drbd_adm_finish(info, retcode);
684         return 0;
685 }
686
687 /* initializes the md.*_offset members, so we are able to find
688  * the on disk meta data */
689 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
690                                        struct drbd_backing_dev *bdev)
691 {
692         sector_t md_size_sect = 0;
693         int meta_dev_idx;
694
695         rcu_read_lock();
696         meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
697
698         switch (meta_dev_idx) {
699         default:
700                 /* v07 style fixed size indexed meta data */
701                 bdev->md.md_size_sect = MD_RESERVED_SECT;
702                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
703                 bdev->md.al_offset = MD_AL_OFFSET;
704                 bdev->md.bm_offset = MD_BM_OFFSET;
705                 break;
706         case DRBD_MD_INDEX_FLEX_EXT:
707                 /* just occupy the full device; unit: sectors */
708                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
709                 bdev->md.md_offset = 0;
710                 bdev->md.al_offset = MD_AL_OFFSET;
711                 bdev->md.bm_offset = MD_BM_OFFSET;
712                 break;
713         case DRBD_MD_INDEX_INTERNAL:
714         case DRBD_MD_INDEX_FLEX_INT:
715                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
716                 /* al size is still fixed */
717                 bdev->md.al_offset = -MD_AL_SECTORS;
718                 /* we need (slightly less than) ~ this much bitmap sectors: */
719                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
720                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
721                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
722                 md_size_sect = ALIGN(md_size_sect, 8);
723
724                 /* plus the "drbd meta data super block",
725                  * and the activity log; */
726                 md_size_sect += MD_BM_OFFSET;
727
728                 bdev->md.md_size_sect = md_size_sect;
729                 /* bitmap offset is adjusted by 'super' block size */
730                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
731                 break;
732         }
733         rcu_read_unlock();
734 }
735
736 /* input size is expected to be in KB */
737 char *ppsize(char *buf, unsigned long long size)
738 {
739         /* Needs 9 bytes at max including trailing NUL:
740          * -1ULL ==> "16384 EB" */
741         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
742         int base = 0;
743         while (size >= 10000 && base < sizeof(units)-1) {
744                 /* shift + round */
745                 size = (size >> 10) + !!(size & (1<<9));
746                 base++;
747         }
748         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
749
750         return buf;
751 }
752
753 /* there is still a theoretical deadlock when called from receiver
754  * on an D_INCONSISTENT R_PRIMARY:
755  *  remote READ does inc_ap_bio, receiver would need to receive answer
756  *  packet from remote to dec_ap_bio again.
757  *  receiver receive_sizes(), comes here,
758  *  waits for ap_bio_cnt == 0. -> deadlock.
759  * but this cannot happen, actually, because:
760  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
761  *  (not connected, or bad/no disk on peer):
762  *  see drbd_fail_request_early, ap_bio_cnt is zero.
763  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
764  *  peer may not initiate a resize.
765  */
766 /* Note these are not to be confused with
767  * drbd_adm_suspend_io/drbd_adm_resume_io,
768  * which are (sub) state changes triggered by admin (drbdsetup),
769  * and can be long lived.
770  * This changes an mdev->flag, is triggered by drbd internals,
771  * and should be short-lived. */
772 void drbd_suspend_io(struct drbd_conf *mdev)
773 {
774         set_bit(SUSPEND_IO, &mdev->flags);
775         if (drbd_suspended(mdev))
776                 return;
777         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
778 }
779
780 void drbd_resume_io(struct drbd_conf *mdev)
781 {
782         clear_bit(SUSPEND_IO, &mdev->flags);
783         wake_up(&mdev->misc_wait);
784 }
785
786 /**
787  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
788  * @mdev:       DRBD device.
789  *
790  * Returns 0 on success, negative return values indicate errors.
791  * You should call drbd_md_sync() after calling this function.
792  */
793 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
794 {
795         sector_t prev_first_sect, prev_size; /* previous meta location */
796         sector_t la_size, u_size;
797         sector_t size;
798         char ppb[10];
799
800         int md_moved, la_size_changed;
801         enum determine_dev_size rv = unchanged;
802
803         /* race:
804          * application request passes inc_ap_bio,
805          * but then cannot get an AL-reference.
806          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
807          *
808          * to avoid that:
809          * Suspend IO right here.
810          * still lock the act_log to not trigger ASSERTs there.
811          */
812         drbd_suspend_io(mdev);
813
814         /* no wait necessary anymore, actually we could assert that */
815         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
816
817         prev_first_sect = drbd_md_first_sector(mdev->ldev);
818         prev_size = mdev->ldev->md.md_size_sect;
819         la_size = mdev->ldev->md.la_size_sect;
820
821         /* TODO: should only be some assert here, not (re)init... */
822         drbd_md_set_sector_offsets(mdev, mdev->ldev);
823
824         rcu_read_lock();
825         u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
826         rcu_read_unlock();
827         size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
828
829         if (drbd_get_capacity(mdev->this_bdev) != size ||
830             drbd_bm_capacity(mdev) != size) {
831                 int err;
832                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
833                 if (unlikely(err)) {
834                         /* currently there is only one error: ENOMEM! */
835                         size = drbd_bm_capacity(mdev)>>1;
836                         if (size == 0) {
837                                 dev_err(DEV, "OUT OF MEMORY! "
838                                     "Could not allocate bitmap!\n");
839                         } else {
840                                 dev_err(DEV, "BM resizing failed. "
841                                     "Leaving size unchanged at size = %lu KB\n",
842                                     (unsigned long)size);
843                         }
844                         rv = dev_size_error;
845                 }
846                 /* racy, see comments above. */
847                 drbd_set_my_capacity(mdev, size);
848                 mdev->ldev->md.la_size_sect = size;
849                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
850                      (unsigned long long)size>>1);
851         }
852         if (rv == dev_size_error)
853                 goto out;
854
855         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
856
857         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
858                 || prev_size       != mdev->ldev->md.md_size_sect;
859
860         if (la_size_changed || md_moved) {
861                 int err;
862
863                 drbd_al_shrink(mdev); /* All extents inactive. */
864                 dev_info(DEV, "Writing the whole bitmap, %s\n",
865                          la_size_changed && md_moved ? "size changed and md moved" :
866                          la_size_changed ? "size changed" : "md moved");
867                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
868                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
869                                 "size changed", BM_LOCKED_MASK);
870                 if (err) {
871                         rv = dev_size_error;
872                         goto out;
873                 }
874                 drbd_md_mark_dirty(mdev);
875         }
876
877         if (size > la_size)
878                 rv = grew;
879         if (size < la_size)
880                 rv = shrunk;
881 out:
882         lc_unlock(mdev->act_log);
883         wake_up(&mdev->al_wait);
884         drbd_resume_io(mdev);
885
886         return rv;
887 }
888
889 sector_t
890 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
891                   sector_t u_size, int assume_peer_has_space)
892 {
893         sector_t p_size = mdev->p_size;   /* partner's disk size. */
894         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
895         sector_t m_size; /* my size */
896         sector_t size = 0;
897
898         m_size = drbd_get_max_capacity(bdev);
899
900         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
901                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
902                 p_size = m_size;
903         }
904
905         if (p_size && m_size) {
906                 size = min_t(sector_t, p_size, m_size);
907         } else {
908                 if (la_size) {
909                         size = la_size;
910                         if (m_size && m_size < size)
911                                 size = m_size;
912                         if (p_size && p_size < size)
913                                 size = p_size;
914                 } else {
915                         if (m_size)
916                                 size = m_size;
917                         if (p_size)
918                                 size = p_size;
919                 }
920         }
921
922         if (size == 0)
923                 dev_err(DEV, "Both nodes diskless!\n");
924
925         if (u_size) {
926                 if (u_size > size)
927                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
928                             (unsigned long)u_size>>1, (unsigned long)size>>1);
929                 else
930                         size = u_size;
931         }
932
933         return size;
934 }
935
936 /**
937  * drbd_check_al_size() - Ensures that the AL is of the right size
938  * @mdev:       DRBD device.
939  *
940  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
941  * failed, and 0 on success. You should call drbd_md_sync() after you called
942  * this function.
943  */
944 static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
945 {
946         struct lru_cache *n, *t;
947         struct lc_element *e;
948         unsigned int in_use;
949         int i;
950
951         if (mdev->act_log &&
952             mdev->act_log->nr_elements == dc->al_extents)
953                 return 0;
954
955         in_use = 0;
956         t = mdev->act_log;
957         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
958                 dc->al_extents, sizeof(struct lc_element), 0);
959
960         if (n == NULL) {
961                 dev_err(DEV, "Cannot allocate act_log lru!\n");
962                 return -ENOMEM;
963         }
964         spin_lock_irq(&mdev->al_lock);
965         if (t) {
966                 for (i = 0; i < t->nr_elements; i++) {
967                         e = lc_element_by_index(t, i);
968                         if (e->refcnt)
969                                 dev_err(DEV, "refcnt(%d)==%d\n",
970                                     e->lc_number, e->refcnt);
971                         in_use += e->refcnt;
972                 }
973         }
974         if (!in_use)
975                 mdev->act_log = n;
976         spin_unlock_irq(&mdev->al_lock);
977         if (in_use) {
978                 dev_err(DEV, "Activity log still in use!\n");
979                 lc_destroy(n);
980                 return -EBUSY;
981         } else {
982                 if (t)
983                         lc_destroy(t);
984         }
985         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */
986         return 0;
987 }
988
989 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
990 {
991         struct request_queue * const q = mdev->rq_queue;
992         int max_hw_sectors = max_bio_size >> 9;
993         int max_segments = 0;
994
995         if (get_ldev_if_state(mdev, D_ATTACHING)) {
996                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
997
998                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
999                 rcu_read_lock();
1000                 max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
1001                 rcu_read_unlock();
1002                 put_ldev(mdev);
1003         }
1004
1005         blk_queue_logical_block_size(q, 512);
1006         blk_queue_max_hw_sectors(q, max_hw_sectors);
1007         /* This is the workaround for "bio would need to, but cannot, be split" */
1008         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1009         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
1010
1011         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1012                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
1013
1014                 blk_queue_stack_limits(q, b);
1015
1016                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
1017                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1018                                  q->backing_dev_info.ra_pages,
1019                                  b->backing_dev_info.ra_pages);
1020                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
1021                 }
1022                 put_ldev(mdev);
1023         }
1024 }
1025
1026 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
1027 {
1028         int now, new, local, peer;
1029
1030         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
1031         local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
1032         peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
1033
1034         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1035                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
1036                 mdev->local_max_bio_size = local;
1037                 put_ldev(mdev);
1038         }
1039
1040         /* We may ignore peer limits if the peer is modern enough.
1041            Because new from 8.3.8 onwards the peer can use multiple
1042            BIOs for a single peer_request */
1043         if (mdev->state.conn >= C_CONNECTED) {
1044                 if (mdev->tconn->agreed_pro_version < 94)
1045                         peer = mdev->peer_max_bio_size;
1046                 else if (mdev->tconn->agreed_pro_version == 94)
1047                         peer = DRBD_MAX_SIZE_H80_PACKET;
1048                 else /* drbd 8.3.8 onwards */
1049                         peer = DRBD_MAX_BIO_SIZE;
1050         }
1051
1052         new = min_t(int, local, peer);
1053
1054         if (mdev->state.role == R_PRIMARY && new < now)
1055                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
1056
1057         if (new != now)
1058                 dev_info(DEV, "max BIO size = %u\n", new);
1059
1060         drbd_setup_queue_param(mdev, new);
1061 }
1062
1063 /* Starts the worker thread */
1064 static void conn_reconfig_start(struct drbd_tconn *tconn)
1065 {
1066         drbd_thread_start(&tconn->worker);
1067         conn_flush_workqueue(tconn);
1068 }
1069
1070 /* if still unconfigured, stops worker again. */
1071 static void conn_reconfig_done(struct drbd_tconn *tconn)
1072 {
1073         bool stop_threads;
1074         spin_lock_irq(&tconn->req_lock);
1075         stop_threads = conn_all_vols_unconf(tconn);
1076         spin_unlock_irq(&tconn->req_lock);
1077         if (stop_threads) {
1078                 /* asender is implicitly stopped by receiver
1079                  * in conn_disconnect() */
1080                 drbd_thread_stop(&tconn->receiver);
1081                 drbd_thread_stop(&tconn->worker);
1082         }
1083 }
1084
1085 /* Make sure IO is suspended before calling this function(). */
1086 static void drbd_suspend_al(struct drbd_conf *mdev)
1087 {
1088         int s = 0;
1089
1090         if (!lc_try_lock(mdev->act_log)) {
1091                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1092                 return;
1093         }
1094
1095         drbd_al_shrink(mdev);
1096         spin_lock_irq(&mdev->tconn->req_lock);
1097         if (mdev->state.conn < C_CONNECTED)
1098                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1099         spin_unlock_irq(&mdev->tconn->req_lock);
1100         lc_unlock(mdev->act_log);
1101
1102         if (s)
1103                 dev_info(DEV, "Suspended AL updates\n");
1104 }
1105
1106
1107 static bool should_set_defaults(struct genl_info *info)
1108 {
1109         unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1110         return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1111 }
1112
1113 static void enforce_disk_conf_limits(struct disk_conf *dc)
1114 {
1115         if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
1116                 dc->al_extents = DRBD_AL_EXTENTS_MIN;
1117         if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
1118                 dc->al_extents = DRBD_AL_EXTENTS_MAX;
1119
1120         if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1121                 dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1122 }
1123
1124 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1125 {
1126         enum drbd_ret_code retcode;
1127         struct drbd_conf *mdev;
1128         struct disk_conf *new_disk_conf, *old_disk_conf;
1129         struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1130         int err, fifo_size;
1131
1132         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1133         if (!adm_ctx.reply_skb)
1134                 return retcode;
1135         if (retcode != NO_ERROR)
1136                 goto out;
1137
1138         mdev = adm_ctx.mdev;
1139
1140         /* we also need a disk
1141          * to change the options on */
1142         if (!get_ldev(mdev)) {
1143                 retcode = ERR_NO_DISK;
1144                 goto out;
1145         }
1146
1147         new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1148         if (!new_disk_conf) {
1149                 retcode = ERR_NOMEM;
1150                 goto fail;
1151         }
1152
1153         mutex_lock(&mdev->tconn->conf_update);
1154         old_disk_conf = mdev->ldev->disk_conf;
1155         *new_disk_conf = *old_disk_conf;
1156         if (should_set_defaults(info))
1157                 set_disk_conf_defaults(new_disk_conf);
1158
1159         err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1160         if (err && err != -ENOMSG) {
1161                 retcode = ERR_MANDATORY_TAG;
1162                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1163         }
1164
1165         if (!expect(new_disk_conf->resync_rate >= 1))
1166                 new_disk_conf->resync_rate = 1;
1167
1168         enforce_disk_conf_limits(new_disk_conf);
1169
1170         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1171         if (fifo_size != mdev->rs_plan_s->size) {
1172                 new_plan = fifo_alloc(fifo_size);
1173                 if (!new_plan) {
1174                         dev_err(DEV, "kmalloc of fifo_buffer failed");
1175                         retcode = ERR_NOMEM;
1176                         goto fail_unlock;
1177                 }
1178         }
1179
1180         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1181         drbd_al_shrink(mdev);
1182         err = drbd_check_al_size(mdev, new_disk_conf);
1183         lc_unlock(mdev->act_log);
1184         wake_up(&mdev->al_wait);
1185
1186         if (err) {
1187                 retcode = ERR_NOMEM;
1188                 goto fail_unlock;
1189         }
1190
1191         write_lock_irq(&global_state_lock);
1192         retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
1193         if (retcode == NO_ERROR) {
1194                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
1195                 drbd_resync_after_changed(mdev);
1196         }
1197         write_unlock_irq(&global_state_lock);
1198
1199         if (retcode != NO_ERROR)
1200                 goto fail_unlock;
1201
1202         if (new_plan) {
1203                 old_plan = mdev->rs_plan_s;
1204                 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
1205         }
1206
1207         mutex_unlock(&mdev->tconn->conf_update);
1208         drbd_md_sync(mdev);
1209
1210         if (mdev->state.conn >= C_CONNECTED)
1211                 drbd_send_sync_param(mdev);
1212
1213         synchronize_rcu();
1214         kfree(old_disk_conf);
1215         kfree(old_plan);
1216         goto success;
1217
1218 fail_unlock:
1219         mutex_unlock(&mdev->tconn->conf_update);
1220  fail:
1221         kfree(new_disk_conf);
1222         kfree(new_plan);
1223 success:
1224         put_ldev(mdev);
1225  out:
1226         drbd_adm_finish(info, retcode);
1227         return 0;
1228 }
1229
1230 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1231 {
1232         struct drbd_conf *mdev;
1233         int err;
1234         enum drbd_ret_code retcode;
1235         enum determine_dev_size dd;
1236         sector_t max_possible_sectors;
1237         sector_t min_md_device_sectors;
1238         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1239         struct disk_conf *new_disk_conf = NULL;
1240         struct block_device *bdev;
1241         struct lru_cache *resync_lru = NULL;
1242         struct fifo_buffer *new_plan = NULL;
1243         union drbd_state ns, os;
1244         enum drbd_state_rv rv;
1245         struct net_conf *nc;
1246         int cp_discovered = 0;
1247
1248         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1249         if (!adm_ctx.reply_skb)
1250                 return retcode;
1251         if (retcode != NO_ERROR)
1252                 goto finish;
1253
1254         mdev = adm_ctx.mdev;
1255         conn_reconfig_start(mdev->tconn);
1256
1257         /* if you want to reconfigure, please tear down first */
1258         if (mdev->state.disk > D_DISKLESS) {
1259                 retcode = ERR_DISK_CONFIGURED;
1260                 goto fail;
1261         }
1262         /* It may just now have detached because of IO error.  Make sure
1263          * drbd_ldev_destroy is done already, we may end up here very fast,
1264          * e.g. if someone calls attach from the on-io-error handler,
1265          * to realize a "hot spare" feature (not that I'd recommend that) */
1266         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1267
1268         /* allocation not in the IO path, drbdsetup context */
1269         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1270         if (!nbc) {
1271                 retcode = ERR_NOMEM;
1272                 goto fail;
1273         }
1274         new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1275         if (!new_disk_conf) {
1276                 retcode = ERR_NOMEM;
1277                 goto fail;
1278         }
1279         nbc->disk_conf = new_disk_conf;
1280
1281         set_disk_conf_defaults(new_disk_conf);
1282         err = disk_conf_from_attrs(new_disk_conf, info);
1283         if (err) {
1284                 retcode = ERR_MANDATORY_TAG;
1285                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1286                 goto fail;
1287         }
1288
1289         enforce_disk_conf_limits(new_disk_conf);
1290
1291         new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1292         if (!new_plan) {
1293                 retcode = ERR_NOMEM;
1294                 goto fail;
1295         }
1296
1297         if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1298                 retcode = ERR_MD_IDX_INVALID;
1299                 goto fail;
1300         }
1301
1302         rcu_read_lock();
1303         nc = rcu_dereference(mdev->tconn->net_conf);
1304         if (nc) {
1305                 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1306                         rcu_read_unlock();
1307                         retcode = ERR_STONITH_AND_PROT_A;
1308                         goto fail;
1309                 }
1310         }
1311         rcu_read_unlock();
1312
1313         bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
1314                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1315         if (IS_ERR(bdev)) {
1316                 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
1317                         PTR_ERR(bdev));
1318                 retcode = ERR_OPEN_DISK;
1319                 goto fail;
1320         }
1321         nbc->backing_bdev = bdev;
1322
1323         /*
1324          * meta_dev_idx >= 0: external fixed size, possibly multiple
1325          * drbd sharing one meta device.  TODO in that case, paranoia
1326          * check that [md_bdev, meta_dev_idx] is not yet used by some
1327          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1328          * should check it for you already; but if you don't, or
1329          * someone fooled it, we need to double check here)
1330          */
1331         bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
1332                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1333                                   (new_disk_conf->meta_dev_idx < 0) ?
1334                                   (void *)mdev : (void *)drbd_m_holder);
1335         if (IS_ERR(bdev)) {
1336                 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
1337                         PTR_ERR(bdev));
1338                 retcode = ERR_OPEN_MD_DISK;
1339                 goto fail;
1340         }
1341         nbc->md_bdev = bdev;
1342
1343         if ((nbc->backing_bdev == nbc->md_bdev) !=
1344             (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1345              new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1346                 retcode = ERR_MD_IDX_INVALID;
1347                 goto fail;
1348         }
1349
1350         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1351                         1, 61, sizeof(struct bm_extent),
1352                         offsetof(struct bm_extent, lce));
1353         if (!resync_lru) {
1354                 retcode = ERR_NOMEM;
1355                 goto fail;
1356         }
1357
1358         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1359         drbd_md_set_sector_offsets(mdev, nbc);
1360
1361         if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1362                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1363                         (unsigned long long) drbd_get_max_capacity(nbc),
1364                         (unsigned long long) new_disk_conf->disk_size);
1365                 retcode = ERR_DISK_TOO_SMALL;
1366                 goto fail;
1367         }
1368
1369         if (new_disk_conf->meta_dev_idx < 0) {
1370                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1371                 /* at least one MB, otherwise it does not make sense */
1372                 min_md_device_sectors = (2<<10);
1373         } else {
1374                 max_possible_sectors = DRBD_MAX_SECTORS;
1375                 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
1376         }
1377
1378         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1379                 retcode = ERR_MD_DISK_TOO_SMALL;
1380                 dev_warn(DEV, "refusing attach: md-device too small, "
1381                      "at least %llu sectors needed for this meta-disk type\n",
1382                      (unsigned long long) min_md_device_sectors);
1383                 goto fail;
1384         }
1385
1386         /* Make sure the new disk is big enough
1387          * (we may currently be R_PRIMARY with no local disk...) */
1388         if (drbd_get_max_capacity(nbc) <
1389             drbd_get_capacity(mdev->this_bdev)) {
1390                 retcode = ERR_DISK_TOO_SMALL;
1391                 goto fail;
1392         }
1393
1394         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1395
1396         if (nbc->known_size > max_possible_sectors) {
1397                 dev_warn(DEV, "==> truncating very big lower level device "
1398                         "to currently maximum possible %llu sectors <==\n",
1399                         (unsigned long long) max_possible_sectors);
1400                 if (new_disk_conf->meta_dev_idx >= 0)
1401                         dev_warn(DEV, "==>> using internal or flexible "
1402                                       "meta data may help <<==\n");
1403         }
1404
1405         drbd_suspend_io(mdev);
1406         /* also wait for the last barrier ack. */
1407         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1408         /* and for any other previously queued work */
1409         drbd_flush_workqueue(mdev);
1410
1411         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1412         retcode = rv;  /* FIXME: Type mismatch. */
1413         drbd_resume_io(mdev);
1414         if (rv < SS_SUCCESS)
1415                 goto fail;
1416
1417         if (!get_ldev_if_state(mdev, D_ATTACHING))
1418                 goto force_diskless;
1419
1420         drbd_md_set_sector_offsets(mdev, nbc);
1421
1422         if (!mdev->bitmap) {
1423                 if (drbd_bm_init(mdev)) {
1424                         retcode = ERR_NOMEM;
1425                         goto force_diskless_dec;
1426                 }
1427         }
1428
1429         retcode = drbd_md_read(mdev, nbc);
1430         if (retcode != NO_ERROR)
1431                 goto force_diskless_dec;
1432
1433         if (mdev->state.conn < C_CONNECTED &&
1434             mdev->state.role == R_PRIMARY &&
1435             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1436                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1437                     (unsigned long long)mdev->ed_uuid);
1438                 retcode = ERR_DATA_NOT_CURRENT;
1439                 goto force_diskless_dec;
1440         }
1441
1442         /* Since we are diskless, fix the activity log first... */
1443         if (drbd_check_al_size(mdev, new_disk_conf)) {
1444                 retcode = ERR_NOMEM;
1445                 goto force_diskless_dec;
1446         }
1447
1448         /* Prevent shrinking of consistent devices! */
1449         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1450             drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1451                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1452                 retcode = ERR_DISK_TOO_SMALL;
1453                 goto force_diskless_dec;
1454         }
1455
1456         if (!drbd_al_read_log(mdev, nbc)) {
1457                 retcode = ERR_IO_MD_DISK;
1458                 goto force_diskless_dec;
1459         }
1460
1461         /* Reset the "barriers don't work" bits here, then force meta data to
1462          * be written, to ensure we determine if barriers are supported. */
1463         if (new_disk_conf->md_flushes)
1464                 clear_bit(MD_NO_FUA, &mdev->flags);
1465         else
1466                 set_bit(MD_NO_FUA, &mdev->flags);
1467
1468         /* Point of no return reached.
1469          * Devices and memory are no longer released by error cleanup below.
1470          * Now mdev takes over responsibility, and the state engine should
1471          * clean it up somewhere.  */
1472         D_ASSERT(mdev->ldev == NULL);
1473         mdev->ldev = nbc;
1474         mdev->resync = resync_lru;
1475         mdev->rs_plan_s = new_plan;
1476         nbc = NULL;
1477         resync_lru = NULL;
1478         new_disk_conf = NULL;
1479         new_plan = NULL;
1480
1481         mdev->write_ordering = WO_bdev_flush;
1482         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1483
1484         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1485                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1486         else
1487                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1488
1489         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1490             !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
1491                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1492                 cp_discovered = 1;
1493         }
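             /* MDF_PRIMARY_IND is set, and this is not just frozen IO of a still
              * primary node: most likely we crashed while being primary.  Remember
              * that (cp_discovered), so that further down the activity log extents
              * are applied to the bitmap and the bitmap is written out. */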
1494
1495         mdev->send_cnt = 0;
1496         mdev->recv_cnt = 0;
1497         mdev->read_cnt = 0;
1498         mdev->writ_cnt = 0;
1499
1500         drbd_reconsider_max_bio_size(mdev);
1501
1502         /* If I am currently not R_PRIMARY,
1503          * but meta data primary indicator is set,
1504          * I just now recover from a hard crash,
1505          * and have been R_PRIMARY before that crash.
1506          *
1507          * Now, if I had no connection before that crash
1508          * (have been degraded R_PRIMARY), chances are that
1509          * I won't find my peer now either.
1510          *
1511          * In that case, and _only_ in that case,
1512          * we use the degr-wfc-timeout instead of the default,
1513          * so we can automatically recover from a crash of a
1514          * degraded but active "cluster" after a certain timeout.
1515          */
1516         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1517         if (mdev->state.role != R_PRIMARY &&
1518              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1519             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1520                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1521
1522         dd = drbd_determine_dev_size(mdev, 0);
1523         if (dd == dev_size_error) {
1524                 retcode = ERR_NOMEM_BITMAP;
1525                 goto force_diskless_dec;
1526         } else if (dd == grew)
1527                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1528
1529         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1530                 dev_info(DEV, "Assuming that all blocks are out of sync "
1531                      "(aka FullSync)\n");
1532                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1533                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1534                         retcode = ERR_IO_MD_DISK;
1535                         goto force_diskless_dec;
1536                 }
1537         } else {
1538                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1539                         "read from attaching", BM_LOCKED_MASK)) {
1540                         retcode = ERR_IO_MD_DISK;
1541                         goto force_diskless_dec;
1542                 }
1543         }
1544
1545         if (cp_discovered) {
1546                 drbd_al_apply_to_bm(mdev);
1547                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1548                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1549                         retcode = ERR_IO_MD_DISK;
1550                         goto force_diskless_dec;
1551                 }
1552         }
1553
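             /* If every bit in the bitmap is set anyway, the activity log adds no
              * information; suspend it so the full resync ahead does not cause
              * needless activity log and meta data updates. */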
1554         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1555                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1556
1557         spin_lock_irq(&mdev->tconn->req_lock);
1558         os = drbd_read_state(mdev);
1559         ns = os;
1560         /* If MDF_CONSISTENT is not set go into inconsistent state,
1561            otherwise investigate MDF_WAS_UP_TO_DATE...
1562            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1563            otherwise into D_CONSISTENT state.
1564         */
1565         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1566                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1567                         ns.disk = D_CONSISTENT;
1568                 else
1569                         ns.disk = D_OUTDATED;
1570         } else {
1571                 ns.disk = D_INCONSISTENT;
1572         }
1573
1574         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1575                 ns.pdsk = D_OUTDATED;
1576
1577         rcu_read_lock();
1578         if (ns.disk == D_CONSISTENT &&
1579             (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
1580                 ns.disk = D_UP_TO_DATE;
1581         rcu_read_unlock();
1582
1583         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1584            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1585            this point, because drbd_request_state() modifies these
1586            flags. */
1587
1588         /* In case we are C_CONNECTED postpone any decision on the new disk
1589            state after the negotiation phase. */
1590         if (mdev->state.conn == C_CONNECTED) {
1591                 mdev->new_state_tmp.i = ns.i;
1592                 ns.i = os.i;
1593                 ns.disk = D_NEGOTIATING;
1594
1595                 /* We expect to receive up-to-date UUIDs soon.
1596                    To avoid a race in receive_state, free p_uuid while
1597                    holding req_lock. I.e. atomic with the state change */
1598                 kfree(mdev->p_uuid);
1599                 mdev->p_uuid = NULL;
1600         }
1601
1602         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1603         spin_unlock_irq(&mdev->tconn->req_lock);
1604
1605         if (rv < SS_SUCCESS)
1606                 goto force_diskless_dec;
1607
1608         if (mdev->state.role == R_PRIMARY)
1609                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1610         else
1611                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1612
1613         drbd_md_mark_dirty(mdev);
1614         drbd_md_sync(mdev);
1615
1616         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1617         put_ldev(mdev);
1618         conn_reconfig_done(mdev->tconn);
1619         drbd_adm_finish(info, retcode);
1620         return 0;
1621
1622  force_diskless_dec:
1623         put_ldev(mdev);
1624  force_diskless:
1625         drbd_force_state(mdev, NS(disk, D_FAILED));
1626         drbd_md_sync(mdev);
1627  fail:
1628         conn_reconfig_done(mdev->tconn);
1629         if (nbc) {
1630                 if (nbc->backing_bdev)
1631                         blkdev_put(nbc->backing_bdev,
1632                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1633                 if (nbc->md_bdev)
1634                         blkdev_put(nbc->md_bdev,
1635                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1636                 kfree(nbc);
1637         }
1638         kfree(new_disk_conf);
1639         lc_destroy(resync_lru);
1640         kfree(new_plan);
1641
1642  finish:
1643         drbd_adm_finish(info, retcode);
1644         return 0;
1645 }
1646
1647 static int adm_detach(struct drbd_conf *mdev)
1648 {
1649         enum drbd_state_rv retcode;
1650         int ret;
1651         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1652         retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1653         /* D_FAILED will transition to DISKLESS. */
1654         ret = wait_event_interruptible(mdev->misc_wait,
1655                         mdev->state.disk != D_FAILED);
1656         drbd_resume_io(mdev);
1657         if ((int)retcode == (int)SS_IS_DISKLESS)
1658                 retcode = SS_NOTHING_TO_DO;
1659         if (ret)
1660                 retcode = ERR_INTR;
1661         return retcode;
1662 }
1663
1664 /* Detaching the disk is a process in multiple stages.  First we need to lock
1665  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1666  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1667  * internal references as well.
1668  * Only then have we finally detached. */
1669 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1670 {
1671         enum drbd_ret_code retcode;
1672
1673         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1674         if (!adm_ctx.reply_skb)
1675                 return retcode;
1676         if (retcode != NO_ERROR)
1677                 goto out;
1678
1679         retcode = adm_detach(adm_ctx.mdev);
1680 out:
1681         drbd_adm_finish(info, retcode);
1682         return 0;
1683 }
1684
1685 static bool conn_resync_running(struct drbd_tconn *tconn)
1686 {
1687         struct drbd_conf *mdev;
1688         bool rv = false;
1689         int vnr;
1690
1691         rcu_read_lock();
1692         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1693                 if (mdev->state.conn == C_SYNC_SOURCE ||
1694                     mdev->state.conn == C_SYNC_TARGET ||
1695                     mdev->state.conn == C_PAUSED_SYNC_S ||
1696                     mdev->state.conn == C_PAUSED_SYNC_T) {
1697                         rv = true;
1698                         break;
1699                 }
1700         }
1701         rcu_read_unlock();
1702
1703         return rv;
1704 }
1705
1706 static bool conn_ov_running(struct drbd_tconn *tconn)
1707 {
1708         struct drbd_conf *mdev;
1709         bool rv = false;
1710         int vnr;
1711
1712         rcu_read_lock();
1713         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1714                 if (mdev->state.conn == C_VERIFY_S ||
1715                     mdev->state.conn == C_VERIFY_T) {
1716                         rv = true;
1717                         break;
1718                 }
1719         }
1720         rcu_read_unlock();
1721
1722         return rv;
1723 }
1724
1725 static enum drbd_ret_code
1726 _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
1727 {
1728         struct drbd_conf *mdev;
1729         int i;
1730
1731         if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1732                 if (new_conf->wire_protocol != old_conf->wire_protocol)
1733                         return ERR_NEED_APV_100;
1734
1735                 if (new_conf->two_primaries != old_conf->two_primaries)
1736                         return ERR_NEED_APV_100;
1737
1738                 if (!new_conf->integrity_alg[0] != !old_conf->integrity_alg[0])
1739                         return ERR_NEED_APV_100;
1740
1741                 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1742                         return ERR_NEED_APV_100;
1743         }
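             /* The checks above protect parameters that are negotiated on the
              * wire and that, before protocol version 100, cannot be renegotiated
              * on an established connection (see P_PROTOCOL_UPDATE below).
              * Everything that follows may also change while disconnected. */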
1744
1745         if (!new_conf->two_primaries &&
1746             conn_highest_role(tconn) == R_PRIMARY &&
1747             conn_highest_peer(tconn) == R_PRIMARY)
1748                 return ERR_NEED_ALLOW_TWO_PRI;
1749
1750         if (new_conf->two_primaries &&
1751             (new_conf->wire_protocol != DRBD_PROT_C))
1752                 return ERR_NOT_PROTO_C;
1753
1754         idr_for_each_entry(&tconn->volumes, mdev, i) {
1755                 if (get_ldev(mdev)) {
1756                         enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
1757                         put_ldev(mdev);
1758                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
1759                                 return ERR_STONITH_AND_PROT_A;
1760                 }
1761                 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
1762                         return ERR_DISCARD;
1763         }
1764
1765         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1766                 return ERR_CONG_NOT_PROTO_A;
1767
1768         return NO_ERROR;
1769 }
1770
1771 static enum drbd_ret_code
1772 check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1773 {
1774         enum drbd_ret_code rv;
1775         struct drbd_conf *mdev;
1776         int i;
1777
1778         rcu_read_lock();
1779         rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1780         rcu_read_unlock();
1781
1782         /* tconn->volumes protected by genl_lock() here */
1783         idr_for_each_entry(&tconn->volumes, mdev, i) {
1784                 if (!mdev->bitmap) {
1785                         if (drbd_bm_init(mdev))
1786                                 return ERR_NOMEM;
1787                 }
1788         }
1789
1790         return rv;
1791 }
1792
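/* Gathers all crypto transforms and digest buffers that a new net_conf may
 * need.  On success the individual members are handed over to the tconn;
 * whatever is left behind is released again via free_crypto(). */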
1793 struct crypto {
1794         struct crypto_hash *verify_tfm;
1795         struct crypto_hash *csums_tfm;
1796         struct crypto_hash *cram_hmac_tfm;
1797         struct crypto_hash *integrity_tfm;
1798         void *int_dig_in;
1799         void *int_dig_vv;
1800 };
1801
1802 static int
1803 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
1804 {
1805         if (!tfm_name[0])
1806                 return NO_ERROR;
1807
1808         *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1809         if (IS_ERR(*tfm)) {
1810                 *tfm = NULL;
1811                 return err_alg;
1812         }
1813
1814         return NO_ERROR;
1815 }
1816
1817 static enum drbd_ret_code
1818 alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1819 {
1820         char hmac_name[CRYPTO_MAX_ALG_NAME];
1821         enum drbd_ret_code rv;
1822         int hash_size;
1823
1824         rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1825                        ERR_CSUMS_ALG);
1826         if (rv != NO_ERROR)
1827                 return rv;
1828         rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1829                        ERR_VERIFY_ALG);
1830         if (rv != NO_ERROR)
1831                 return rv;
1832         rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1833                        ERR_INTEGRITY_ALG);
1834         if (rv != NO_ERROR)
1835                 return rv;
1836         if (new_conf->cram_hmac_alg[0] != 0) {
1837                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1838                          new_conf->cram_hmac_alg);
1839
1840                 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1841                                ERR_AUTH_ALG);
1842         }
1843         if (crypto->integrity_tfm) {
1844                 hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
1845                 crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
1846                 if (!crypto->int_dig_in)
1847                         return ERR_NOMEM;
1848                 crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
1849                 if (!crypto->int_dig_vv)
1850                         return ERR_NOMEM;
1851         }
1852
1853         return rv;
1854 }
1855
1856 static void free_crypto(struct crypto *crypto)
1857 {
1858         kfree(crypto->int_dig_in);
1859         kfree(crypto->int_dig_vv);
1860         crypto_free_hash(crypto->cram_hmac_tfm);
1861         crypto_free_hash(crypto->integrity_tfm);
1862         crypto_free_hash(crypto->csums_tfm);
1863         crypto_free_hash(crypto->verify_tfm);
1864 }
1865
1866 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1867 {
1868         enum drbd_ret_code retcode;
1869         struct drbd_tconn *tconn;
1870         struct net_conf *old_conf, *new_conf = NULL;
1871         int err;
1872         int ovr; /* online verify running */
1873         int rsr; /* re-sync running */
1874         struct crypto crypto = { };
1875
1876         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1877         if (!adm_ctx.reply_skb)
1878                 return retcode;
1879         if (retcode != NO_ERROR)
1880                 goto out;
1881
1882         tconn = adm_ctx.tconn;
1883
1884         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1885         if (!new_conf) {
1886                 retcode = ERR_NOMEM;
1887                 goto out;
1888         }
1889
1890         conn_reconfig_start(tconn);
1891
1892         mutex_lock(&tconn->data.mutex);
1893         mutex_lock(&tconn->conf_update);
1894         old_conf = tconn->net_conf;
1895
1896         if (!old_conf) {
1897                 drbd_msg_put_info("net conf missing, try connect");
1898                 retcode = ERR_INVALID_REQUEST;
1899                 goto fail;
1900         }
1901
1902         *new_conf = *old_conf;
1903         if (should_set_defaults(info))
1904                 set_net_conf_defaults(new_conf);
1905
1906         err = net_conf_from_attrs_for_change(new_conf, info);
1907         if (err && err != -ENOMSG) {
1908                 retcode = ERR_MANDATORY_TAG;
1909                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1910                 goto fail;
1911         }
1912
1913         retcode = check_net_options(tconn, new_conf);
1914         if (retcode != NO_ERROR)
1915                 goto fail;
1916
1917         /* re-sync running */
1918         rsr = conn_resync_running(tconn);
1919         if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
1920                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1921                 goto fail;
1922         }
1923
1924         /* online verify running */
1925         ovr = conn_ov_running(tconn);
1926         if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1927                 retcode = ERR_VERIFY_RUNNING;
1928                 goto fail;
1929         }
1930
1931         retcode = alloc_crypto(&crypto, new_conf);
1932         if (retcode != NO_ERROR)
1933                 goto fail;
1934
1935         rcu_assign_pointer(tconn->net_conf, new_conf);
1936
1937         if (!rsr) {
1938                 crypto_free_hash(tconn->csums_tfm);
1939                 tconn->csums_tfm = crypto.csums_tfm;
1940                 crypto.csums_tfm = NULL;
1941         }
1942         if (!ovr) {
1943                 crypto_free_hash(tconn->verify_tfm);
1944                 tconn->verify_tfm = crypto.verify_tfm;
1945                 crypto.verify_tfm = NULL;
1946         }
1947
1948         kfree(tconn->int_dig_in);
1949         tconn->int_dig_in = crypto.int_dig_in;
1950         kfree(tconn->int_dig_vv);
1951         tconn->int_dig_vv = crypto.int_dig_vv;
1952         crypto_free_hash(tconn->integrity_tfm);
1953         tconn->integrity_tfm = crypto.integrity_tfm;
1954         if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
1955                 /* Do this without trying to take tconn->data.mutex again.  */
1956                 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
1957
1958         crypto_free_hash(tconn->cram_hmac_tfm);
1959         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
1960
1961         mutex_unlock(&tconn->conf_update);
1962         mutex_unlock(&tconn->data.mutex);
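             /* The new net_conf was published with rcu_assign_pointer() above.
              * Wait for all RCU read sides that might still be looking at the
              * old one before it is freed. */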
1963         synchronize_rcu();
1964         kfree(old_conf);
1965
1966         if (tconn->cstate >= C_WF_REPORT_PARAMS)
1967                 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1968
1969         goto done;
1970
1971  fail:
1972         mutex_unlock(&tconn->conf_update);
1973         mutex_unlock(&tconn->data.mutex);
1974         free_crypto(&crypto);
1975         kfree(new_conf);
1976  done:
1977         conn_reconfig_done(tconn);
1978  out:
1979         drbd_adm_finish(info, retcode);
1980         return 0;
1981 }
1982
1983 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1984 {
1985         struct drbd_conf *mdev;
1986         struct net_conf *old_conf, *new_conf = NULL;
1987         struct crypto crypto = { };
1988         struct drbd_tconn *oconn;
1989         struct drbd_tconn *tconn;
1990         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1991         enum drbd_ret_code retcode;
1992         int i;
1993         int err;
1994
1995         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1996         if (!adm_ctx.reply_skb)
1997                 return retcode;
1998         if (retcode != NO_ERROR)
1999                 goto out;
2000
2001         tconn = adm_ctx.tconn;
2002         conn_reconfig_start(tconn);
2003
2004         if (tconn->cstate > C_STANDALONE) {
2005                 retcode = ERR_NET_CONFIGURED;
2006                 goto fail;
2007         }
2008
2009         /* allocation not in the IO path, drbdsetup / netlink process context */
2010         new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
2011         if (!new_conf) {
2012                 retcode = ERR_NOMEM;
2013                 goto fail;
2014         }
2015
2016         set_net_conf_defaults(new_conf);
2017
2018         err = net_conf_from_attrs(new_conf, info);
2019         if (err) {
2020                 retcode = ERR_MANDATORY_TAG;
2021                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2022                 goto fail;
2023         }
2024
2025         retcode = check_net_options(tconn, new_conf);
2026         if (retcode != NO_ERROR)
2027                 goto fail;
2028
2029         retcode = NO_ERROR;
2030
2031         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
2032         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
2033
2034         /* No need for _rcu here. All reconfiguration is
2035          * strictly serialized on genl_lock(). We are protected against
2036          * concurrent reconfiguration/addition/deletion */
2037         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
2038                 struct net_conf *nc;
2039                 if (oconn == tconn)
2040                         continue;
2041
2042                 rcu_read_lock();
2043                 nc = rcu_dereference(oconn->net_conf);
2044                 if (nc) {
2045                         taken_addr = (struct sockaddr *)&nc->my_addr;
2046                         if (new_conf->my_addr_len == nc->my_addr_len &&
2047                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
2048                                 retcode = ERR_LOCAL_ADDR;
2049
2050                         taken_addr = (struct sockaddr *)&nc->peer_addr;
2051                         if (new_conf->peer_addr_len == nc->peer_addr_len &&
2052                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
2053                                 retcode = ERR_PEER_ADDR;
2054                 }
2055                 rcu_read_unlock();
2056                 if (retcode != NO_ERROR)
2057                         goto fail;
2058         }
2059
2060         retcode = alloc_crypto(&crypto, new_conf);
2061         if (retcode != NO_ERROR)
2062                 goto fail;
2063
2064         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2065
2066         conn_flush_workqueue(tconn);
2067
2068         mutex_lock(&tconn->conf_update);
2069         old_conf = tconn->net_conf;
2070         if (old_conf) {
2071                 retcode = ERR_NET_CONFIGURED;
2072                 mutex_unlock(&tconn->conf_update);
2073                 goto fail;
2074         }
2075         rcu_assign_pointer(tconn->net_conf, new_conf);
2076
2077         conn_free_crypto(tconn);
2078         tconn->int_dig_in = crypto.int_dig_in;
2079         tconn->int_dig_vv = crypto.int_dig_vv;
2080         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2081         tconn->integrity_tfm = crypto.integrity_tfm;
2082         tconn->csums_tfm = crypto.csums_tfm;
2083         tconn->verify_tfm = crypto.verify_tfm;
2084
2085         mutex_unlock(&tconn->conf_update);
2086
2087         rcu_read_lock();
2088         idr_for_each_entry(&tconn->volumes, mdev, i) {
2089                 mdev->send_cnt = 0;
2090                 mdev->recv_cnt = 0;
2091         }
2092         rcu_read_unlock();
2093
2094         retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2095
2096         conn_reconfig_done(tconn);
2097         drbd_adm_finish(info, retcode);
2098         return 0;
2099
2100 fail:
2101         free_crypto(&crypto);
2102         kfree(new_conf);
2103
2104         conn_reconfig_done(tconn);
2105 out:
2106         drbd_adm_finish(info, retcode);
2107         return 0;
2108 }
2109
2110 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2111 {
2112         enum drbd_state_rv rv;
2113
2114         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2115                         force ? CS_HARD : 0);
2116
2117         switch (rv) {
2118         case SS_NOTHING_TO_DO:
2119                 break;
2120         case SS_ALREADY_STANDALONE:
2121                 return SS_SUCCESS;
2122         case SS_PRIMARY_NOP:
2123                 /* Our state checking code wants to see the peer outdated. */
2124                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2125                                                 pdsk, D_OUTDATED), CS_VERBOSE);
2126                 break;
2127         case SS_CW_FAILED_BY_PEER:
2128                 /* The peer probably wants to see us outdated. */
2129                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2130                                                         disk, D_OUTDATED), 0);
2131                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2132                         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2133                                         CS_HARD);
2134                 }
2135                 break;
2136         default:;
2137                 /* no special handling necessary */
2138         }
2139
2140         if (rv >= SS_SUCCESS) {
2141                 enum drbd_state_rv rv2;
2142                 /* No one else can reconfigure the network while I am here.
2143                  * The state handling only uses drbd_thread_stop_nowait(),
2144                  * we want to really wait here until the receiver is no more.
2145                  */
2146                 drbd_thread_stop(&tconn->receiver);
2147
2148                 /* Race breaker.  This additional state change request may be
2149                  * necessary, if this was a forced disconnect during a receiver
2150                  * restart.  We may have "killed" the receiver thread just
2151                  * after drbdd_init() returned.  Typically, we should be
2152                  * C_STANDALONE already, now, and this becomes a no-op.
2153                  */
2154                 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2155                                 CS_VERBOSE | CS_HARD);
2156                 if (rv2 < SS_SUCCESS)
2157                         conn_err(tconn,
2158                                 "unexpected rv2=%d in conn_try_disconnect()\n",
2159                                 rv2);
2160         }
2161         return rv;
2162 }
2163
2164 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2165 {
2166         struct disconnect_parms parms;
2167         struct drbd_tconn *tconn;
2168         enum drbd_state_rv rv;
2169         enum drbd_ret_code retcode;
2170         int err;
2171
2172         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2173         if (!adm_ctx.reply_skb)
2174                 return retcode;
2175         if (retcode != NO_ERROR)
2176                 goto fail;
2177
2178         tconn = adm_ctx.tconn;
2179         memset(&parms, 0, sizeof(parms));
2180         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2181                 err = disconnect_parms_from_attrs(&parms, info);
2182                 if (err) {
2183                         retcode = ERR_MANDATORY_TAG;
2184                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2185                         goto fail;
2186                 }
2187         }
2188
2189         rv = conn_try_disconnect(tconn, parms.force_disconnect);
2190         if (rv < SS_SUCCESS)
2191                 retcode = rv;  /* FIXME: Type mismatch. */
2192         else
2193                 retcode = NO_ERROR;
2194  fail:
2195         drbd_adm_finish(info, retcode);
2196         return 0;
2197 }
2198
2199 void resync_after_online_grow(struct drbd_conf *mdev)
2200 {
2201         int iass; /* I am sync source */
2202
2203         dev_info(DEV, "Resync of new storage after online grow\n");
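             /* If the roles differ, the primary becomes the sync source.  With
              * equal roles, fall back to the DISCARD_CONCURRENT bit, which the
              * connection handshake sets on exactly one of the two nodes. */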
2204         if (mdev->state.role != mdev->state.peer)
2205                 iass = (mdev->state.role == R_PRIMARY);
2206         else
2207                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2208
2209         if (iass)
2210                 drbd_start_resync(mdev, C_SYNC_SOURCE);
2211         else
2212                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE | CS_SERIALIZE);
2213 }
2214
2215 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2216 {
2217         struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2218         struct resize_parms rs;
2219         struct drbd_conf *mdev;
2220         enum drbd_ret_code retcode;
2221         enum determine_dev_size dd;
2222         enum dds_flags ddsf;
2223         sector_t u_size;
2224         int err;
2225
2226         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2227         if (!adm_ctx.reply_skb)
2228                 return retcode;
2229         if (retcode != NO_ERROR)
2230                 goto fail;
2231
2232         memset(&rs, 0, sizeof(struct resize_parms));
2233         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2234                 err = resize_parms_from_attrs(&rs, info);
2235                 if (err) {
2236                         retcode = ERR_MANDATORY_TAG;
2237                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2238                         goto fail;
2239                 }
2240         }
2241
2242         mdev = adm_ctx.mdev;
2243         if (mdev->state.conn > C_CONNECTED) {
2244                 retcode = ERR_RESIZE_RESYNC;
2245                 goto fail;
2246         }
2247
2248         if (mdev->state.role == R_SECONDARY &&
2249             mdev->state.peer == R_SECONDARY) {
2250                 retcode = ERR_NO_PRIMARY;
2251                 goto fail;
2252         }
2253
2254         if (!get_ldev(mdev)) {
2255                 retcode = ERR_NO_DISK;
2256                 goto fail;
2257         }
2258
2259         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2260                 retcode = ERR_NEED_APV_93;
2261                 goto fail;
2262         }
2263
2264         rcu_read_lock();
2265         u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2266         rcu_read_unlock();
2267         if (u_size != (sector_t)rs.resize_size) {
2268                 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2269                 if (!new_disk_conf) {
2270                         retcode = ERR_NOMEM;
2271                         goto fail;
2272                 }
2273         }
2274
2275         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2276                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2277
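             /* Only the user-visible size setting changes; update disk_conf with
              * the usual RCU scheme: copy, modify, publish with rcu_assign_pointer(),
              * synchronize_rcu(), then free the old version. */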
2278         if (new_disk_conf) {
2279                 mutex_lock(&mdev->tconn->conf_update);
2280                 old_disk_conf = mdev->ldev->disk_conf;
2281                 *new_disk_conf = *old_disk_conf;
2282                 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2283                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2284                 mutex_unlock(&mdev->tconn->conf_update);
2285                 synchronize_rcu();
2286                 kfree(old_disk_conf);
2287         }
2288
2289         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2290         dd = drbd_determine_dev_size(mdev, ddsf);
2291         drbd_md_sync(mdev);
2292         put_ldev(mdev);
2293         if (dd == dev_size_error) {
2294                 retcode = ERR_NOMEM_BITMAP;
2295                 goto fail;
2296         }
2297
2298         if (mdev->state.conn == C_CONNECTED) {
2299                 if (dd == grew)
2300                         set_bit(RESIZE_PENDING, &mdev->flags);
2301
2302                 drbd_send_uuids(mdev);
2303                 drbd_send_sizes(mdev, 1, ddsf);
2304         }
2305
2306  fail:
2307         drbd_adm_finish(info, retcode);
2308         return 0;
2309 }
2310
2311 void drbd_set_res_opts_defaults(struct res_opts *r)
2312 {
2313         set_res_opts_defaults(r);
2314 }
2315
2316 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2317 {
2318         enum drbd_ret_code retcode;
2319         cpumask_var_t new_cpu_mask;
2320         struct drbd_tconn *tconn;
2321         struct res_opts res_opts;
2322         int err;
2323
2324         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2325         if (!adm_ctx.reply_skb)
2326                 return retcode;
2327         if (retcode != NO_ERROR)
2328                 goto fail;
2329         tconn = adm_ctx.tconn;
2330
2331         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2332                 retcode = ERR_NOMEM;
2333                 drbd_msg_put_info("unable to allocate cpumask");
2334                 goto fail;
2335         }
2336
2337         res_opts = tconn->res_opts;
2338         if (should_set_defaults(info))
2339                 set_res_opts_defaults(&res_opts);
2340
2341         err = res_opts_from_attrs(&res_opts, info);
2342         if (err && err != -ENOMSG) {
2343                 retcode = ERR_MANDATORY_TAG;
2344                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2345                 goto fail;
2346         }
2347
2348         /* silently ignore cpu mask on UP kernel */
2349         if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
2350                 err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
2351                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2352                 if (err) {
2353                         conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2354                         retcode = ERR_CPU_MASK_PARSE;
2355                         goto fail;
2356                 }
2357         }
2358
2359
2360         tconn->res_opts = res_opts;
2361
2362         if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2363                 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2364                 drbd_calc_cpu_mask(tconn);
2365                 tconn->receiver.reset_cpu_mask = 1;
2366                 tconn->asender.reset_cpu_mask = 1;
2367                 tconn->worker.reset_cpu_mask = 1;
2368         }
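             /* The threads pick up the new mask themselves: each one re-applies
              * its CPU affinity the next time it runs and sees reset_cpu_mask set. */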
2369
2370 fail:
2371         free_cpumask_var(new_cpu_mask);
2372
2373         drbd_adm_finish(info, retcode);
2374         return 0;
2375 }
2376
2377 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2378 {
2379         struct drbd_conf *mdev;
2380         int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2381
2382         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2383         if (!adm_ctx.reply_skb)
2384                 return retcode;
2385         if (retcode != NO_ERROR)
2386                 goto out;
2387
2388         mdev = adm_ctx.mdev;
2389
2390         /* If there is still bitmap IO pending, probably because of a previous
2391          * resync just being finished, wait for it before requesting a new resync. */
2392         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2393
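             /* Becoming sync target (C_STARTING_SYNC_T) only makes sense with a
              * connection.  If we are not connected, simply mark the whole disk
              * D_INCONSISTENT instead; the loop below closes the race with a
              * connection being established at the same time. */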
2394         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2395
2396         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2397                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2398
2399         while (retcode == SS_NEED_CONNECTION) {
2400                 spin_lock_irq(&mdev->tconn->req_lock);
2401                 if (mdev->state.conn < C_CONNECTED)
2402                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2403                 spin_unlock_irq(&mdev->tconn->req_lock);
2404
2405                 if (retcode != SS_NEED_CONNECTION)
2406                         break;
2407
2408                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2409         }
2410
2411 out:
2412         drbd_adm_finish(info, retcode);
2413         return 0;
2414 }
2415
2416 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2417 {
2418         int rv;
2419
2420         rv = drbd_bmio_set_n_write(mdev);
2421         drbd_suspend_al(mdev);
2422         return rv;
2423 }
2424
2425 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2426                 union drbd_state mask, union drbd_state val)
2427 {
2428         enum drbd_ret_code retcode;
2429
2430         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2431         if (!adm_ctx.reply_skb)
2432                 return retcode;
2433         if (retcode != NO_ERROR)
2434                 goto out;
2435
2436         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2437 out:
2438         drbd_adm_finish(info, retcode);
2439         return 0;
2440 }
2441
2442 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2443 {
2444         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2445 }
2446
2447 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2448 {
2449         enum drbd_ret_code retcode;
2450
2451         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2452         if (!adm_ctx.reply_skb)
2453                 return retcode;
2454         if (retcode != NO_ERROR)
2455                 goto out;
2456
2457         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2458                 retcode = ERR_PAUSE_IS_SET;
2459 out:
2460         drbd_adm_finish(info, retcode);
2461         return 0;
2462 }
2463
2464 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2465 {
2466         union drbd_dev_state s;
2467         enum drbd_ret_code retcode;
2468
2469         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2470         if (!adm_ctx.reply_skb)
2471                 return retcode;
2472         if (retcode != NO_ERROR)
2473                 goto out;
2474
2475         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2476                 s = adm_ctx.mdev->state;
2477                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2478                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2479                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2480                 } else {
2481                         retcode = ERR_PAUSE_IS_CLEAR;
2482                 }
2483         }
2484
2485 out:
2486         drbd_adm_finish(info, retcode);
2487         return 0;
2488 }
2489
2490 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2491 {
2492         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2493 }
2494
2495 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2496 {
2497         struct drbd_conf *mdev;
2498         int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2499
2500         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2501         if (!adm_ctx.reply_skb)
2502                 return retcode;
2503         if (retcode != NO_ERROR)
2504                 goto out;
2505
2506         mdev = adm_ctx.mdev;
2507         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2508                 drbd_uuid_new_current(mdev);
2509                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2510         }
2511         drbd_suspend_io(mdev);
2512         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2513         if (retcode == SS_SUCCESS) {
2514                 if (mdev->state.conn < C_CONNECTED)
2515                         tl_clear(mdev->tconn);
2516                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2517                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2518         }
2519         drbd_resume_io(mdev);
2520
2521 out:
2522         drbd_adm_finish(info, retcode);
2523         return 0;
2524 }
2525
2526 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2527 {
2528         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2529 }
2530
2531 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *resource_name, unsigned vnr)
2532 {
2533         struct nlattr *nla;
2534         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2535         if (!nla)
2536                 goto nla_put_failure;
2537         if (vnr != VOLUME_UNSPECIFIED)
2538                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2539         NLA_PUT_STRING(skb, T_ctx_resource_name, resource_name);
2540         nla_nest_end(skb, nla);
2541         return 0;
2542
2543 nla_put_failure:
2544         if (nla)
2545                 nla_nest_cancel(skb, nla);
2546         return -EMSGSIZE;
2547 }
2548
2549 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2550                 const struct sib_info *sib)
2551 {
2552         struct state_info *si = NULL; /* for sizeof(si->member); */
2553         struct net_conf *nc;
2554         struct nlattr *nla;
2555         int got_ldev;
2556         int err = 0;
2557         int exclude_sensitive;
2558
2559         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2560          * to.  So we had better exclude sensitive information.
2561          *
2562          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2563          * in the context of the requesting user process. Exclude sensitive
2564          * information, unless current has superuser.
2565          *
2566          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2567          * relies on the current implementation of netlink_dump(), which
2568          * executes the dump callback successively from netlink_recvmsg(),
2569          * always in the context of the receiving process */
2570         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2571
2572         got_ldev = get_ldev(mdev);
2573
2574         /* We need to add connection name and volume number information still.
2575          * Minor number is in drbd_genlmsghdr. */
2576         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2577                 goto nla_put_failure;
2578
2579         if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2580                 goto nla_put_failure;
2581
2582         rcu_read_lock();
2583         if (got_ldev)
2584                 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
2585                         goto nla_put_failure;
2586
2587         nc = rcu_dereference(mdev->tconn->net_conf);
2588         if (nc)
2589                 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2590         rcu_read_unlock();
2591         if (err)
2592                 goto nla_put_failure;
2593
2594         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2595         if (!nla)
2596                 goto nla_put_failure;
2597         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2598         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2599         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2600         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2601
2602         if (got_ldev) {
2603                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2604                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2605                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2606                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2607                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2608                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2609                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2610                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2611                 }
2612         }
2613
2614         if (sib) {
2615                 switch (sib->sib_reason) {
2616                 case SIB_SYNC_PROGRESS:
2617                 case SIB_GET_STATUS_REPLY:
2618                         break;
2619                 case SIB_STATE_CHANGE:
2620                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2621                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2622                         break;
2623                 case SIB_HELPER_POST:
2624                         NLA_PUT_U32(skb,
2625                                 T_helper_exit_code, sib->helper_exit_code);
2626                         /* fall through */
2627                 case SIB_HELPER_PRE:
2628                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2629                         break;
2630                 }
2631         }
2632         nla_nest_end(skb, nla);
2633
2634         if (0)
2635 nla_put_failure:
2636                 err = -EMSGSIZE;
2637         if (got_ldev)
2638                 put_ldev(mdev);
2639         return err;
2640 }
2641
2642 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2643 {
2644         enum drbd_ret_code retcode;
2645         int err;
2646
2647         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2648         if (!adm_ctx.reply_skb)
2649                 return retcode;
2650         if (retcode != NO_ERROR)
2651                 goto out;
2652
2653         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2654         if (err) {
2655                 nlmsg_free(adm_ctx.reply_skb);
2656                 return err;
2657         }
2658 out:
2659         drbd_adm_finish(info, retcode);
2660         return 0;
2661 }
2662
2663 int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2664 {
2665         struct drbd_conf *mdev;
2666         struct drbd_genlmsghdr *dh;
2667         struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2668         struct drbd_tconn *tconn = NULL;
2669         struct drbd_tconn *tmp;
2670         unsigned volume = cb->args[1];
2671
2672         /* Open coded, deferred, iteration:
2673          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2674          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2675          *        ...
2676          *      }
2677          * }
2678          * where tconn is cb->args[0];
2679          * and i is cb->args[1];
2680          *
2681          * cb->args[2] indicates if we shall loop over all resources,
2682          * or just dump all volumes of a single resource.
2683          *
2684          * This may miss entries inserted after this dump started,
2685          * or entries deleted before they are reached.
2686          *
2687          * We need to make sure the mdev won't disappear while
2688          * we are looking at it, and revalidate our iterators
2689          * on each iteration.
2690          */
2691
2692         /* synchronize with conn_create()/conn_destroy() */
2693         rcu_read_lock();
2694         /* revalidate iterator position */
2695         list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
2696                 if (pos == NULL) {
2697                         /* first iteration */
2698                         pos = tmp;
2699                         tconn = pos;
2700                         break;
2701                 }
2702                 if (tmp == pos) {
2703                         tconn = pos;
2704                         break;
2705                 }
2706         }
2707         if (tconn) {
2708 next_tconn:
2709                 mdev = idr_get_next(&tconn->volumes, &volume);
2710                 if (!mdev) {
2711                         /* No more volumes to dump on this tconn.
2712                          * Advance tconn iterator. */
2713                         pos = list_entry_rcu(tconn->all_tconn.next,
2714                                              struct drbd_tconn, all_tconn);
2715                         /* Did we dump any volume on this tconn yet? */
2716                         if (volume != 0) {
2717                                 /* If we reached the end of the list,
2718                                  * or only a single resource dump was requested,
2719                                  * we are done. */
2720                                 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2721                                         goto out;
2722                                 volume = 0;
2723                                 tconn = pos;
2724                                 goto next_tconn;
2725                         }
2726                 }
2727
2728                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2729                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2730                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2731                 if (!dh)
2732                         goto out;
2733
2734                 if (!mdev) {
2735                         /* this is a tconn without a single volume */
2736                         dh->minor = -1U;
2737                         dh->ret_code = NO_ERROR;
2738                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2739                                 genlmsg_cancel(skb, dh);
2740                         else
2741                                 genlmsg_end(skb, dh);
2742                         goto out;
2743                 }
2744
2745                 D_ASSERT(mdev->vnr == volume);
2746                 D_ASSERT(mdev->tconn == tconn);
2747
2748                 dh->minor = mdev_to_minor(mdev);
2749                 dh->ret_code = NO_ERROR;
2750
2751                 if (nla_put_status_info(skb, mdev, NULL)) {
2752                         genlmsg_cancel(skb, dh);
2753                         goto out;
2754                 }
2755                 genlmsg_end(skb, dh);
2756         }
2757
2758 out:
2759         rcu_read_unlock();
2760         /* where to start the next iteration */
2761         cb->args[0] = (long)pos;
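             /* If we are still on the same tconn, continue with the next volume;
              * otherwise start over at volume 0 of the next tconn. */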
2762         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2763
2764         /* No more tconns/volumes/minors found results in an empty skb.
2765          * Which will terminate the dump. */
2766         return skb->len;
2767 }
2768
2769 /*
2770  * Request status of all resources, or of all volumes within a single resource.
2771  *
2772  * This is a dump, as the answer may not fit in a single reply skb otherwise.
2773  * Which means we cannot use the family->attrbuf or other such members, because
2774  * dump is NOT protected by the genl_lock().  During dump, we only have access
2775  * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2776  *
2777  * Once things are setup properly, we call into get_one_status().
2778  */
2779 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2780 {
2781         const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2782         struct nlattr *nla;
2783         const char *resource_name;
2784         struct drbd_tconn *tconn;
2785         int maxtype;
2786
2787         /* Is this a followup call? */
2788         if (cb->args[0]) {
2789                 /* ... of a single resource dump,
2790                  * and the resource iterator has been advanced already? */
2791                 if (cb->args[2] && cb->args[2] != cb->args[0])
2792                         return 0; /* DONE. */
2793                 goto dump;
2794         }
2795
2796         /* First call (from netlink_dump_start).  We need to figure out
2797          * which resource(s) the user wants us to dump. */
2798         nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2799                         nlmsg_attrlen(cb->nlh, hdrlen),
2800                         DRBD_NLA_CFG_CONTEXT);
2801
2802         /* No explicit context given.  Dump all. */
2803         if (!nla)
2804                 goto dump;
2805         maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2806         nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2807         if (IS_ERR(nla))
2808                 return PTR_ERR(nla);
2809         /* context given, but no name present? */
2810         if (!nla)
2811                 return -EINVAL;
2812         resource_name = nla_data(nla);
2813         tconn = conn_get_by_name(resource_name);
2814
2815         if (!tconn)
2816                 return -ENODEV;
2817
2818         kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2819
2820         /* prime iterators, and set "filter" mode mark:
2821          * only dump this tconn. */
2822         cb->args[0] = (long)tconn;
2823         /* cb->args[1] = 0; passed in this way. */
2824         cb->args[2] = (long)tconn;
2825
2826 dump:
2827         return get_one_status(skb, cb);
2828 }
2829
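/*
 * Report which timeout type applies to this minor: UT_PEER_OUTDATED if the
 * peer disk is already Outdated, UT_DEGRADED if the degraded-node timeout
 * applies (USE_DEGR_WFC_T is set), UT_DEFAULT otherwise.  The result is
 * returned as timeout_parms in the reply skb rather than as a return code.
 */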
2830 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2831 {
2832         enum drbd_ret_code retcode;
2833         struct timeout_parms tp;
2834         int err;
2835
2836         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2837         if (!adm_ctx.reply_skb)
2838                 return retcode;
2839         if (retcode != NO_ERROR)
2840                 goto out;
2841
2842         tp.timeout_type =
2843                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2844                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2845                 UT_DEFAULT;
2846
2847         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2848         if (err) {
2849                 nlmsg_free(adm_ctx.reply_skb);
2850                 return err;
2851         }
2852 out:
2853         drbd_adm_finish(info, retcode);
2854         return 0;
2855 }
2856
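/*
 * Start (or resume) online verify on this minor.  By default the run resumes
 * from the last known position; DRBD_NLA_START_OV_PARMS may override the
 * start sector, which is rounded down to a bitmap-bit boundary.  Pending
 * bitmap IO is waited for before the state change to C_VERIFY_S is requested.
 */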
2857 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2858 {
2859         struct drbd_conf *mdev;
2860         enum drbd_ret_code retcode;
2861
2862         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2863         if (!adm_ctx.reply_skb)
2864                 return retcode;
2865         if (retcode != NO_ERROR)
2866                 goto out;
2867
2868         mdev = adm_ctx.mdev;
2869         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2870                 /* resume from last known position, if possible */
2871                 struct start_ov_parms parms =
2872                         { .ov_start_sector = mdev->ov_start_sector };
2873                 int err = start_ov_parms_from_attrs(&parms, info);
2874                 if (err) {
2875                         retcode = ERR_MANDATORY_TAG;
2876                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2877                         goto out;
2878                 }
2879                 /* w_make_ov_request expects position to be aligned */
2880                 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2881         }
2882         /* If there is still bitmap IO pending, e.g. from a previous resync or
2883          * verify run just finishing, wait for it before starting the new run. */
2884         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2885         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2886 out:
2887         drbd_adm_finish(info, retcode);
2888         return 0;
2889 }
2890
2891
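/*
 * Generate a new current UUID for this minor.  Only allowed while StandAlone,
 * except for the "skip initial sync" case: Connected, protocol >= 90, current
 * UUID still UUID_JUST_CREATED, and clear_bm requested -- then the bitmap is
 * cleared and both sides are marked UpToDate.
 */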
2892 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2893 {
2894         struct drbd_conf *mdev;
2895         enum drbd_ret_code retcode;
2896         int skip_initial_sync = 0;
2897         int err;
2898         struct new_c_uuid_parms args;
2899
2900         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2901         if (!adm_ctx.reply_skb)
2902                 return retcode;
2903         if (retcode != NO_ERROR)
2904                 goto out_nolock;
2905
2906         mdev = adm_ctx.mdev;
2907         memset(&args, 0, sizeof(args));
2908         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2909                 err = new_c_uuid_parms_from_attrs(&args, info);
2910                 if (err) {
2911                         retcode = ERR_MANDATORY_TAG;
2912                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2913                         goto out_nolock;
2914                 }
2915         }
2916
2917         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2918
2919         if (!get_ldev(mdev)) {
2920                 retcode = ERR_NO_DISK;
2921                 goto out;
2922         }
2923
2924         /* this is "skip initial sync": the disks are assumed to be clean */
2925         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2926             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2927                 dev_info(DEV, "Preparing to skip initial sync\n");
2928                 skip_initial_sync = 1;
2929         } else if (mdev->state.conn != C_STANDALONE) {
2930                 retcode = ERR_CONNECTED;
2931                 goto out_dec;
2932         }
2933
2934         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2935         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2936
2937         if (args.clear_bm) {
2938                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2939                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2940                 if (err) {
2941                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2942                         retcode = ERR_IO_MD_DISK;
2943                 }
2944                 if (skip_initial_sync) {
2945                         drbd_send_uuids_skip_initial_sync(mdev);
2946                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2947                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2948                         spin_lock_irq(&mdev->tconn->req_lock);
2949                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2950                                         CS_VERBOSE, NULL);
2951                         spin_unlock_irq(&mdev->tconn->req_lock);
2952                 }
2953         }
2954
2955         drbd_md_sync(mdev);
2956 out_dec:
2957         put_ldev(mdev);
2958 out:
2959         mutex_unlock(mdev->state_mutex);
2960 out_nolock:
2961         drbd_adm_finish(info, retcode);
2962         return 0;
2963 }
2964
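/* A resource name must be non-empty and must not contain a slash. */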
2965 static enum drbd_ret_code
2966 drbd_check_resource_name(const char *name)
2967 {
2968         if (!name || !name[0]) {
2969                 drbd_msg_put_info("resource name missing");
2970                 return ERR_MANDATORY_TAG;
2971         }
2972         /* if we want to use these in sysfs/configfs/debugfs some day,
2973          * we must not allow slashes */
2974         if (strchr(name, '/')) {
2975                 drbd_msg_put_info("invalid resource name");
2976                 return ERR_INVALID_REQUEST;
2977         }
2978         return NO_ERROR;
2979 }
2980
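/*
 * Create a new resource (tconn).  A resource that already exists is only an
 * error if NLM_F_EXCL was set in the request.
 */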
2981 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
2982 {
2983         enum drbd_ret_code retcode;
2984
2985         retcode = drbd_adm_prepare(skb, info, 0);
2986         if (!adm_ctx.reply_skb)
2987                 return retcode;
2988         if (retcode != NO_ERROR)
2989                 goto out;
2990
2991         retcode = drbd_check_resource_name(adm_ctx.resource_name);
2992         if (retcode != NO_ERROR)
2993                 goto out;
2994
2995         if (adm_ctx.tconn) {
2996                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2997                         retcode = ERR_INVALID_REQUEST;
2998                         drbd_msg_put_info("resource exists");
2999                 }
3000                 /* else: still NO_ERROR */
3001                 goto out;
3002         }
3003
3004         if (!conn_create(adm_ctx.resource_name))
3005                 retcode = ERR_NOMEM;
3006 out:
3007         drbd_adm_finish(info, retcode);
3008         return 0;
3009 }
3010
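/*
 * Add a minor (volume) to an existing resource.  The requested minor number
 * and volume id are range checked; as with new-resource, an already existing
 * minor is only an error if NLM_F_EXCL was set.
 */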
3011 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3012 {
3013         struct drbd_genlmsghdr *dh = info->userhdr;
3014         enum drbd_ret_code retcode;
3015
3016         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3017         if (!adm_ctx.reply_skb)
3018                 return retcode;
3019         if (retcode != NO_ERROR)
3020                 goto out;
3021
3022         /* FIXME drop minor_count parameter, limit to MINORMASK */
3023         if (dh->minor >= minor_count) {
3024                 drbd_msg_put_info("requested minor out of range");
3025                 retcode = ERR_INVALID_REQUEST;
3026                 goto out;
3027         }
3028         if (adm_ctx.volume > DRBD_VOLUME_MAX) {
3029                 drbd_msg_put_info("requested volume id out of range");
3030                 retcode = ERR_INVALID_REQUEST;
3031                 goto out;
3032         }
3033
3034         /* drbd_adm_prepare made sure already
3035          * that mdev->tconn and mdev->vnr match the request. */
3036         if (adm_ctx.mdev) {
3037                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3038                         retcode = ERR_MINOR_EXISTS;
3039                 /* else: still NO_ERROR */
3040                 goto out;
3041         }
3042
3043         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3044 out:
3045         drbd_adm_finish(info, retcode);
3046         return 0;
3047 }
3048
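/*
 * A minor may only be deleted while it is Diskless and Secondary; being
 * StandAlone is not required, see the comment in the condition below.
 */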
3049 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3050 {
3051         if (mdev->state.disk == D_DISKLESS &&
3052             /* deliberately no mdev->state.conn == C_STANDALONE check here:
3053              * we may want to delete a minor from a live replication group.
3054              */
3055             mdev->state.role == R_SECONDARY) {
3056                 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3057                 idr_remove(&minors, mdev_to_minor(mdev));
3058                 del_gendisk(mdev->vdisk);
3059                 synchronize_rcu();
3060                 kref_put(&mdev->kref, &drbd_minor_destroy);
3061                 return NO_ERROR;
3062         } else
3063                 return ERR_MINOR_CONFIGURED;
3064 }
3065
3066 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
3067 {
3068         enum drbd_ret_code retcode;
3069
3070         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3071         if (!adm_ctx.reply_skb)
3072                 return retcode;
3073         if (retcode != NO_ERROR)
3074                 goto out;
3075
3076         retcode = adm_delete_minor(adm_ctx.mdev);
3077 out:
3078         drbd_adm_finish(info, retcode);
3079         return 0;
3080 }
3081
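/*
 * Take a whole resource down: demote all volumes to Secondary, disconnect,
 * detach, stop the worker, delete all volumes, and finally delete the
 * connection itself.  Any failure along the way aborts with the
 * corresponding error code.
 */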
3082 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3083 {
3084         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3085         struct drbd_conf *mdev;
3086         unsigned i;
3087
3088         retcode = drbd_adm_prepare(skb, info, 0);
3089         if (!adm_ctx.reply_skb)
3090                 return retcode;
3091         if (retcode != NO_ERROR)
3092                 goto out;
3093
3094         if (!adm_ctx.tconn) {
3095                 retcode = ERR_RES_NOT_KNOWN;
3096                 goto out;
3097         }
3098
3099         /* demote */
3100         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3101                 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3102                 if (retcode < SS_SUCCESS) {
3103                         drbd_msg_put_info("failed to demote");
3104                         goto out;
3105                 }
3106         }
3107
3108         retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3109         if (retcode < SS_SUCCESS) {
3110                 drbd_msg_put_info("failed to disconnect");
3111                 goto out;
3112         }
3113
3114         /* detach */
3115         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3116                 retcode = adm_detach(mdev);
3117                 if (retcode < SS_SUCCESS) {
3118                         drbd_msg_put_info("failed to detach");
3119                         goto out;
3120                 }
3121         }
3122
3123         /* If we reach this, all volumes (of this tconn) are Secondary,
3124          * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
3125          * actually stopped; state handling only does drbd_thread_stop_nowait(). */
3126         drbd_thread_stop(&adm_ctx.tconn->worker);
3127
3128         /* Now, nothing can fail anymore */
3129
3130         /* delete volumes */
3131         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3132                 retcode = adm_delete_minor(mdev);
3133                 if (retcode != NO_ERROR) {
3134                         /* "can not happen" */
3135                         drbd_msg_put_info("failed to delete volume");
3136                         goto out;
3137                 }
3138         }
3139
3140         /* delete connection */
3141         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3142                 list_del_rcu(&adm_ctx.tconn->all_tconn);
3143                 synchronize_rcu();
3144                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3145
3146                 retcode = NO_ERROR;
3147         } else {
3148                 /* "can not happen" */
3149                 retcode = ERR_RES_IN_USE;
3150                 drbd_msg_put_info("failed to delete connection");
3151         }
3152         goto out;
3153 out:
3154         drbd_adm_finish(info, retcode);
3155         return 0;
3156 }
3157
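/*
 * Delete a resource (tconn).  This only succeeds while the resource has no
 * minors left; otherwise ERR_RES_IN_USE is returned.
 */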
3158 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3159 {
3160         enum drbd_ret_code retcode;
3161
3162         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3163         if (!adm_ctx.reply_skb)
3164                 return retcode;
3165         if (retcode != NO_ERROR)
3166                 goto out;
3167
3168         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3169                 list_del_rcu(&adm_ctx.tconn->all_tconn);
3170                 synchronize_rcu();
3171                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3172
3173                 retcode = NO_ERROR;
3174         } else {
3175                 retcode = ERR_RES_IN_USE;
3176         }
3177
3178         if (retcode == NO_ERROR)
3179                 drbd_thread_stop(&adm_ctx.tconn->worker);
3180 out:
3181         drbd_adm_finish(info, retcode);
3182         return 0;
3183 }
3184
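/*
 * Broadcast a state-change (or other) event to the DRBD events multicast
 * group.  Allocation or message-size failures are only logged; -ESRCH
 * ("no one is listening") is not treated as an error.
 */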
3185 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3186 {
3187         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3188         struct sk_buff *msg;
3189         struct drbd_genlmsghdr *d_out;
3190         unsigned seq;
3191         int err = -ENOMEM;
3192
3193         seq = atomic_inc_return(&drbd_genl_seq);
3194         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3195         if (!msg)
3196                 goto failed;
3197
3198         err = -EMSGSIZE;
3199         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3200         if (!d_out) /* cannot happen, but anyway. */
3201                 goto nla_put_failure;
3202         d_out->minor = mdev_to_minor(mdev);
3203         d_out->ret_code = NO_ERROR;
3204
3205         if (nla_put_status_info(msg, mdev, sib))
3206                 goto nla_put_failure;
3207         genlmsg_end(msg, d_out);
3208         err = drbd_genl_multicast_events(msg, 0);
3209         /* msg has been consumed or freed in netlink_broadcast() */
3210         if (err && err != -ESRCH)
3211                 goto failed;
3212
3213         return;
3214
3215 nla_put_failure:
3216         nlmsg_free(msg);
3217 failed:
3218         dev_err(DEV, "Error %d while broadcasting event. "
3219                         "Event seq:%u sib_reason:%u\n",
3220                         err, seq, sib->sib_reason);
3221 }
3222
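/*
 * Strip DRBD_GENLA_F_MANDATORY from all attributes nested inside @nla.
 * Returns -EOPNOTSUPP if an attribute carries that flag but is not known
 * to us (its type is beyond @maxtype).
 */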
3223 int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
3224 {
3225         struct nlattr *head = nla_data(nla);
3226         int len = nla_len(nla);
3227         int rem;
3228
3229         /*
3230          * validate_nla (called from nla_parse_nested) ignores attributes
3231          * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
3232          * To have it also validate attributes with the DRBD_GENLA_F_MANDATORY
3233          * flag set, check for and strip that flag before calling
3234          * nla_parse_nested.
3235          */
3236
3237         nla_for_each_attr(nla, head, len, rem) {
3238                 if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
3239                         nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
3240                         if (nla_type(nla) > maxtype)
3241                                 return -EOPNOTSUPP;
3242                 }
3243         }
3244         return 0;
3245 }
3246
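/*
 * Like nla_parse_nested(), but first handles the DRBD_GENLA_F_MANDATORY flag
 * via drbd_nla_check_mandatory() above.
 */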
3247 int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
3248                           const struct nla_policy *policy)
3249 {
3250         int err;
3251
3252         err = drbd_nla_check_mandatory(maxtype, nla);
3253         if (!err)
3254                 err = nla_parse_nested(tb, maxtype, nla, policy);
3255
3256         return err;
3257 }
3258
3259 struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
3260 {
3261         int err;
3262         /*
3263          * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
3264          * we don't know about that attribute, reject all the nested
3265          * attributes.
3266          */
3267         err = drbd_nla_check_mandatory(maxtype, nla);
3268         if (err)
3269                 return ERR_PTR(err);
3270         return nla_find_nested(nla, attrtype);
3271 }