drbd: Allow online change of replication protocol only with agreed_pv >= 100
linux-block.git: drivers/block/drbd/drbd_nl.c
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74 /* .dumpit */
75 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77 #include <linux/drbd_genl_api.h>
78 #include <linux/genl_magic_func.h>
79
80 /* used as the holder argument to blkdev_get_by_path, to claim our meta data device(s) */
81 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
82
83 /* Configuration is strictly serialized, because generic netlink message
84  * processing is strictly serialized by the genl_lock(),
85  * which means we can use one static global drbd_config_context struct.
86  */
87 static struct drbd_config_context {
88         /* assigned from drbd_genlmsghdr */
89         unsigned int minor;
90         /* assigned from request attributes, if present */
91         unsigned int volume;
92 #define VOLUME_UNSPECIFIED              (-1U)
93         /* pointer into the request skb,
94          * limited lifetime! */
95         char *conn_name;
96
97         /* reply buffer */
98         struct sk_buff *reply_skb;
99         /* pointer into reply buffer */
100         struct drbd_genlmsghdr *reply_dh;
101         /* resolved from attributes, if possible */
102         struct drbd_conf *mdev;
103         struct drbd_tconn *tconn;
104 } adm_ctx;
105
106 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
107 {
108         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
109         if (genlmsg_reply(skb, info))
110                 printk(KERN_ERR "drbd: error sending genl reply\n");
111 }
112
113 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
114  * reason it could fail would be lack of space in the skb, and there are 4k available. */
115 int drbd_msg_put_info(const char *info)
116 {
117         struct sk_buff *skb = adm_ctx.reply_skb;
118         struct nlattr *nla;
119         int err = -EMSGSIZE;
120
121         if (!info || !info[0])
122                 return 0;
123
124         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
125         if (!nla)
126                 return err;
127
128         err = nla_put_string(skb, T_info_text, info);
129         if (err) {
130                 nla_nest_cancel(skb, nla);
131                 return err;
132         } else
133                 nla_nest_end(skb, nla);
134         return 0;
135 }
136
137 /* This would be a good candidate for a "pre_doit" hook,
138  * and per-family private info->pointers.
139  * But we need to stay compatible with older kernels.
140  * If it returns successfully, adm_ctx members are valid.
141  */
142 #define DRBD_ADM_NEED_MINOR     1
143 #define DRBD_ADM_NEED_CONN      2
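/* Typical .doit handler pattern (a sketch for orientation; it mirrors
 * drbd_adm_set_role() and the other handlers below, it is not an extra API):
 *
 *	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	... operate on adm_ctx.mdev and/or adm_ctx.tconn ...
 * out:
 *	drbd_adm_finish(info, retcode);
 *	return 0;
 */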
144 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
145                 unsigned flags)
146 {
147         struct drbd_genlmsghdr *d_in = info->userhdr;
148         const u8 cmd = info->genlhdr->cmd;
149         int err;
150
151         memset(&adm_ctx, 0, sizeof(adm_ctx));
152
153         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
154         if (cmd != DRBD_ADM_GET_STATUS
155         && security_netlink_recv(skb, CAP_SYS_ADMIN))
156                return -EPERM;
157
158         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
159         if (!adm_ctx.reply_skb)
160                 goto fail;
161
162         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
163                                         info, &drbd_genl_family, 0, cmd);
164         /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
165          * but check anyway */
166         if (!adm_ctx.reply_dh)
167                 goto fail;
168
169         adm_ctx.reply_dh->minor = d_in->minor;
170         adm_ctx.reply_dh->ret_code = NO_ERROR;
171
172         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
173                 struct nlattr *nla;
174                 /* parse and validate only */
175                 err = drbd_cfg_context_from_attrs(NULL, info);
176                 if (err)
177                         goto fail;
178
179                 /* It was present, and valid,
180                  * copy it over to the reply skb. */
181                 err = nla_put_nohdr(adm_ctx.reply_skb,
182                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
183                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
184                 if (err)
185                         goto fail;
186
187                 /* and assign stuff to the global adm_ctx */
188                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
189                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
190                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
191                 if (nla)
192                         adm_ctx.conn_name = nla_data(nla);
193         } else
194                 adm_ctx.volume = VOLUME_UNSPECIFIED;
195
196         adm_ctx.minor = d_in->minor;
197         adm_ctx.mdev = minor_to_mdev(d_in->minor);
198         adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
199
200         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
201                 drbd_msg_put_info("unknown minor");
202                 return ERR_MINOR_INVALID;
203         }
204         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
205                 drbd_msg_put_info("unknown connection");
206                 return ERR_INVALID_REQUEST;
207         }
208
209         /* some more paranoia, if the request was over-determined */
210         if (adm_ctx.mdev && adm_ctx.tconn &&
211             adm_ctx.mdev->tconn != adm_ctx.tconn) {
212                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
213                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
214                 drbd_msg_put_info("minor exists in different connection");
215                 return ERR_INVALID_REQUEST;
216         }
217         if (adm_ctx.mdev &&
218             adm_ctx.volume != VOLUME_UNSPECIFIED &&
219             adm_ctx.volume != adm_ctx.mdev->vnr) {
220                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
221                                 adm_ctx.minor, adm_ctx.volume,
222                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
223                 drbd_msg_put_info("minor exists as different volume");
224                 return ERR_INVALID_REQUEST;
225         }
226         if (adm_ctx.mdev && !adm_ctx.tconn)
227                 adm_ctx.tconn = adm_ctx.mdev->tconn;
228         return NO_ERROR;
229
230 fail:
231         nlmsg_free(adm_ctx.reply_skb);
232         adm_ctx.reply_skb = NULL;
233         return -ENOMEM;
234 }
235
236 static int drbd_adm_finish(struct genl_info *info, int retcode)
237 {
238         struct nlattr *nla;
239         const char *conn_name = NULL;
240
241         if (!adm_ctx.reply_skb)
242                 return -ENOMEM;
243
244         adm_ctx.reply_dh->ret_code = retcode;
245
246         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
247         if (nla) {
248                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
249                 if (nla)
250                         conn_name = nla_data(nla);
251         }
252
253         drbd_adm_send_reply(adm_ctx.reply_skb, info);
254         return 0;
255 }
256
257 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
258 {
259         char *afs;
260
261         if (get_net_conf(tconn)) {
262                 switch (((struct sockaddr *)tconn->net_conf->peer_addr)->sa_family) {
263                 case AF_INET6:
264                         afs = "ipv6";
265                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
266                                  &((struct sockaddr_in6 *)tconn->net_conf->peer_addr)->sin6_addr);
267                         break;
268                 case AF_INET:
269                         afs = "ipv4";
270                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
271                                  &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
272                         break;
273                 default:
274                         afs = "ssocks";
275                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
276                                  &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
277                 }
278                 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
279                 put_net_conf(tconn);
280         }
281 }
282
283 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
284 {
285         char *envp[] = { "HOME=/",
286                         "TERM=linux",
287                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
288                          (char[20]) { }, /* address family */
289                          (char[60]) { }, /* address */
290                         NULL };
291         char mb[12];
292         char *argv[] = {usermode_helper, cmd, mb, NULL };
293         struct sib_info sib;
294         int ret;
295
296         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
297         setup_khelper_env(mdev->tconn, envp);
298
299         /* The helper may take some time.
300          * write out any unsynced meta data changes now */
301         drbd_md_sync(mdev);
302
303         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
304         sib.sib_reason = SIB_HELPER_PRE;
305         sib.helper_name = cmd;
306         drbd_bcast_event(mdev, &sib);
307         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
308         if (ret)
309                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
310                                 usermode_helper, cmd, mb,
311                                 (ret >> 8) & 0xff, ret);
312         else
313                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
314                                 usermode_helper, cmd, mb,
315                                 (ret >> 8) & 0xff, ret);
316         sib.sib_reason = SIB_HELPER_POST;
317         sib.helper_exit_code = ret;
318         drbd_bcast_event(mdev, &sib);
319
320         if (ret < 0) /* Ignore any ERRNOs we got. */
321                 ret = 0;
322
323         return ret;
324 }
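/* For illustration only: with the default usermode_helper, a call like
 * drbd_khelper(mdev, cmd) for minor 0 roughly amounts to running
 *	/sbin/drbdadm <cmd> minor-0
 * with DRBD_PEER_AF and DRBD_PEER_ADDRESS exported by setup_khelper_env()
 * above; the helper path is a module parameter and may differ. */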
325
326 static void conn_md_sync(struct drbd_tconn *tconn)
327 {
328         struct drbd_conf *mdev;
329         int vnr;
330
331         down_read(&drbd_cfg_rwsem);
332         idr_for_each_entry(&tconn->volumes, mdev, vnr)
333                 drbd_md_sync(mdev);
334         up_read(&drbd_cfg_rwsem);
335 }
336
337 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
338 {
339         char *envp[] = { "HOME=/",
340                         "TERM=linux",
341                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
342                          (char[20]) { }, /* address family */
343                          (char[60]) { }, /* address */
344                         NULL };
345         char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
346         int ret;
347
348         setup_khelper_env(tconn, envp);
349         conn_md_sync(tconn);
350
351         conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
352         /* TODO: conn_bcast_event() ?? */
353
354         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
355         if (ret)
356                 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
357                           usermode_helper, cmd, tconn->name,
358                           (ret >> 8) & 0xff, ret);
359         else
360                 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
361                           usermode_helper, cmd, tconn->name,
362                           (ret >> 8) & 0xff, ret);
363         /* TODO: conn_bcast_event() ?? */
364
365         if (ret < 0) /* Ignore any ERRNOs we got. */
366                 ret = 0;
367
368         return ret;
369 }
370
371 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
372 {
373         enum drbd_fencing_p fp = FP_NOT_AVAIL;
374         struct drbd_conf *mdev;
375         int vnr;
376
377         rcu_read_lock();
378         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
379                 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
380                         fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
381                         put_ldev(mdev);
382                 }
383         }
384         rcu_read_unlock();
385
386         return fp;
387 }
388
389 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
390 {
391         union drbd_state mask = { };
392         union drbd_state val = { };
393         enum drbd_fencing_p fp;
394         char *ex_to_string;
395         int r;
396
397         if (tconn->cstate >= C_WF_REPORT_PARAMS) {
398                 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
399                 return false;
400         }
401
402         fp = highest_fencing_policy(tconn);
403         switch (fp) {
404         case FP_NOT_AVAIL:
405                 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
406                 goto out;
407         case FP_DONT_CARE:
408                 return true;
409         default: ;
410         }
411
412         r = conn_khelper(tconn, "fence-peer");
413
414         switch ((r>>8) & 0xff) {
415         case 3: /* peer is inconsistent */
416                 ex_to_string = "peer is inconsistent or worse";
417                 mask.pdsk = D_MASK;
418                 val.pdsk = D_INCONSISTENT;
419                 break;
420         case 4: /* peer got outdated, or was already outdated */
421                 ex_to_string = "peer was fenced";
422                 mask.pdsk = D_MASK;
423                 val.pdsk = D_OUTDATED;
424                 break;
425         case 5: /* peer was down */
426                 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
427                         /* we will(have) create(d) a new UUID anyways... */
428                         ex_to_string = "peer is unreachable, assumed to be dead";
429                         mask.pdsk = D_MASK;
430                         val.pdsk = D_OUTDATED;
431                 } else {
432                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
433                 }
434                 break;
435         case 6: /* Peer is primary, voluntarily outdate myself.
436                  * This is useful when an unconnected R_SECONDARY is asked to
437                  * become R_PRIMARY, but finds the other peer being active. */
438                 ex_to_string = "peer is active";
439                 conn_warn(tconn, "Peer is primary, outdating myself.\n");
440                 mask.disk = D_MASK;
441                 val.disk = D_OUTDATED;
442                 break;
443         case 7:
444                 if (fp != FP_STONITH)
445                         conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
446                 ex_to_string = "peer was stonithed";
447                 mask.pdsk = D_MASK;
448                 val.pdsk = D_OUTDATED;
449                 break;
450         default:
451                 /* The script is broken ... */
452                 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
453                 return false; /* Eventually leave IO frozen */
454         }
455
456         conn_info(tconn, "fence-peer helper returned %d (%s)\n",
457                   (r>>8) & 0xff, ex_to_string);
458
459  out:
460
461         /* Not using
462            conn_request_state(tconn, mask, val, CS_VERBOSE);
463            here, because we might have been able to re-establish the connection in the
464            meantime. */
465         spin_lock_irq(&tconn->req_lock);
466         if (tconn->cstate < C_WF_REPORT_PARAMS)
467                 _conn_request_state(tconn, mask, val, CS_VERBOSE);
468         spin_unlock_irq(&tconn->req_lock);
469
470         return conn_highest_pdsk(tconn) <= D_OUTDATED;
471 }
472
473 static int _try_outdate_peer_async(void *data)
474 {
475         struct drbd_tconn *tconn = (struct drbd_tconn *)data;
476
477         conn_try_outdate_peer(tconn);
478
479         return 0;
480 }
481
482 void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
483 {
484         struct task_struct *opa;
485
486         opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
487         if (IS_ERR(opa))
488                 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
489 }
490
491 enum drbd_state_rv
492 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
493 {
494         const int max_tries = 4;
495         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
496         int try = 0;
497         int forced = 0;
498         union drbd_state mask, val;
499
500         if (new_role == R_PRIMARY)
501                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
502
503         mutex_lock(mdev->state_mutex);
504
505         mask.i = 0; mask.role = R_MASK;
506         val.i  = 0; val.role  = new_role;
507
508         while (try++ < max_tries) {
509                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
510
511                 /* in case we first succeeded in outdating the peer,
512                  * but now suddenly can establish a connection again */
513                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
514                         val.pdsk = 0;
515                         mask.pdsk = 0;
516                         continue;
517                 }
518
519                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
520                     (mdev->state.disk < D_UP_TO_DATE &&
521                      mdev->state.disk >= D_INCONSISTENT)) {
522                         mask.disk = D_MASK;
523                         val.disk  = D_UP_TO_DATE;
524                         forced = 1;
525                         continue;
526                 }
527
528                 if (rv == SS_NO_UP_TO_DATE_DISK &&
529                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
530                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
531
532                         if (conn_try_outdate_peer(mdev->tconn)) {
533                                 val.disk = D_UP_TO_DATE;
534                                 mask.disk = D_MASK;
535                         }
536                         continue;
537                 }
538
539                 if (rv == SS_NOTHING_TO_DO)
540                         goto out;
541                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
542                         if (!conn_try_outdate_peer(mdev->tconn) && force) {
543                                 dev_warn(DEV, "Forced into split brain situation!\n");
544                                 mask.pdsk = D_MASK;
545                                 val.pdsk  = D_OUTDATED;
546
547                         }
548                         continue;
549                 }
550                 if (rv == SS_TWO_PRIMARIES) {
551                         /* Maybe the peer is detected as dead very soon...
552                            retry at most once more in this case. */
553                         schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
554                         if (try < max_tries)
555                                 try = max_tries - 1;
556                         continue;
557                 }
558                 if (rv < SS_SUCCESS) {
559                         rv = _drbd_request_state(mdev, mask, val,
560                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
561                         if (rv < SS_SUCCESS)
562                                 goto out;
563                 }
564                 break;
565         }
566
567         if (rv < SS_SUCCESS)
568                 goto out;
569
570         if (forced)
571                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
572
573         /* Wait until nothing is on the fly :) */
574         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
575
576         if (new_role == R_SECONDARY) {
577                 set_disk_ro(mdev->vdisk, true);
578                 if (get_ldev(mdev)) {
579                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
580                         put_ldev(mdev);
581                 }
582         } else {
583                 if (get_net_conf(mdev->tconn)) {
584                         mdev->tconn->net_conf->want_lose = 0;
585                         put_net_conf(mdev->tconn);
586                 }
587                 set_disk_ro(mdev->vdisk, false);
588                 if (get_ldev(mdev)) {
589                         if (((mdev->state.conn < C_CONNECTED ||
590                                mdev->state.pdsk <= D_FAILED)
591                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
592                                 drbd_uuid_new_current(mdev);
593
594                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
595                         put_ldev(mdev);
596                 }
597         }
598
599         /* write-out of the bitmap areas covered by the activity log
600          * to stable storage was already done in the after-state-change work */
601
602         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
603                 /* if this was forced, we should consider sync */
604                 if (forced)
605                         drbd_send_uuids(mdev);
606                 drbd_send_state(mdev);
607         }
608
609         drbd_md_sync(mdev);
610
611         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
612 out:
613         mutex_unlock(mdev->state_mutex);
614         return rv;
615 }
616
617 static const char *from_attrs_err_to_txt(int err)
618 {
619         return  err == -ENOMSG ? "required attribute missing" :
620                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
621                 err == -EEXIST ? "can not change invariant setting" :
622                 "invalid attribute value";
623 }
624
625 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
626 {
627         struct set_role_parms parms;
628         int err;
629         enum drbd_ret_code retcode;
630
631         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
632         if (!adm_ctx.reply_skb)
633                 return retcode;
634         if (retcode != NO_ERROR)
635                 goto out;
636
637         memset(&parms, 0, sizeof(parms));
638         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
639                 err = set_role_parms_from_attrs(&parms, info);
640                 if (err) {
641                         retcode = ERR_MANDATORY_TAG;
642                         drbd_msg_put_info(from_attrs_err_to_txt(err));
643                         goto out;
644                 }
645         }
646
647         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
648                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
649         else
650                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
651 out:
652         drbd_adm_finish(info, retcode);
653         return 0;
654 }
655
656 /* initializes the md.*_offset members, so we are able to find
657  * the on disk meta data */
658 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
659                                        struct drbd_backing_dev *bdev)
660 {
661         sector_t md_size_sect = 0;
662         switch (bdev->dc.meta_dev_idx) {
663         default:
664                 /* v07 style fixed size indexed meta data */
665                 bdev->md.md_size_sect = MD_RESERVED_SECT;
666                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
667                 bdev->md.al_offset = MD_AL_OFFSET;
668                 bdev->md.bm_offset = MD_BM_OFFSET;
669                 break;
670         case DRBD_MD_INDEX_FLEX_EXT:
671                 /* just occupy the full device; unit: sectors */
672                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
673                 bdev->md.md_offset = 0;
674                 bdev->md.al_offset = MD_AL_OFFSET;
675                 bdev->md.bm_offset = MD_BM_OFFSET;
676                 break;
677         case DRBD_MD_INDEX_INTERNAL:
678         case DRBD_MD_INDEX_FLEX_INT:
679                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
680                 /* al size is still fixed */
681                 bdev->md.al_offset = -MD_AL_SECTORS;
682                 /* we need (slightly less than) ~ this many bitmap sectors: */
683                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
684                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
685                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
686                 md_size_sect = ALIGN(md_size_sect, 8);
687
688                 /* plus the "drbd meta data super block",
689                  * and the activity log; */
690                 md_size_sect += MD_BM_OFFSET;
691
692                 bdev->md.md_size_sect = md_size_sect;
693                 /* bitmap offset is adjusted by 'super' block size */
694                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
695                 break;
696         }
697 }
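/* Note on the conventions above (no new behaviour): al_offset and bm_offset
 * are sector offsets relative to md_offset, the location of the meta data
 * super block.  They are positive for the v07-style fixed-size indexed
 * layout, and negative for internal/flex-internal meta data, where the
 * activity log and the bitmap occupy the sectors in front of the super
 * block near the end of the device. */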
698
699 /* input size is expected to be in KB */
700 char *ppsize(char *buf, unsigned long long size)
701 {
702         /* Needs 9 bytes at max including trailing NUL:
703          * -1ULL ==> "16384 EB" */
704         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
705         int base = 0;
706         while (size >= 10000 && base < sizeof(units)-1) {
707                 /* shift + round */
708                 size = (size >> 10) + !!(size & (1<<9));
709                 base++;
710         }
711         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
712
713         return buf;
714 }
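/* Worked example (illustrative): ppsize(buf, 1048576) yields "1024 MB";
 * a value is only promoted to the next unit once it reaches 10000, and the
 * !!(size & (1<<9)) term rounds the 10-bit shift to nearest. */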
715
716 /* there is still a theoretical deadlock when called from receiver
717  * on a D_INCONSISTENT R_PRIMARY:
718  *  remote READ does inc_ap_bio, receiver would need to receive answer
719  *  packet from remote to dec_ap_bio again.
720  *  receiver receive_sizes(), comes here,
721  *  waits for ap_bio_cnt == 0. -> deadlock.
722  * but this cannot happen, actually, because:
723  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
724  *  (not connected, or bad/no disk on peer):
725  *  see drbd_fail_request_early, ap_bio_cnt is zero.
726  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
727  *  peer may not initiate a resize.
728  */
729 /* Note these are not to be confused with
730  * drbd_adm_suspend_io/drbd_adm_resume_io,
731  * which are (sub) state changes triggered by admin (drbdsetup),
732  * and can be long lived.
733  * This changes an mdev->flag, is triggered by drbd internals,
734  * and should be short-lived. */
735 void drbd_suspend_io(struct drbd_conf *mdev)
736 {
737         set_bit(SUSPEND_IO, &mdev->flags);
738         if (drbd_suspended(mdev))
739                 return;
740         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
741 }
742
743 void drbd_resume_io(struct drbd_conf *mdev)
744 {
745         clear_bit(SUSPEND_IO, &mdev->flags);
746         wake_up(&mdev->misc_wait);
747 }
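/* drbd_suspend_io()/drbd_resume_io() bracket short internal operations that
 * must not race with application IO, e.g. the bitmap resize in
 * drbd_determine_dev_size() below and the attach path in drbd_adm_attach(). */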
748
749 /**
750  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
751  * @mdev:       DRBD device.
752  *
753  * Returns unchanged, shrunk or grew on success, dev_size_error on failure.
754  * You should call drbd_md_sync() after calling this function.
755  */
756 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
757 {
758         sector_t prev_first_sect, prev_size; /* previous meta location */
759         sector_t la_size;
760         sector_t size;
761         char ppb[10];
762
763         int md_moved, la_size_changed;
764         enum determine_dev_size rv = unchanged;
765
766         /* race:
767          * application request passes inc_ap_bio,
768          * but then cannot get an AL-reference.
769          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
770          *
771          * to avoid that:
772          * Suspend IO right here.
773          * still lock the act_log to not trigger ASSERTs there.
774          */
775         drbd_suspend_io(mdev);
776
777         /* no wait necessary anymore, actually we could assert that */
778         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
779
780         prev_first_sect = drbd_md_first_sector(mdev->ldev);
781         prev_size = mdev->ldev->md.md_size_sect;
782         la_size = mdev->ldev->md.la_size_sect;
783
784         /* TODO: should only be some assert here, not (re)init... */
785         drbd_md_set_sector_offsets(mdev, mdev->ldev);
786
787         size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
788
789         if (drbd_get_capacity(mdev->this_bdev) != size ||
790             drbd_bm_capacity(mdev) != size) {
791                 int err;
792                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
793                 if (unlikely(err)) {
794                         /* currently there is only one error: ENOMEM! */
795                         size = drbd_bm_capacity(mdev)>>1;
796                         if (size == 0) {
797                                 dev_err(DEV, "OUT OF MEMORY! "
798                                     "Could not allocate bitmap!\n");
799                         } else {
800                                 dev_err(DEV, "BM resizing failed. "
801                                     "Leaving size unchanged at size = %lu KB\n",
802                                     (unsigned long)size);
803                         }
804                         rv = dev_size_error;
805                 }
806                 /* racy, see comments above. */
807                 drbd_set_my_capacity(mdev, size);
808                 mdev->ldev->md.la_size_sect = size;
809                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
810                      (unsigned long long)size>>1);
811         }
812         if (rv == dev_size_error)
813                 goto out;
814
815         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
816
817         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
818                 || prev_size       != mdev->ldev->md.md_size_sect;
819
820         if (la_size_changed || md_moved) {
821                 int err;
822
823                 drbd_al_shrink(mdev); /* All extents inactive. */
824                 dev_info(DEV, "Writing the whole bitmap, %s\n",
825                          la_size_changed && md_moved ? "size changed and md moved" :
826                          la_size_changed ? "size changed" : "md moved");
827                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
828                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
829                                 "size changed", BM_LOCKED_MASK);
830                 if (err) {
831                         rv = dev_size_error;
832                         goto out;
833                 }
834                 drbd_md_mark_dirty(mdev);
835         }
836
837         if (size > la_size)
838                 rv = grew;
839         if (size < la_size)
840                 rv = shrunk;
841 out:
842         lc_unlock(mdev->act_log);
843         wake_up(&mdev->al_wait);
844         drbd_resume_io(mdev);
845
846         return rv;
847 }
848
849 sector_t
850 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
851 {
852         sector_t p_size = mdev->p_size;   /* partner's disk size. */
853         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
854         sector_t m_size; /* my size */
855         sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
856         sector_t size = 0;
857
858         m_size = drbd_get_max_capacity(bdev);
859
860         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
861                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
862                 p_size = m_size;
863         }
864
865         if (p_size && m_size) {
866                 size = min_t(sector_t, p_size, m_size);
867         } else {
868                 if (la_size) {
869                         size = la_size;
870                         if (m_size && m_size < size)
871                                 size = m_size;
872                         if (p_size && p_size < size)
873                                 size = p_size;
874                 } else {
875                         if (m_size)
876                                 size = m_size;
877                         if (p_size)
878                                 size = p_size;
879                 }
880         }
881
882         if (size == 0)
883                 dev_err(DEV, "Both nodes diskless!\n");
884
885         if (u_size) {
886                 if (u_size > size)
887                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
888                             (unsigned long)u_size>>1, (unsigned long)size>>1);
889                 else
890                         size = u_size;
891         }
892
893         return size;
894 }
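/* Illustrative example of the policy above (not additional logic): with a
 * 100 GiB local backing device and an 80 GiB peer, the agreed size is
 * 80 GiB; if only one side is known, the last agreed size is used, clipped
 * to whatever is known; a user-requested disk_size smaller than the result
 * always wins. */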
895
896 /**
897  * drbd_check_al_size() - Ensures that the AL is of the right size
898  * @mdev:       DRBD device.
899  *
900  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
901  * failed, and 0 on success. You should call drbd_md_sync() after you called
902  * this function.
903  */
904 static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
905 {
906         struct lru_cache *n, *t;
907         struct lc_element *e;
908         unsigned int in_use;
909         int i;
910
911         if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
912                 dc->al_extents = DRBD_AL_EXTENTS_MIN;
913
914         if (mdev->act_log &&
915             mdev->act_log->nr_elements == dc->al_extents)
916                 return 0;
917
918         in_use = 0;
919         t = mdev->act_log;
920         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
921                 dc->al_extents, sizeof(struct lc_element), 0);
922
923         if (n == NULL) {
924                 dev_err(DEV, "Cannot allocate act_log lru!\n");
925                 return -ENOMEM;
926         }
927         spin_lock_irq(&mdev->al_lock);
928         if (t) {
929                 for (i = 0; i < t->nr_elements; i++) {
930                         e = lc_element_by_index(t, i);
931                         if (e->refcnt)
932                                 dev_err(DEV, "refcnt(%d)==%d\n",
933                                     e->lc_number, e->refcnt);
934                         in_use += e->refcnt;
935                 }
936         }
937         if (!in_use)
938                 mdev->act_log = n;
939         spin_unlock_irq(&mdev->al_lock);
940         if (in_use) {
941                 dev_err(DEV, "Activity log still in use!\n");
942                 lc_destroy(n);
943                 return -EBUSY;
944         } else {
945                 if (t)
946                         lc_destroy(t);
947         }
948         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */
949         return 0;
950 }
951
952 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
953 {
954         struct request_queue * const q = mdev->rq_queue;
955         int max_hw_sectors = max_bio_size >> 9;
956         int max_segments = 0;
957
958         if (get_ldev_if_state(mdev, D_ATTACHING)) {
959                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
960
961                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
962                 max_segments = mdev->ldev->dc.max_bio_bvecs;
963                 put_ldev(mdev);
964         }
965
966         blk_queue_logical_block_size(q, 512);
967         blk_queue_max_hw_sectors(q, max_hw_sectors);
968         /* This is the workaround for "bio would need to, but cannot, be split" */
969         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
970         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
971
972         if (get_ldev_if_state(mdev, D_ATTACHING)) {
973                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
974
975                 blk_queue_stack_limits(q, b);
976
977                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
978                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
979                                  q->backing_dev_info.ra_pages,
980                                  b->backing_dev_info.ra_pages);
981                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
982                 }
983                 put_ldev(mdev);
984         }
985 }
986
987 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
988 {
989         int now, new, local, peer;
990
991         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
992         local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
993         peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
994
995         if (get_ldev_if_state(mdev, D_ATTACHING)) {
996                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
997                 mdev->local_max_bio_size = local;
998                 put_ldev(mdev);
999         }
1000
1001         /* We may ignore peer limits if the peer is modern enough.
1002            From 8.3.8 onwards the peer can use multiple
1003            BIOs for a single peer_request */
1004         if (mdev->state.conn >= C_CONNECTED) {
1005                 if (mdev->tconn->agreed_pro_version < 94)
1006                         peer = mdev->peer_max_bio_size;
1007                 else if (mdev->tconn->agreed_pro_version == 94)
1008                         peer = DRBD_MAX_SIZE_H80_PACKET;
1009                 else /* drbd 8.3.8 onwards */
1010                         peer = DRBD_MAX_BIO_SIZE;
1011         }
1012
1013         new = min_t(int, local, peer);
1014
1015         if (mdev->state.role == R_PRIMARY && new < now)
1016                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
1017
1018         if (new != now)
1019                 dev_info(DEV, "max BIO size = %u\n", new);
1020
1021         drbd_setup_queue_param(mdev, new);
1022 }
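/* Example (illustrative): if the local backing queue allows 512 KiB per bio
 * but the peer still runs protocol 94, the effective limit becomes
 * min(512 KiB, DRBD_MAX_SIZE_H80_PACKET); with protocol 95 or newer it is
 * min(512 KiB, DRBD_MAX_BIO_SIZE). */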
1023
1024 /* serialize deconfig (worker exiting, doing cleanup)
1025  * and reconfig (drbdsetup disk, drbdsetup net)
1026  *
1027  * Wait for a potentially exiting worker, then restart it,
1028  * or start a new one.  Flush any pending work, there may still be an
1029  * after_state_change queued.
1030  */
1031 static void conn_reconfig_start(struct drbd_tconn *tconn)
1032 {
1033         wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
1034         wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
1035         drbd_thread_start(&tconn->worker);
1036         conn_flush_workqueue(tconn);
1037 }
1038
1039 /* if still unconfigured, stops worker again.
1040  * if configured now, clears CONFIG_PENDING.
1041  * wakes potential waiters */
1042 static void conn_reconfig_done(struct drbd_tconn *tconn)
1043 {
1044         spin_lock_irq(&tconn->req_lock);
1045         if (conn_all_vols_unconf(tconn)) {
1046                 set_bit(OBJECT_DYING, &tconn->flags);
1047                 drbd_thread_stop_nowait(&tconn->worker);
1048         } else
1049                 clear_bit(CONFIG_PENDING, &tconn->flags);
1050         spin_unlock_irq(&tconn->req_lock);
1051         wake_up(&tconn->ping_wait);
1052 }
1053
1054 /* Make sure IO is suspended before calling this function. */
1055 static void drbd_suspend_al(struct drbd_conf *mdev)
1056 {
1057         int s = 0;
1058
1059         if (!lc_try_lock(mdev->act_log)) {
1060                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1061                 return;
1062         }
1063
1064         drbd_al_shrink(mdev);
1065         spin_lock_irq(&mdev->tconn->req_lock);
1066         if (mdev->state.conn < C_CONNECTED)
1067                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1068         spin_unlock_irq(&mdev->tconn->req_lock);
1069         lc_unlock(mdev->act_log);
1070
1071         if (s)
1072                 dev_info(DEV, "Suspended AL updates\n");
1073 }
1074
1075 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1076 {
1077         enum drbd_ret_code retcode;
1078         struct drbd_conf *mdev;
1079         struct disk_conf *ndc; /* new disk conf */
1080         int err, fifo_size;
1081         int *rs_plan_s = NULL;
1082
1083         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1084         if (!adm_ctx.reply_skb)
1085                 return retcode;
1086         if (retcode != NO_ERROR)
1087                 goto out;
1088
1089         mdev = adm_ctx.mdev;
1090
1091         /* we also need a disk
1092          * to change the options on */
1093         if (!get_ldev(mdev)) {
1094                 retcode = ERR_NO_DISK;
1095                 goto out;
1096         }
1097
1098 /* FIXME freeze IO, cluster wide.
1099  *
1100  * We should make sure no-one uses
1101  * some half-updated struct when we
1102  * assign it later. */
1103
1104         ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
1105         if (!ndc) {
1106                 retcode = ERR_NOMEM;
1107                 goto fail;
1108         }
1109
1110         memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
1111         err = disk_conf_from_attrs_for_change(ndc, info);
1112         if (err) {
1113                 retcode = ERR_MANDATORY_TAG;
1114                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1115         }
1116
1117         if (!expect(ndc->resync_rate >= 1))
1118                 ndc->resync_rate = 1;
1119
1120         /* clip to allowed range */
1121         if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
1122                 ndc->al_extents = DRBD_AL_EXTENTS_MIN;
1123         if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
1124                 ndc->al_extents = DRBD_AL_EXTENTS_MAX;
1125
1126         /* most sanity checks done, try to assign the new sync-after
1127          * dependency.  need to hold the global lock in there,
1128          * to avoid a race in the dependency loop check. */
1129         retcode = drbd_alter_sa(mdev, ndc->resync_after);
1130         if (retcode != NO_ERROR)
1131                 goto fail;
1132
1133         fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1134         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1135                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1136                 if (!rs_plan_s) {
1137                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1138                         retcode = ERR_NOMEM;
1139                         goto fail;
1140                 }
1141         }
1142
1143         if (fifo_size != mdev->rs_plan_s.size) {
1144                 kfree(mdev->rs_plan_s.values);
1145                 mdev->rs_plan_s.values = rs_plan_s;
1146                 mdev->rs_plan_s.size   = fifo_size;
1147                 mdev->rs_planed = 0;
1148                 rs_plan_s = NULL;
1149         }
1150
1151         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1152         drbd_al_shrink(mdev);
1153         err = drbd_check_al_size(mdev, ndc);
1154         lc_unlock(mdev->act_log);
1155         wake_up(&mdev->al_wait);
1156
1157         if (err) {
1158                 retcode = ERR_NOMEM;
1159                 goto fail;
1160         }
1161
1162         /* FIXME
1163          * To avoid someone looking at a half-updated struct, we probably
1164          * should have a rw-semaphore on net_conf and disk_conf.
1165          */
1166         mdev->ldev->dc = *ndc;
1167
1168         drbd_md_sync(mdev);
1169
1170
1171         if (mdev->state.conn >= C_CONNECTED)
1172                 drbd_send_sync_param(mdev);
1173
1174  fail:
1175         put_ldev(mdev);
1176         kfree(ndc);
1177         kfree(rs_plan_s);
1178  out:
1179         drbd_adm_finish(info, retcode);
1180         return 0;
1181 }
1182
1183 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1184 {
1185         struct drbd_conf *mdev;
1186         int err;
1187         enum drbd_ret_code retcode;
1188         enum determine_dev_size dd;
1189         sector_t max_possible_sectors;
1190         sector_t min_md_device_sectors;
1191         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1192         struct block_device *bdev;
1193         struct lru_cache *resync_lru = NULL;
1194         union drbd_state ns, os;
1195         enum drbd_state_rv rv;
1196         int cp_discovered = 0;
1197
1198         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1199         if (!adm_ctx.reply_skb)
1200                 return retcode;
1201         if (retcode != NO_ERROR)
1202                 goto finish;
1203
1204         mdev = adm_ctx.mdev;
1205         conn_reconfig_start(mdev->tconn);
1206
1207         /* if you want to reconfigure, please tear down first */
1208         if (mdev->state.disk > D_DISKLESS) {
1209                 retcode = ERR_DISK_CONFIGURED;
1210                 goto fail;
1211         }
1212         /* It may just now have detached because of IO error.  Make sure
1213          * drbd_ldev_destroy is done already, we may end up here very fast,
1214          * e.g. if someone calls attach from the on-io-error handler,
1215          * to realize a "hot spare" feature (not that I'd recommend that) */
1216         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1217
1218         /* allocation not in the IO path, drbdsetup context */
1219         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1220         if (!nbc) {
1221                 retcode = ERR_NOMEM;
1222                 goto fail;
1223         }
1224
1225         nbc->dc = (struct disk_conf) {
1226                 {}, 0, /* backing_dev */
1227                 {}, 0, /* meta_dev */
1228                 0, /* meta_dev_idx */
1229                 DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
1230                 DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
1231                 DRBD_ON_IO_ERROR_DEF, /* on_io_error */
1232                 DRBD_FENCING_DEF, /* fencing */
1233                 DRBD_RATE_DEF, /* resync_rate */
1234                 DRBD_AFTER_DEF, /* resync_after */
1235                 DRBD_AL_EXTENTS_DEF, /* al_extents */
1236                 DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
1237                 DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
1238                 DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
1239                 DRBD_C_MAX_RATE_DEF, /* c_max_rate */
1240                 DRBD_C_MIN_RATE_DEF, /* c_min_rate */
1241                 0, /* no_disk_barrier */
1242                 0, /* no_disk_flush */
1243                 0, /* no_disk_drain */
1244                 0, /* no_md_flush */
1245         };
1246
1247         err = disk_conf_from_attrs(&nbc->dc, info);
1248         if (err) {
1249                 retcode = ERR_MANDATORY_TAG;
1250                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1251                 goto fail;
1252         }
1253
1254         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1255                 retcode = ERR_MD_IDX_INVALID;
1256                 goto fail;
1257         }
1258
1259         if (get_net_conf(mdev->tconn)) {
1260                 int prot = mdev->tconn->net_conf->wire_protocol;
1261                 put_net_conf(mdev->tconn);
1262                 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
1263                         retcode = ERR_STONITH_AND_PROT_A;
1264                         goto fail;
1265                 }
1266         }
1267
1268         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1269                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1270         if (IS_ERR(bdev)) {
1271                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1272                         PTR_ERR(bdev));
1273                 retcode = ERR_OPEN_DISK;
1274                 goto fail;
1275         }
1276         nbc->backing_bdev = bdev;
1277
1278         /*
1279          * meta_dev_idx >= 0: external fixed size, possibly multiple
1280          * drbd sharing one meta device.  TODO in that case, paranoia
1281          * check that [md_bdev, meta_dev_idx] is not yet used by some
1282          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1283          * should check it for you already; but if you don't, or
1284          * someone fooled it, we need to double check here)
1285          */
1286         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1287                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1288                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1289                                   (void *)mdev : (void *)drbd_m_holder);
1290         if (IS_ERR(bdev)) {
1291                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1292                         PTR_ERR(bdev));
1293                 retcode = ERR_OPEN_MD_DISK;
1294                 goto fail;
1295         }
1296         nbc->md_bdev = bdev;
1297
1298         if ((nbc->backing_bdev == nbc->md_bdev) !=
1299             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1300              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1301                 retcode = ERR_MD_IDX_INVALID;
1302                 goto fail;
1303         }
1304
1305         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1306                         1, 61, sizeof(struct bm_extent),
1307                         offsetof(struct bm_extent, lce));
1308         if (!resync_lru) {
1309                 retcode = ERR_NOMEM;
1310                 goto fail;
1311         }
1312
1313         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1314         drbd_md_set_sector_offsets(mdev, nbc);
1315
1316         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1317                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1318                         (unsigned long long) drbd_get_max_capacity(nbc),
1319                         (unsigned long long) nbc->dc.disk_size);
1320                 retcode = ERR_DISK_TO_SMALL;
1321                 goto fail;
1322         }
1323
1324         if ((int)nbc->dc.meta_dev_idx < 0) {
1325                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1326                 /* at least one MB, otherwise it does not make sense */
1327                 min_md_device_sectors = (2<<10);
1328         } else {
1329                 max_possible_sectors = DRBD_MAX_SECTORS;
1330                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1331         }
1332
1333         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1334                 retcode = ERR_MD_DISK_TO_SMALL;
1335                 dev_warn(DEV, "refusing attach: md-device too small, "
1336                      "at least %llu sectors needed for this meta-disk type\n",
1337                      (unsigned long long) min_md_device_sectors);
1338                 goto fail;
1339         }
1340
1341         /* Make sure the new disk is big enough
1342          * (we may currently be R_PRIMARY with no local disk...) */
1343         if (drbd_get_max_capacity(nbc) <
1344             drbd_get_capacity(mdev->this_bdev)) {
1345                 retcode = ERR_DISK_TO_SMALL;
1346                 goto fail;
1347         }
1348
1349         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1350
1351         if (nbc->known_size > max_possible_sectors) {
1352                 dev_warn(DEV, "==> truncating very big lower level device "
1353                         "to currently maximum possible %llu sectors <==\n",
1354                         (unsigned long long) max_possible_sectors);
1355                 if ((int)nbc->dc.meta_dev_idx >= 0)
1356                         dev_warn(DEV, "==>> using internal or flexible "
1357                                       "meta data may help <<==\n");
1358         }
1359
1360         drbd_suspend_io(mdev);
1361         /* also wait for the last barrier ack. */
1362         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1363         /* and for any other previously queued work */
1364         drbd_flush_workqueue(mdev);
1365
1366         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1367         retcode = rv;  /* FIXME: Type mismatch. */
1368         drbd_resume_io(mdev);
1369         if (rv < SS_SUCCESS)
1370                 goto fail;
1371
1372         if (!get_ldev_if_state(mdev, D_ATTACHING))
1373                 goto force_diskless;
1374
1375         drbd_md_set_sector_offsets(mdev, nbc);
1376
1377         if (!mdev->bitmap) {
1378                 if (drbd_bm_init(mdev)) {
1379                         retcode = ERR_NOMEM;
1380                         goto force_diskless_dec;
1381                 }
1382         }
1383
1384         retcode = drbd_md_read(mdev, nbc);
1385         if (retcode != NO_ERROR)
1386                 goto force_diskless_dec;
1387
1388         if (mdev->state.conn < C_CONNECTED &&
1389             mdev->state.role == R_PRIMARY &&
1390             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1391                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1392                     (unsigned long long)mdev->ed_uuid);
1393                 retcode = ERR_DATA_NOT_CURRENT;
1394                 goto force_diskless_dec;
1395         }
1396
1397         /* Since we are diskless, fix the activity log first... */
1398         if (drbd_check_al_size(mdev, &nbc->dc)) {
1399                 retcode = ERR_NOMEM;
1400                 goto force_diskless_dec;
1401         }
1402
1403         /* Prevent shrinking of consistent devices! */
1404         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1405             drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
1406                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1407                 retcode = ERR_DISK_TO_SMALL;
1408                 goto force_diskless_dec;
1409         }
1410
1411         if (!drbd_al_read_log(mdev, nbc)) {
1412                 retcode = ERR_IO_MD_DISK;
1413                 goto force_diskless_dec;
1414         }
1415
1416         /* Reset the "barriers don't work" bits here, then force meta data to
1417          * be written, to ensure we determine if barriers are supported. */
1418         if (nbc->dc.no_md_flush)
1419                 set_bit(MD_NO_FUA, &mdev->flags);
1420         else
1421                 clear_bit(MD_NO_FUA, &mdev->flags);
1422
1423         /* Point of no return reached.
1424          * Devices and memory are no longer released by error cleanup below.
1425          * From now on, mdev takes over responsibility, and the state engine
1426          * should clean it up somewhere.  */
1427         D_ASSERT(mdev->ldev == NULL);
1428         mdev->ldev = nbc;
1429         mdev->resync = resync_lru;
1430         nbc = NULL;
1431         resync_lru = NULL;
1432
1433         mdev->write_ordering = WO_bdev_flush;
1434         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1435
1436         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1437                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1438         else
1439                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1440
1441         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1442             !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
1443                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1444                 cp_discovered = 1;
1445         }
1446
1447         mdev->send_cnt = 0;
1448         mdev->recv_cnt = 0;
1449         mdev->read_cnt = 0;
1450         mdev->writ_cnt = 0;
1451
1452         drbd_reconsider_max_bio_size(mdev);
1453
1454         /* If I am currently not R_PRIMARY,
1455          * but meta data primary indicator is set,
1456          * I just now recover from a hard crash,
1457          * and have been R_PRIMARY before that crash.
1458          *
1459          * Now, if I had no connection before that crash
1460          * (have been degraded R_PRIMARY), chances are that
1461          * I won't find my peer now either.
1462          *
1463          * In that case, and _only_ in that case,
1464          * we use the degr-wfc-timeout instead of the default,
1465          * so we can automatically recover from a crash of a
1466          * degraded but active "cluster" after a certain timeout.
1467          */
1468         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1469         if (mdev->state.role != R_PRIMARY &&
1470              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1471             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1472                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1473
1474         dd = drbd_determine_dev_size(mdev, 0);
1475         if (dd == dev_size_error) {
1476                 retcode = ERR_NOMEM_BITMAP;
1477                 goto force_diskless_dec;
1478         } else if (dd == grew)
1479                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1480
1481         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1482                 dev_info(DEV, "Assuming that all blocks are out of sync "
1483                      "(aka FullSync)\n");
1484                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1485                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1486                         retcode = ERR_IO_MD_DISK;
1487                         goto force_diskless_dec;
1488                 }
1489         } else {
1490                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1491                         "read from attaching", BM_LOCKED_MASK)) {
1492                         retcode = ERR_IO_MD_DISK;
1493                         goto force_diskless_dec;
1494                 }
1495         }
1496
1497         if (cp_discovered) {
1498                 drbd_al_apply_to_bm(mdev);
1499                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1500                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1501                         retcode = ERR_IO_MD_DISK;
1502                         goto force_diskless_dec;
1503                 }
1504         }
1505
1506         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1507                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1508
1509         spin_lock_irq(&mdev->tconn->req_lock);
1510         os = drbd_read_state(mdev);
1511         ns = os;
1512         /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT disk state,
1513            otherwise investigate MDF_WAS_UP_TO_DATE...
1514            If MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
1515            otherwise into D_CONSISTENT state.
1516         */
1517         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1518                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1519                         ns.disk = D_CONSISTENT;
1520                 else
1521                         ns.disk = D_OUTDATED;
1522         } else {
1523                 ns.disk = D_INCONSISTENT;
1524         }
1525
1526         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1527                 ns.pdsk = D_OUTDATED;
1528
1529         if (ns.disk == D_CONSISTENT &&
1530             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1531                 ns.disk = D_UP_TO_DATE;
1532
1533         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1534            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1535            this point, because drbd_request_state() modifies these
1536            flags. */
1537
1538         /* In case we are C_CONNECTED postpone any decision on the new disk
1539            state after the negotiation phase. */
1540         if (mdev->state.conn == C_CONNECTED) {
1541                 mdev->new_state_tmp.i = ns.i;
1542                 ns.i = os.i;
1543                 ns.disk = D_NEGOTIATING;
1544
1545                 /* We expect to receive up-to-date UUIDs soon.
1546                    To avoid a race in receive_state, free p_uuid while
1547                    holding req_lock. I.e. atomic with the state change */
1548                 kfree(mdev->p_uuid);
1549                 mdev->p_uuid = NULL;
1550         }
1551
1552         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1553         spin_unlock_irq(&mdev->tconn->req_lock);
1554
1555         if (rv < SS_SUCCESS)
1556                 goto force_diskless_dec;
1557
1558         if (mdev->state.role == R_PRIMARY)
1559                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1560         else
1561                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1562
1563         drbd_md_mark_dirty(mdev);
1564         drbd_md_sync(mdev);
1565
1566         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1567         put_ldev(mdev);
1568         conn_reconfig_done(mdev->tconn);
1569         drbd_adm_finish(info, retcode);
1570         return 0;
1571
1572  force_diskless_dec:
1573         put_ldev(mdev);
1574  force_diskless:
1575         drbd_force_state(mdev, NS(disk, D_FAILED));
1576         drbd_md_sync(mdev);
1577  fail:
1578         conn_reconfig_done(mdev->tconn);
1579         if (nbc) {
1580                 if (nbc->backing_bdev)
1581                         blkdev_put(nbc->backing_bdev,
1582                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1583                 if (nbc->md_bdev)
1584                         blkdev_put(nbc->md_bdev,
1585                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1586                 kfree(nbc);
1587         }
1588         lc_destroy(resync_lru);
1589
1590  finish:
1591         drbd_adm_finish(info, retcode);
1592         return 0;
1593 }
1594
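/* Request the D_DISKLESS disk state, then wait until either the state change
 * did not stick or all local references to the backing device are gone.
 * IO is suspended for the duration so nobody gets stuck in drbd_al_begin_io. */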
1595 static int adm_detach(struct drbd_conf *mdev)
1596 {
1597         enum drbd_state_rv retcode;
1598         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1599         retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1600         wait_event(mdev->misc_wait,
1601                         mdev->state.disk != D_DISKLESS ||
1602                         !atomic_read(&mdev->local_cnt));
1603         drbd_resume_io(mdev);
1604         return retcode;
1605 }
1606
1607 /* Detaching the disk is a process in multiple stages.  First we need to lock
1608  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1609  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1610  * internal references as well.
1611  * Only then we have finally detached. */
1612 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1613 {
1614         enum drbd_ret_code retcode;
1615
1616         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1617         if (!adm_ctx.reply_skb)
1618                 return retcode;
1619         if (retcode != NO_ERROR)
1620                 goto out;
1621
1622         retcode = adm_detach(adm_ctx.mdev);
1623 out:
1624         drbd_adm_finish(info, retcode);
1625         return 0;
1626 }
1627
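/* Return true if any volume of this connection is currently resync source
 * or target, including the paused resync states. */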
1628 static bool conn_resync_running(struct drbd_tconn *tconn)
1629 {
1630         struct drbd_conf *mdev;
1631         bool rv = false;
1632         int vnr;
1633
1634         rcu_read_lock();
1635         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1636                 if (mdev->state.conn == C_SYNC_SOURCE ||
1637                     mdev->state.conn == C_SYNC_TARGET ||
1638                     mdev->state.conn == C_PAUSED_SYNC_S ||
1639                     mdev->state.conn == C_PAUSED_SYNC_T) {
1640                         rv = true;
1641                         break;
1642                 }
1643         }
1644         rcu_read_unlock();
1645
1646         return rv;
1647 }
1648
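/* Return true if any volume of this connection is currently running
 * online verify (C_VERIFY_S or C_VERIFY_T). */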
1649 static bool conn_ov_running(struct drbd_tconn *tconn)
1650 {
1651         struct drbd_conf *mdev;
1652         bool rv = false;
1653         int vnr;
1654
1655         rcu_read_lock();
1656         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1657                 if (mdev->state.conn == C_VERIFY_S ||
1658                     mdev->state.conn == C_VERIFY_T) {
1659                         rv = true;
1660                         break;
1661                 }
1662         }
1663         rcu_read_unlock();
1664
1665         return rv;
1666 }
1667
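/* Common sanity checks for connect and net-opts: changing the wire protocol
 * of an established connection requires agreed_pro_version >= 100,
 * two-primaries requires protocol C, protocol A must not be combined with
 * stonith fencing, congestion policies other than "block" require protocol A,
 * and "discard my data" is refused while we are primary.  Also allocates a
 * bitmap for any volume that does not have one yet. */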
1668 static enum drbd_ret_code
1669 check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1670 {
1671         struct drbd_conf *mdev;
1672         int i;
1673
1674         if (tconn->net_conf && tconn->agreed_pro_version < 100 &&
1675             tconn->cstate == C_WF_REPORT_PARAMS &&
1676             new_conf->wire_protocol != tconn->net_conf->wire_protocol)
1677                 return ERR_NEED_APV_100;
1678
1679         if (new_conf->two_primaries &&
1680             (new_conf->wire_protocol != DRBD_PROT_C))
1681                 return ERR_NOT_PROTO_C;
1682
1683         rcu_read_lock();
1684         idr_for_each_entry(&tconn->volumes, mdev, i) {
1685                 if (get_ldev(mdev)) {
1686                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1687                         put_ldev(mdev);
1688                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
1689                                 rcu_read_unlock();
1690                                 return ERR_STONITH_AND_PROT_A;
1691                         }
1692                 }
1693                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
1694                         rcu_read_unlock();
1695                         return ERR_DISCARD;
1696                 }
1697                 if (!mdev->bitmap) {
1698                         if (drbd_bm_init(mdev)) {
1699                                 rcu_read_unlock();
1700                                 return ERR_NOMEM;
1701                         }
1702                 }
1703         }
1704         rcu_read_unlock();
1705
1706         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1707                 return ERR_CONG_NOT_PROTO_A;
1708
1709         return NO_ERROR;
1710 }
1711
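/* Change selected net options of an already configured connection.
 * The csums algorithm may not be changed while a resync is running,
 * nor the verify algorithm while an online verify is running. */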
1712 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1713 {
1714         enum drbd_ret_code retcode;
1715         struct drbd_tconn *tconn;
1716         struct net_conf *new_conf = NULL;
1717         int err;
1718         int ovr; /* online verify running */
1719         int rsr; /* re-sync running */
1720         struct crypto_hash *verify_tfm = NULL;
1721         struct crypto_hash *csums_tfm = NULL;
1722
1723
1724         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1725         if (!adm_ctx.reply_skb)
1726                 return retcode;
1727         if (retcode != NO_ERROR)
1728                 goto out;
1729
1730         tconn = adm_ctx.tconn;
1731
1732         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1733         if (!new_conf) {
1734                 retcode = ERR_NOMEM;
1735                 goto out;
1736         }
1737
1738         /* we also need a net config
1739          * to change the options on */
1740         if (!get_net_conf(tconn)) {
1741                 drbd_msg_put_info("net conf missing, try connect");
1742                 retcode = ERR_INVALID_REQUEST;
1743                 goto out;
1744         }
1745
1746         conn_reconfig_start(tconn);
1747
1748         memcpy(new_conf, tconn->net_conf, sizeof(*new_conf));
1749         err = net_conf_from_attrs_for_change(new_conf, info);
1750         if (err) {
1751                 retcode = ERR_MANDATORY_TAG;
1752                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1753                 goto fail;
1754         }
1755
1756         retcode = check_net_options(tconn, new_conf);
1757         if (retcode != NO_ERROR)
1758                 goto fail;
1759
1760         /* re-sync running */
1761         rsr = conn_resync_running(tconn);
1762         if (rsr && strcmp(new_conf->csums_alg, tconn->net_conf->csums_alg)) {
1763                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1764                 goto fail;
1765         }
1766
1767         if (!rsr && new_conf->csums_alg[0]) {
1768                 csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
1769                 if (IS_ERR(csums_tfm)) {
1770                         csums_tfm = NULL;
1771                         retcode = ERR_CSUMS_ALG;
1772                         goto fail;
1773                 }
1774
1775                 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1776                         retcode = ERR_CSUMS_ALG_ND;
1777                         goto fail;
1778                 }
1779         }
1780
1781         /* online verify running */
1782         ovr = conn_ov_running(tconn);
1783         if (ovr) {
1784                 if (strcmp(new_conf->verify_alg, tconn->net_conf->verify_alg)) {
1785                         retcode = ERR_VERIFY_RUNNING;
1786                         goto fail;
1787                 }
1788         }
1789
1790         if (!ovr && new_conf->verify_alg[0]) {
1791                 verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
1792                 if (IS_ERR(verify_tfm)) {
1793                         verify_tfm = NULL;
1794                         retcode = ERR_VERIFY_ALG;
1795                         goto fail;
1796                 }
1797
1798                 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1799                         retcode = ERR_VERIFY_ALG_ND;
1800                         goto fail;
1801                 }
1802         }
1803
1804
1805         /* For now, use struct assignment, not pointer assignment.
1806          * We don't have any means to determine who might still
1807          * keep a local alias into the struct,
1808          * so we cannot just free it and hope for the best :(
1809          * FIXME
1810          * To avoid someone looking at a half-updated struct, we probably
1811          * should have a rw-semaphore on net_conf and disk_conf.
1812          */
1813         *tconn->net_conf = *new_conf;
1814
1815         if (!rsr) {
1816                 crypto_free_hash(tconn->csums_tfm);
1817                 tconn->csums_tfm = csums_tfm;
1818                 csums_tfm = NULL;
1819         }
1820         if (!ovr) {
1821                 crypto_free_hash(tconn->verify_tfm);
1822                 tconn->verify_tfm = verify_tfm;
1823                 verify_tfm = NULL;
1824         }
1825
1826         if (tconn->cstate >= C_WF_REPORT_PARAMS)
1827                 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1828
1829  fail:
1830         crypto_free_hash(csums_tfm);
1831         crypto_free_hash(verify_tfm);
1832         kfree(new_conf);
1833         put_net_conf(tconn);
1834         conn_reconfig_done(tconn);
1835  out:
1836         drbd_adm_finish(info, retcode);
1837         return 0;
1838 }
1839
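/* Set up the network configuration for a connection: fill in defaults,
 * apply the given options, refuse addresses already in use by another
 * connection, allocate the crypto transforms and integrity digest buffers,
 * then request the C_UNCONNECTED state. */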
1840 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1841 {
1842         char hmac_name[CRYPTO_MAX_ALG_NAME];
1843         struct drbd_conf *mdev;
1844         struct net_conf *new_conf = NULL;
1845         struct crypto_hash *tfm = NULL;
1846         struct crypto_hash *integrity_w_tfm = NULL;
1847         struct crypto_hash *integrity_r_tfm = NULL;
1848         void *int_dig_in = NULL;
1849         void *int_dig_vv = NULL;
1850         struct drbd_tconn *oconn;
1851         struct drbd_tconn *tconn;
1852         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1853         enum drbd_ret_code retcode;
1854         int i;
1855         int err;
1856
1857         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1858         if (!adm_ctx.reply_skb)
1859                 return retcode;
1860         if (retcode != NO_ERROR)
1861                 goto out;
1862
1863         tconn = adm_ctx.tconn;
1864         conn_reconfig_start(tconn);
1865
1866         if (tconn->cstate > C_STANDALONE) {
1867                 retcode = ERR_NET_CONFIGURED;
1868                 goto fail;
1869         }
1870
1871         /* allocation not in the IO path, cqueue thread context */
1872         new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
1873         if (!new_conf) {
1874                 retcode = ERR_NOMEM;
1875                 goto fail;
1876         }
1877
1878         *new_conf = (struct net_conf) {
1879                 {}, 0, /* my_addr */
1880                 {}, 0, /* peer_addr */
1881                 {}, 0, /* shared_secret */
1882                 {}, 0, /* cram_hmac_alg */
1883                 {}, 0, /* integrity_alg */
1884                 {}, 0, /* verify_alg */
1885                 {}, 0, /* csums_alg */
1886                 DRBD_PROTOCOL_DEF, /* wire_protocol */
1887                 DRBD_CONNECT_INT_DEF, /* try_connect_int */
1888                 DRBD_TIMEOUT_DEF, /* timeout */
1889                 DRBD_PING_INT_DEF, /* ping_int */
1890                 DRBD_PING_TIMEO_DEF, /* ping_timeo */
1891                 DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
1892                 DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
1893                 DRBD_KO_COUNT_DEF, /* ko_count */
1894                 DRBD_MAX_BUFFERS_DEF, /* max_buffers */
1895                 DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
1896                 DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
1897                 DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
1898                 DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
1899                 DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
1900                 DRBD_RR_CONFLICT_DEF, /* rr_conflict */
1901                 DRBD_ON_CONGESTION_DEF, /* on_congestion */
1902                 DRBD_CONG_FILL_DEF, /* cong_fill */
1903                 DRBD_CONG_EXTENTS_DEF, /* cong_extents */
1904                 0, /* two_primaries */
1905                 0, /* want_lose */
1906                 0, /* no_cork */
1907                 0, /* always_asbp */
1908                 0, /* dry_run */
1909                 0, /* use_rle */
1910         };
1911
1912         err = net_conf_from_attrs(new_conf, info);
1913         if (err) {
1914                 retcode = ERR_MANDATORY_TAG;
1915                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1916                 goto fail;
1917         }
1918
1919         retcode = check_net_options(tconn, new_conf);
1920         if (retcode != NO_ERROR)
1921                 goto fail;
1922
1923         retcode = NO_ERROR;
1924
1925         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1926         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1927
1928         /* No need to take drbd_cfg_rwsem here.  All reconfiguration is
1929          * strictly serialized on genl_lock(). We are protected against
1930          * concurrent reconfiguration/addition/deletion */
1931         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1932                 if (oconn == tconn)
1933                         continue;
1934                 if (get_net_conf(oconn)) {
1935                         taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
1936                         if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
1937                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1938                                 retcode = ERR_LOCAL_ADDR;
1939
1940                         taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
1941                         if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
1942                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1943                                 retcode = ERR_PEER_ADDR;
1944
1945                         put_net_conf(oconn);
1946                         if (retcode != NO_ERROR)
1947                                 goto fail;
1948                 }
1949         }
1950
1951         if (new_conf->cram_hmac_alg[0] != 0) {
1952                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1953                         new_conf->cram_hmac_alg);
1954                 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
1955                 if (IS_ERR(tfm)) {
1956                         tfm = NULL;
1957                         retcode = ERR_AUTH_ALG;
1958                         goto fail;
1959                 }
1960
1961                 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1962                         retcode = ERR_AUTH_ALG_ND;
1963                         goto fail;
1964                 }
1965         }
1966
1967         if (new_conf->integrity_alg[0]) {
1968                 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1969                 if (IS_ERR(integrity_w_tfm)) {
1970                         integrity_w_tfm = NULL;
1971                         retcode = ERR_INTEGRITY_ALG;
1972                         goto fail;
1973                 }
1974
1975                 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
1976                         retcode = ERR_INTEGRITY_ALG_ND;
1977                         goto fail;
1978                 }
1979
1980                 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1981                 if (IS_ERR(integrity_r_tfm)) {
1982                         integrity_r_tfm = NULL;
1983                         retcode = ERR_INTEGRITY_ALG;
1984                         goto fail;
1985                 }
1986         }
1987
1988         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
1989
1990         /* allocation not in the IO path, cqueue thread context */
1991         if (integrity_w_tfm) {
1992                 i = crypto_hash_digestsize(integrity_w_tfm);
1993                 int_dig_in = kmalloc(i, GFP_KERNEL);
1994                 if (!int_dig_in) {
1995                         retcode = ERR_NOMEM;
1996                         goto fail;
1997                 }
1998                 int_dig_vv = kmalloc(i, GFP_KERNEL);
1999                 if (!int_dig_vv) {
2000                         retcode = ERR_NOMEM;
2001                         goto fail;
2002                 }
2003         }
2004
2005         conn_flush_workqueue(tconn);
2006         spin_lock_irq(&tconn->req_lock);
2007         if (tconn->net_conf != NULL) {
2008                 retcode = ERR_NET_CONFIGURED;
2009                 spin_unlock_irq(&tconn->req_lock);
2010                 goto fail;
2011         }
2012         tconn->net_conf = new_conf;
2013
2014         crypto_free_hash(tconn->cram_hmac_tfm);
2015         tconn->cram_hmac_tfm = tfm;
2016
2017         crypto_free_hash(tconn->integrity_w_tfm);
2018         tconn->integrity_w_tfm = integrity_w_tfm;
2019
2020         crypto_free_hash(tconn->integrity_r_tfm);
2021         tconn->integrity_r_tfm = integrity_r_tfm;
2022
2023         kfree(tconn->int_dig_in);
2024         kfree(tconn->int_dig_vv);
2025         tconn->int_dig_in = int_dig_in;
2026         tconn->int_dig_vv = int_dig_vv;
2027         retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2028         spin_unlock_irq(&tconn->req_lock);
2029
2030         rcu_read_lock();
2031         idr_for_each_entry(&tconn->volumes, mdev, i) {
2032                 mdev->send_cnt = 0;
2033                 mdev->recv_cnt = 0;
2034                 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
2035         }
2036         rcu_read_unlock();
2037         conn_reconfig_done(tconn);
2038         drbd_adm_finish(info, retcode);
2039         return 0;
2040
2041 fail:
2042         kfree(int_dig_in);
2043         kfree(int_dig_vv);
2044         crypto_free_hash(tfm);
2045         crypto_free_hash(integrity_w_tfm);
2046         crypto_free_hash(integrity_r_tfm);
2047         kfree(new_conf);
2048
2049         conn_reconfig_done(tconn);
2050 out:
2051         drbd_adm_finish(info, retcode);
2052         return 0;
2053 }
2054
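/* Try to bring the connection into C_DISCONNECTING.  With force, the state
 * change is applied unconditionally (CS_HARD); otherwise retry with the peer
 * disk, or our own disk, marked D_OUTDATED where the state engine demands it. */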
2055 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2056 {
2057         enum drbd_state_rv rv;
2058         if (force) {
2059                 spin_lock_irq(&tconn->req_lock);
2060                 if (tconn->cstate >= C_WF_CONNECTION)
2061                         _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
2062                 spin_unlock_irq(&tconn->req_lock);
2063                 return SS_SUCCESS;
2064         }
2065
2066         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
2067
2068         switch (rv) {
2069         case SS_NOTHING_TO_DO:
2070         case SS_ALREADY_STANDALONE:
2071                 return SS_SUCCESS;
2072         case SS_PRIMARY_NOP:
2073                 /* Our state checking code wants to see the peer outdated. */
2074                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2075                                                         pdsk, D_OUTDATED), CS_VERBOSE);
2076                 break;
2077         case SS_CW_FAILED_BY_PEER:
2078                 /* The peer probably wants to see us outdated. */
2079                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2080                                                         disk, D_OUTDATED), 0);
2081                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2082                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
2083                         rv = SS_SUCCESS;
2084                 }
2085                 break;
2086         default:;
2087                 /* no special handling necessary */
2088         }
2089
2090         return rv;
2091 }
2092
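/* Tear down the connection (optionally forced) and wait until we have
 * left C_DISCONNECTING. */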
2093 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2094 {
2095         struct disconnect_parms parms;
2096         struct drbd_tconn *tconn;
2097         enum drbd_state_rv rv;
2098         enum drbd_ret_code retcode;
2099         int err;
2100
2101         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2102         if (!adm_ctx.reply_skb)
2103                 return retcode;
2104         if (retcode != NO_ERROR)
2105                 goto fail;
2106
2107         tconn = adm_ctx.tconn;
2108         memset(&parms, 0, sizeof(parms));
2109         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2110                 err = disconnect_parms_from_attrs(&parms, info);
2111                 if (err) {
2112                         retcode = ERR_MANDATORY_TAG;
2113                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2114                         goto fail;
2115                 }
2116         }
2117
2118         rv = conn_try_disconnect(tconn, parms.force_disconnect);
2119         if (rv < SS_SUCCESS)
2120                 goto fail;
2121
2122         if (wait_event_interruptible(tconn->ping_wait,
2123                                      tconn->cstate != C_DISCONNECTING)) {
2124                 /* Do not test for mdev->state.conn == C_STANDALONE, since
2125                    someone else might connect us in the meantime! */
2126                 retcode = ERR_INTR;
2127                 goto fail;
2128         }
2129
2130         retcode = NO_ERROR;
2131  fail:
2132         drbd_adm_finish(info, retcode);
2133         return 0;
2134 }
2135
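/* After an online grow, decide who resyncs the new area: the primary if the
 * roles differ, otherwise the node with DISCARD_CONCURRENT set becomes sync
 * source; the other side waits for the sync UUID. */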
2136 void resync_after_online_grow(struct drbd_conf *mdev)
2137 {
2138         int iass; /* I am sync source */
2139
2140         dev_info(DEV, "Resync of new storage after online grow\n");
2141         if (mdev->state.role != mdev->state.peer)
2142                 iass = (mdev->state.role == R_PRIMARY);
2143         else
2144                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2145
2146         if (iass)
2147                 drbd_start_resync(mdev, C_SYNC_SOURCE);
2148         else
2149                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2150 }
2151
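/* Resize the device.  Refused while a resync is running, while both nodes
 * are secondary, without a local disk, or when skipping the resync is
 * requested but the peer does not speak protocol version 93 or newer. */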
2152 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2153 {
2154         struct resize_parms rs;
2155         struct drbd_conf *mdev;
2156         enum drbd_ret_code retcode;
2157         enum determine_dev_size dd;
2158         enum dds_flags ddsf;
2159         int err;
2160
2161         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2162         if (!adm_ctx.reply_skb)
2163                 return retcode;
2164         if (retcode != NO_ERROR)
2165                 goto fail;
2166
2167         memset(&rs, 0, sizeof(struct resize_parms));
2168         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2169                 err = resize_parms_from_attrs(&rs, info);
2170                 if (err) {
2171                         retcode = ERR_MANDATORY_TAG;
2172                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2173                         goto fail;
2174                 }
2175         }
2176
2177         mdev = adm_ctx.mdev;
2178         if (mdev->state.conn > C_CONNECTED) {
2179                 retcode = ERR_RESIZE_RESYNC;
2180                 goto fail;
2181         }
2182
2183         if (mdev->state.role == R_SECONDARY &&
2184             mdev->state.peer == R_SECONDARY) {
2185                 retcode = ERR_NO_PRIMARY;
2186                 goto fail;
2187         }
2188
2189         if (!get_ldev(mdev)) {
2190                 retcode = ERR_NO_DISK;
2191                 goto fail;
2192         }
2193
2194         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2195                 retcode = ERR_NEED_APV_93;
2196                 goto fail;
2197         }
2198
2199         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2200                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2201
2202         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
2203         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2204         dd = drbd_determine_dev_size(mdev, ddsf);
2205         drbd_md_sync(mdev);
2206         put_ldev(mdev);
2207         if (dd == dev_size_error) {
2208                 retcode = ERR_NOMEM_BITMAP;
2209                 goto fail;
2210         }
2211
2212         if (mdev->state.conn == C_CONNECTED) {
2213                 if (dd == grew)
2214                         set_bit(RESIZE_PENDING, &mdev->flags);
2215
2216                 drbd_send_uuids(mdev);
2217                 drbd_send_sizes(mdev, 1, ddsf);
2218         }
2219
2220  fail:
2221         drbd_adm_finish(info, retcode);
2222         return 0;
2223 }
2224
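/* Change per-resource options.  A changed CPU mask is propagated to the
 * receiver, asender and worker threads. */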
2225 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2226 {
2227         enum drbd_ret_code retcode;
2228         cpumask_var_t new_cpu_mask;
2229         struct drbd_tconn *tconn;
2230         int *rs_plan_s = NULL;
2231         struct res_opts sc;
2232         int err;
2233
2234         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2235         if (!adm_ctx.reply_skb)
2236                 return retcode;
2237         if (retcode != NO_ERROR)
2238                 goto fail;
2239         tconn = adm_ctx.tconn;
2240
2241         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2242                 retcode = ERR_NOMEM;
2243                 drbd_msg_put_info("unable to allocate cpumask");
2244                 goto fail;
2245         }
2246
2247         if (((struct drbd_genlmsghdr*)info->userhdr)->flags
2248                         & DRBD_GENL_F_SET_DEFAULTS) {
2249                 memset(&sc, 0, sizeof(struct res_opts));
2250                 sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
2251         } else
2252                 sc = tconn->res_opts;
2253
2254         err = res_opts_from_attrs(&sc, info);
2255         if (err) {
2256                 retcode = ERR_MANDATORY_TAG;
2257                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2258                 goto fail;
2259         }
2260
2261         /* silently ignore cpu mask on UP kernel */
2262         if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
2263                 err = __bitmap_parse(sc.cpu_mask, 32, 0,
2264                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2265                 if (err) {
2266                         conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2267                         retcode = ERR_CPU_MASK_PARSE;
2268                         goto fail;
2269                 }
2270         }
2271
2272
2273         tconn->res_opts = sc;
2274
2275         if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2276                 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2277                 drbd_calc_cpu_mask(tconn);
2278                 tconn->receiver.reset_cpu_mask = 1;
2279                 tconn->asender.reset_cpu_mask = 1;
2280                 tconn->worker.reset_cpu_mask = 1;
2281         }
2282
2283 fail:
2284         kfree(rs_plan_s);
2285         free_cpumask_var(new_cpu_mask);
2286
2287         drbd_adm_finish(info, retcode);
2288         return 0;
2289 }
2290
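/* Invalidate the local data: try to become sync target via
 * C_STARTING_SYNC_T; if no connection is established, fall back to simply
 * marking the local disk D_INCONSISTENT. */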
2291 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2292 {
2293         struct drbd_conf *mdev;
2294         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2295
2296         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2297         if (!adm_ctx.reply_skb)
2298                 return retcode;
2299         if (retcode != NO_ERROR)
2300                 goto out;
2301
2302         mdev = adm_ctx.mdev;
2303
2304         /* If there is still bitmap IO pending, probably because of a previous
2305          * resync just being finished, wait for it before requesting a new resync. */
2306         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2307
2308         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2309
2310         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2311                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2312
2313         while (retcode == SS_NEED_CONNECTION) {
2314                 spin_lock_irq(&mdev->tconn->req_lock);
2315                 if (mdev->state.conn < C_CONNECTED)
2316                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2317                 spin_unlock_irq(&mdev->tconn->req_lock);
2318
2319                 if (retcode != SS_NEED_CONNECTION)
2320                         break;
2321
2322                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2323         }
2324
2325 out:
2326         drbd_adm_finish(info, retcode);
2327         return 0;
2328 }
2329
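/* Bitmap IO worker: mark all blocks as out of sync, then suspend the
 * activity log. */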
2330 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2331 {
2332         int rv;
2333
2334         rv = drbd_bmio_set_n_write(mdev);
2335         drbd_suspend_al(mdev);
2336         return rv;
2337 }
2338
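/* Helper for admin commands that map directly to a single state change
 * on one minor. */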
2339 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2340                 union drbd_state mask, union drbd_state val)
2341 {
2342         enum drbd_ret_code retcode;
2343
2344         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2345         if (!adm_ctx.reply_skb)
2346                 return retcode;
2347         if (retcode != NO_ERROR)
2348                 goto out;
2349
2350         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2351 out:
2352         drbd_adm_finish(info, retcode);
2353         return 0;
2354 }
2355
2356 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2357 {
2358         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2359 }
2360
2361 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2362 {
2363         enum drbd_ret_code retcode;
2364
2365         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2366         if (!adm_ctx.reply_skb)
2367                 return retcode;
2368         if (retcode != NO_ERROR)
2369                 goto out;
2370
2371         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2372                 retcode = ERR_PAUSE_IS_SET;
2373 out:
2374         drbd_adm_finish(info, retcode);
2375         return 0;
2376 }
2377
2378 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2379 {
2380         union drbd_dev_state s;
2381         enum drbd_ret_code retcode;
2382
2383         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2384         if (!adm_ctx.reply_skb)
2385                 return retcode;
2386         if (retcode != NO_ERROR)
2387                 goto out;
2388
2389         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2390                 s = adm_ctx.mdev->state;
2391                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2392                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2393                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2394                 } else {
2395                         retcode = ERR_PAUSE_IS_CLEAR;
2396                 }
2397         }
2398
2399 out:
2400         drbd_adm_finish(info, retcode);
2401         return 0;
2402 }
2403
2404 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2405 {
2406         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2407 }
2408
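/* Resume IO: clear all suspend reasons.  If we lost the connection, the
 * transfer log is cleared; if the disk failed or is gone, frozen requests
 * are restarted with FAIL_FROZEN_DISK_IO. */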
2409 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2410 {
2411         struct drbd_conf *mdev;
2412         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2413
2414         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2415         if (!adm_ctx.reply_skb)
2416                 return retcode;
2417         if (retcode != NO_ERROR)
2418                 goto out;
2419
2420         mdev = adm_ctx.mdev;
2421         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2422                 drbd_uuid_new_current(mdev);
2423                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2424         }
2425         drbd_suspend_io(mdev);
2426         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2427         if (retcode == SS_SUCCESS) {
2428                 if (mdev->state.conn < C_CONNECTED)
2429                         tl_clear(mdev->tconn);
2430                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2431                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2432         }
2433         drbd_resume_io(mdev);
2434
2435 out:
2436         drbd_adm_finish(info, retcode);
2437         return 0;
2438 }
2439
2440 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2441 {
2442         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2443 }
2444
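/* Add a DRBD_NLA_CFG_CONTEXT nest (connection name and, if specified,
 * volume number) to the netlink message. */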
2445 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2446 {
2447         struct nlattr *nla;
2448         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2449         if (!nla)
2450                 goto nla_put_failure;
2451         if (vnr != VOLUME_UNSPECIFIED)
2452                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2453         NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2454         nla_nest_end(skb, nla);
2455         return 0;
2456
2457 nla_put_failure:
2458         if (nla)
2459                 nla_nest_cancel(skb, nla);
2460         return -EMSGSIZE;
2461 }
2462
2463 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2464                 const struct sib_info *sib)
2465 {
2466         struct state_info *si = NULL; /* for sizeof(si->member); */
2467         struct nlattr *nla;
2468         int got_ldev;
2469         int got_net;
2470         int err = 0;
2471         int exclude_sensitive;
2472
2473         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2474          * to.  So we better exclude_sensitive information.
2475          *
2476          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2477          * in the context of the requesting user process. Exclude sensitive
2478          * information, unless current has superuser.
2479          *
2480          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2481          * relies on the current implementation of netlink_dump(), which
2482          * executes the dump callback successively from netlink_recvmsg(),
2483          * always in the context of the receiving process */
2484         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2485
2486         got_ldev = get_ldev(mdev);
2487         got_net = get_net_conf(mdev->tconn);
2488
2489         /* We need to add connection name and volume number information still.
2490          * Minor number is in drbd_genlmsghdr. */
2491         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2492                 goto nla_put_failure;
2493
2494         if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2495                 goto nla_put_failure;
2496
2497         if (got_ldev)
2498                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2499                         goto nla_put_failure;
2500         if (got_net)
2501                 if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
2502                         goto nla_put_failure;
2503
2504         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2505         if (!nla)
2506                 goto nla_put_failure;
2507         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2508         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2509         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2510         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2511
2512         if (got_ldev) {
2513                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2514                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2515                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2516                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2517                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2518                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2519                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2520                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2521                 }
2522         }
2523
2524         if (sib) {
2525                 switch(sib->sib_reason) {
2526                 case SIB_SYNC_PROGRESS:
2527                 case SIB_GET_STATUS_REPLY:
2528                         break;
2529                 case SIB_STATE_CHANGE:
2530                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2531                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2532                         break;
2533                 case SIB_HELPER_POST:
2534                         NLA_PUT_U32(skb,
2535                                 T_helper_exit_code, sib->helper_exit_code);
2536                         /* fall through */
2537                 case SIB_HELPER_PRE:
2538                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2539                         break;
2540                 }
2541         }
2542         nla_nest_end(skb, nla);
2543
2544         if (0)
2545 nla_put_failure:
2546                 err = -EMSGSIZE;
2547         if (got_ldev)
2548                 put_ldev(mdev);
2549         if (got_net)
2550                 put_net_conf(mdev->tconn);
2551         return err;
2552 }
2553
2554 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2555 {
2556         enum drbd_ret_code retcode;
2557         int err;
2558
2559         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2560         if (!adm_ctx.reply_skb)
2561                 return retcode;
2562         if (retcode != NO_ERROR)
2563                 goto out;
2564
2565         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2566         if (err) {
2567                 nlmsg_free(adm_ctx.reply_skb);
2568                 return err;
2569         }
2570 out:
2571         drbd_adm_finish(info, retcode);
2572         return 0;
2573 }
2574
2575 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2576 {
2577         struct drbd_conf *mdev;
2578         struct drbd_genlmsghdr *dh;
2579         struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2580         struct drbd_tconn *tconn = NULL;
2581         struct drbd_tconn *tmp;
2582         unsigned volume = cb->args[1];
2583
2584         /* Open coded, deferred, iteration:
2585          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2586          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2587          *        ...
2588          *      }
2589          * }
2590          * where tconn is cb->args[0];
2591          * and i is cb->args[1];
2592          *
2593          * This may miss entries inserted after this dump started,
2594          * or entries deleted before they are reached.
2595          *
2596          * We need to make sure the mdev won't disappear while
2597          * we are looking at it, and revalidate our iterators
2598          * on each iteration.
2599          */
2600
2601         /* synchronize with drbd_new_tconn/drbd_free_tconn */
2602         down_read(&drbd_cfg_rwsem);
2603 next_tconn:
2604         /* revalidate iterator position */
2605         list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2606                 if (pos == NULL) {
2607                         /* first iteration */
2608                         pos = tmp;
2609                         tconn = pos;
2610                         break;
2611                 }
2612                 if (tmp == pos) {
2613                         tconn = pos;
2614                         break;
2615                 }
2616         }
2617         if (tconn) {
2618                 mdev = idr_get_next(&tconn->volumes, &volume);
2619                 if (!mdev) {
2620                         /* No more volumes to dump on this tconn.
2621                          * Advance tconn iterator. */
2622                         pos = list_entry(tconn->all_tconn.next,
2623                                         struct drbd_tconn, all_tconn);
2624                         /* But, did we dump any volume on this tconn yet? */
2625                         if (volume != 0) {
2626                                 tconn = NULL;
2627                                 volume = 0;
2628                                 goto next_tconn;
2629                         }
2630                 }
2631
2632                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2633                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2634                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2635                 if (!dh)
2636                         goto out;
2637
2638                 if (!mdev) {
2639                         /* this is a tconn without a single volume */
2640                         dh->minor = -1U;
2641                         dh->ret_code = NO_ERROR;
2642                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2643                                 genlmsg_cancel(skb, dh);
2644                         else
2645                                 genlmsg_end(skb, dh);
2646                         goto out;
2647                 }
2648
2649                 D_ASSERT(mdev->vnr == volume);
2650                 D_ASSERT(mdev->tconn == tconn);
2651
2652                 dh->minor = mdev_to_minor(mdev);
2653                 dh->ret_code = NO_ERROR;
2654
2655                 if (nla_put_status_info(skb, mdev, NULL)) {
2656                         genlmsg_cancel(skb, dh);
2657                         goto out;
2658                 }
2659                 genlmsg_end(skb, dh);
2660         }
2661
2662 out:
2663         up_read(&drbd_cfg_rwsem);
2664         /* where to start the next iteration */
2665         cb->args[0] = (long)pos;
2666         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2667
2668         /* No more tconns/volumes/minors found results in an empty skb.
2669          * Which will terminate the dump. */
2670         return skb->len;
2671 }
2672
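/* Report which wait-for-connection timeout applies to this minor:
 * peer outdated, degraded, or the default. */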
2673 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2674 {
2675         enum drbd_ret_code retcode;
2676         struct timeout_parms tp;
2677         int err;
2678
2679         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2680         if (!adm_ctx.reply_skb)
2681                 return retcode;
2682         if (retcode != NO_ERROR)
2683                 goto out;
2684
2685         tp.timeout_type =
2686                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2687                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2688                 UT_DEFAULT;
2689
2690         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2691         if (err) {
2692                 nlmsg_free(adm_ctx.reply_skb);
2693                 return err;
2694         }
2695 out:
2696         drbd_adm_finish(info, retcode);
2697         return 0;
2698 }
2699
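/* Start online verify, optionally resuming from a given (aligned)
 * start sector. */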
2700 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2701 {
2702         struct drbd_conf *mdev;
2703         enum drbd_ret_code retcode;
2704
2705         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2706         if (!adm_ctx.reply_skb)
2707                 return retcode;
2708         if (retcode != NO_ERROR)
2709                 goto out;
2710
2711         mdev = adm_ctx.mdev;
2712         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2713                 /* resume from last known position, if possible */
2714                 struct start_ov_parms parms =
2715                         { .ov_start_sector = mdev->ov_start_sector };
2716                 int err = start_ov_parms_from_attrs(&parms, info);
2717                 if (err) {
2718                         retcode = ERR_MANDATORY_TAG;
2719                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2720                         goto out;
2721                 }
2722                 /* w_make_ov_request expects position to be aligned */
2723                 mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
2724         }
2725         /* If there is still bitmap IO pending, e.g. previous resync or verify
2726          * just being finished, wait for it before requesting a new resync. */
2727         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2728         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2729 out:
2730         drbd_adm_finish(info, retcode);
2731         return 0;
2732 }
2733
2734
2735 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
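/* Generate a new current UUID.  When connected, with the device just created
 * (UI_CURRENT == UUID_JUST_CREATED) and clearing of the bitmap requested,
 * the initial full sync is skipped. */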
2736 {
2737         struct drbd_conf *mdev;
2738         enum drbd_ret_code retcode;
2739         int skip_initial_sync = 0;
2740         int err;
2741         struct new_c_uuid_parms args;
2742
2743         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2744         if (!adm_ctx.reply_skb)
2745                 return retcode;
2746         if (retcode != NO_ERROR)
2747                 goto out_nolock;
2748
2749         mdev = adm_ctx.mdev;
2750         memset(&args, 0, sizeof(args));
2751         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2752                 err = new_c_uuid_parms_from_attrs(&args, info);
2753                 if (err) {
2754                         retcode = ERR_MANDATORY_TAG;
2755                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2756                         goto out_nolock;
2757                 }
2758         }
2759
2760         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2761
2762         if (!get_ldev(mdev)) {
2763                 retcode = ERR_NO_DISK;
2764                 goto out;
2765         }
2766
2767         /* this is "skip initial sync", assume to be clean */
2768         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2769             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2770                 dev_info(DEV, "Preparing to skip initial sync\n");
2771                 skip_initial_sync = 1;
2772         } else if (mdev->state.conn != C_STANDALONE) {
2773                 retcode = ERR_CONNECTED;
2774                 goto out_dec;
2775         }
2776
2777         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2778         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2779
2780         if (args.clear_bm) {
2781                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2782                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2783                 if (err) {
2784                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2785                         retcode = ERR_IO_MD_DISK;
2786                 }
2787                 if (skip_initial_sync) {
2788                         drbd_send_uuids_skip_initial_sync(mdev);
2789                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2790                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2791                         spin_lock_irq(&mdev->tconn->req_lock);
2792                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2793                                         CS_VERBOSE, NULL);
2794                         spin_unlock_irq(&mdev->tconn->req_lock);
2795                 }
2796         }
2797
2798         drbd_md_sync(mdev);
2799 out_dec:
2800         put_ldev(mdev);
2801 out:
2802         mutex_unlock(mdev->state_mutex);
2803 out_nolock:
2804         drbd_adm_finish(info, retcode);
2805         return 0;
2806 }
2807
2808 static enum drbd_ret_code
2809 drbd_check_conn_name(const char *name)
2810 {
2811         if (!name || !name[0]) {
2812                 drbd_msg_put_info("connection name missing");
2813                 return ERR_MANDATORY_TAG;
2814         }
2815         /* if we want to use these in sysfs/configfs/debugfs some day,
2816          * we must not allow slashes */
2817         if (strchr(name, '/')) {
2818                 drbd_msg_put_info("invalid connection name");
2819                 return ERR_INVALID_REQUEST;
2820         }
2821         return NO_ERROR;
2822 }
2823
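     /*
      * Create a new connection object for the connection name given in the
      * request.  Asking for a name that already exists is only an error if
      * the request carries NLM_F_EXCL; otherwise it is treated as success.
      */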
2824 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2825 {
2826         enum drbd_ret_code retcode;
2827
2828         retcode = drbd_adm_prepare(skb, info, 0);
2829         if (!adm_ctx.reply_skb)
2830                 return retcode;
2831         if (retcode != NO_ERROR)
2832                 goto out;
2833
2834         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2835         if (retcode != NO_ERROR)
2836                 goto out;
2837
2838         if (adm_ctx.tconn) {
2839                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2840                         retcode = ERR_INVALID_REQUEST;
2841                         drbd_msg_put_info("connection exists");
2842                 }
2843                 /* else: still NO_ERROR */
2844                 goto out;
2845         }
2846
2847         if (!drbd_new_tconn(adm_ctx.conn_name))
2848                 retcode = ERR_NOMEM;
2849 out:
2850         drbd_adm_finish(info, retcode);
2851         return 0;
2852 }
2853
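     /*
      * Add a minor (volume) to an existing connection.  The requested minor
      * must be below minor_count and the volume id must not exceed
      * DRBD_VOLUME_MAX; as above, an already existing minor only fails when
      * NLM_F_EXCL is set.
      */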
2854 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2855 {
2856         struct drbd_genlmsghdr *dh = info->userhdr;
2857         enum drbd_ret_code retcode;
2858
2859         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2860         if (!adm_ctx.reply_skb)
2861                 return retcode;
2862         if (retcode != NO_ERROR)
2863                 goto out;
2864
2865         /* FIXME drop minor_count parameter, limit to MINORMASK */
2866         if (dh->minor >= minor_count) {
2867                 drbd_msg_put_info("requested minor out of range");
2868                 retcode = ERR_INVALID_REQUEST;
2869                 goto out;
2870         }
2871         if (adm_ctx.volume > DRBD_VOLUME_MAX) {
2872                 drbd_msg_put_info("requested volume id out of range");
2873                 retcode = ERR_INVALID_REQUEST;
2874                 goto out;
2875         }
2876
2877         /* drbd_adm_prepare made sure already
2878          * that mdev->tconn and mdev->vnr match the request. */
2879         if (adm_ctx.mdev) {
2880                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2881                         retcode = ERR_MINOR_EXISTS;
2882                 /* else: still NO_ERROR */
2883                 goto out;
2884         }
2885
2886         down_write(&drbd_cfg_rwsem);
2887         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2888         up_write(&drbd_cfg_rwsem);
2889 out:
2890         drbd_adm_finish(info, retcode);
2891         return 0;
2892 }
2893
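     /* Shared by drbd_adm_delete_minor() and drbd_adm_down();
      * both callers hold drbd_cfg_rwsem for writing. */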
2894 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
2895 {
2896         if (mdev->state.disk == D_DISKLESS &&
2897             /* no need to require mdev->state.conn == C_STANDALONE here:
2898              * we may want to delete a minor from a live replication group.
2899              */
2900             mdev->state.role == R_SECONDARY) {
2901                 drbd_delete_device(mdev);
2902                 return NO_ERROR;
2903         } else
2904                 return ERR_MINOR_CONFIGURED;
2905 }
2906
2907 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2908 {
2909         enum drbd_ret_code retcode;
2910
2911         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2912         if (!adm_ctx.reply_skb)
2913                 return retcode;
2914         if (retcode != NO_ERROR)
2915                 goto out;
2916
2917         down_write(&drbd_cfg_rwsem);
2918         retcode = adm_delete_minor(adm_ctx.mdev);
2919         up_write(&drbd_cfg_rwsem);
2920         /* if this was the last volume of this connection,
2921          * this will terminate all threads */
2922         if (retcode == NO_ERROR)
2923                 conn_reconfig_done(adm_ctx.tconn);
2924 out:
2925         drbd_adm_finish(info, retcode);
2926         return 0;
2927 }
2928
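     /*
      * Tear down a whole connection: demote every volume to secondary,
      * disconnect from the peer, detach all backing devices, delete the
      * now diskless and secondary volumes, stop the connection threads and
      * finally free the connection object itself.
      */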
2929 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
2930 {
2931         enum drbd_ret_code retcode;
2932         enum drbd_state_rv rv;
2933         struct drbd_conf *mdev;
2934         unsigned i;
2935
2936         retcode = drbd_adm_prepare(skb, info, 0);
2937         if (!adm_ctx.reply_skb)
2938                 return retcode;
2939         if (retcode != NO_ERROR)
2940                 goto out;
2941
2942         if (!adm_ctx.tconn) {
2943                 retcode = ERR_CONN_NOT_KNOWN;
2944                 goto out;
2945         }
2946
2947         down_read(&drbd_cfg_rwsem);
2948         /* demote */
2949         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2950                 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
2951                 if (retcode < SS_SUCCESS) {
2952                         drbd_msg_put_info("failed to demote");
2953                         goto out_unlock;
2954                 }
2955         }
2956
2957         /* disconnect */
2958         rv = conn_try_disconnect(adm_ctx.tconn, 0);
2959         if (rv < SS_SUCCESS) {
2960                 retcode = rv; /* enum type mismatch! */
2961                 drbd_msg_put_info("failed to disconnect");
2962                 goto out_unlock;
2963         }
2964
2965         /* detach */
2966         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2967                 rv = adm_detach(mdev);
2968                 if (rv < SS_SUCCESS) {
2969                         retcode = rv; /* enum type mismatch! */
2970                         drbd_msg_put_info("failed to detach");
2971                         goto out_unlock;
2972                 }
2973         }
2974         up_read(&drbd_cfg_rwsem);
2975
2976         /* delete volumes */
2977         down_write(&drbd_cfg_rwsem);
2978         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2979                 retcode = adm_delete_minor(mdev);
2980                 if (retcode != NO_ERROR) {
2981                         /* "can not happen" */
2982                         drbd_msg_put_info("failed to delete volume");
2983                         up_write(&drbd_cfg_rwsem);
2984                         goto out;
2985                 }
2986         }
2987
2988         /* stop all threads */
2989         conn_reconfig_done(adm_ctx.tconn);
2990
2991         /* delete connection */
2992         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2993                 drbd_free_tconn(adm_ctx.tconn);
2994                 retcode = NO_ERROR;
2995         } else {
2996                 /* "can not happen" */
2997                 retcode = ERR_CONN_IN_USE;
2998                 drbd_msg_put_info("failed to delete connection");
2999         }
3000
3001         up_write(&drbd_cfg_rwsem);
3002         goto out;
3003 out_unlock:
3004         up_read(&drbd_cfg_rwsem);
3005 out:
3006         drbd_adm_finish(info, retcode);
3007         return 0;
3008 }
3009
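     /*
      * Delete a connection that no longer has any minors; a connection
      * that still has volumes is rejected with ERR_CONN_IN_USE.
      */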
3010 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
3011 {
3012         enum drbd_ret_code retcode;
3013
3014         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3015         if (!adm_ctx.reply_skb)
3016                 return retcode;
3017         if (retcode != NO_ERROR)
3018                 goto out;
3019
3020         down_write(&drbd_cfg_rwsem);
3021         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3022                 drbd_free_tconn(adm_ctx.tconn);
3023                 retcode = NO_ERROR;
3024         } else {
3025                 retcode = ERR_CONN_IN_USE;
3026         }
3027         up_write(&drbd_cfg_rwsem);
3028
3029 out:
3030         drbd_adm_finish(info, retcode);
3031         return 0;
3032 }
3033
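     /*
      * Broadcast a state info block ("sib") for this minor to the drbd
      * generic netlink multicast group for events.  -ESRCH (no listeners)
      * is not treated as an error; any other failure is logged.
      */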
3034 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3035 {
3036         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3037         struct sk_buff *msg;
3038         struct drbd_genlmsghdr *d_out;
3039         unsigned seq;
3040         int err = -ENOMEM;
3041
3042         seq = atomic_inc_return(&drbd_genl_seq);
3043         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3044         if (!msg)
3045                 goto failed;
3046
3047         err = -EMSGSIZE;
3048         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3049         if (!d_out) /* cannot happen, but anyway. */
3050                 goto nla_put_failure;
3051         d_out->minor = mdev_to_minor(mdev);
3052         d_out->ret_code = 0;
3053
3054         if (nla_put_status_info(msg, mdev, sib))
3055                 goto nla_put_failure;
3056         genlmsg_end(msg, d_out);
3057         err = drbd_genl_multicast_events(msg, 0);
3058         /* msg has been consumed or freed in netlink_broadcast() */
3059         if (err && err != -ESRCH)
3060                 goto failed;
3061
3062         return;
3063
3064 nla_put_failure:
3065         nlmsg_free(msg);
3066 failed:
3067         dev_err(DEV, "Error %d while broadcasting event. "
3068                         "Event seq:%u sib_reason:%u\n",
3069                         err, seq, sib->sib_reason);
3070 }