drbd: prepare for more than 32 bit flags
drivers/block/drbd/drbd_nl.c
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include <linux/drbd_nl.h>

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include <linux/drbd_nl.h>
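
/*
 * A tag list, as consumed and produced by the generated helpers above, is
 * a flat array of 16-bit words: each item is a tag word (tag number plus
 * T_MANDATORY/other flags plus a TT_* type), followed by a 16-bit payload
 * length and the payload itself; the list is terminated by TT_END.
 * Roughly:
 *
 *	[tag|flags|type][len][payload bytes ...] ... [TT_END]
 *
 * name##_from_tags() walks such an array into a struct, skipping unknown
 * non-mandatory tags; name##_to_tags() appends items and returns the new
 * tail pointer.
 */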

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	if (current == mdev->worker.task)
		drbd_set_flag(mdev, CALLBACK_PENDING);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	/* The helper may take some time.
	 * Write out any unsynced meta data changes now. */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
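	/* UMH_WAIT_PROC makes call_usermodehelper() wait for the helper and
	 * return a wait()-style status word, hence the (ret >> 8) & 0xff
	 * below to extract the helper's exit code. */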
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (current == mdev->worker.task)
		drbd_clear_flag(mdev, CALLBACK_PENDING);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyway... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

 out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}
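
/*
 * For reference, the fence-peer helper exit codes handled above:
 *   3 - peer is inconsistent (or worse)      -> D_INCONSISTENT
 *   4 - peer was fenced / already outdated   -> D_OUTDATED
 *   5 - peer was down; outdated only if our own disk is D_UP_TO_DATE
 *   6 - peer is primary; we outdate ourselves instead
 *   7 - peer was stonithed                   -> D_OUTDATED
 * Anything else is treated as a broken helper and yields D_UNKNOWN.
 */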

static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;
	union drbd_state ns;

	nps = drbd_try_outdate_peer(mdev);

	/* Not using
	   drbd_request_state(mdev, NS(pdsk, nps));
	   here, because we might have been able to re-establish the
	   connection in the meantime. This can only partially be solved in
	   the state engine's is_valid_state() and
	   is_valid_state_transition() functions.

	   nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
	   therefore we have to have the pre state change check here.
	*/
	spin_lock_irq(&mdev->req_lock);
	ns = mdev->state;
	if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
		ns.pdsk = nps;
		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto fail;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return rv;
}

static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
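
/* Example: a size of 16777216 (16 GiB worth of KB) prints as "16 GB";
 * the unit only advances once the value would need five digits, so
 * 2097152 KB stays at "2048 MB". */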

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	drbd_set_flag(mdev, SUSPEND_IO);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	drbd_clear_flag(mdev, SUSPEND_IO);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the resulting enum determine_dev_size: dev_size_error on error,
 * otherwise unchanged, shrunk or grew.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			(unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
 out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
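
/*
 * Size precedence implemented above, in short: if both our size and the
 * peer's size are known, use the minimum of the two; otherwise fall back
 * to the last agreed size, clamped by whichever side is known; with no
 * history, take whatever single size we have. A user-requested disk_size
 * is honored only if it does not exceed the result.
 */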

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough:
	   from 8.3.8 onwards the peer can use multiple BIOs
	   for a single peer_request. */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->agreed_pro_version < 94) {
			peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		} else if (mdev->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min(local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
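
/*
 * Net effect of the negotiation above: the queue is set up for
 * min(local, peer), where "peer" is DRBD_MAX_SIZE_H80_PACKET for agreed
 * protocol 94, at most that for older peers, and DRBD_MAX_BIO_SIZE from
 * protocol 95 (drbd 8.3.8) onwards, since such peers can split a request
 * into multiple BIOs.
 */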

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
	wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		drbd_set_flag(mdev, DEVICE_DYING);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		drbd_clear_flag(mdev, CONFIG_PENDING);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}
906
0778286a
PR
907/* Make sure IO is suspended before calling this function(). */
908static void drbd_suspend_al(struct drbd_conf *mdev)
909{
910 int s = 0;
911
912 if (lc_try_lock(mdev->act_log)) {
913 drbd_al_shrink(mdev);
914 lc_unlock(mdev->act_log);
915 } else {
916 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
917 return;
918 }
919
920 spin_lock_irq(&mdev->req_lock);
921 if (mdev->state.conn < C_CONNECTED)
06f10adb 922 s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);
0778286a
PR
923
924 spin_unlock_irq(&mdev->req_lock);
925
926 if (s)
927 dev_info(DEV, "Suspended AL updates\n");
928}

/* always returns 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* make sure there is no leftover from previous force-detach attempts */
	drbd_clear_flag(mdev, FORCE_DETACH);

	/* and no leftover from previously aborted resync or verify, either */
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	spin_lock_init(&nbc->md.uuid_lock);

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
				 logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		drbd_set_flag(mdev, MD_NO_FUA);
	else
		drbd_clear_flag(mdev, MD_NO_FUA);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		drbd_set_flag(mdev, CRASHED_PRIMARY);
	else
		drbd_clear_flag(mdev, CRASHED_PRIMARY);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		drbd_set_flag(mdev, CRASHED_PRIMARY);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		drbd_set_flag(mdev, USE_DEGR_WFC_T);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		drbd_set_flag(mdev, RESYNC_AFTER_NEG);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK) < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WAS_UP_TO_DATE...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
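
/*
 * Attach, in brief: transition to D_ATTACHING, read the meta data, size
 * device and bitmap, then derive the new disk state from the MDF_* flags
 * (D_INCONSISTENT, D_OUTDATED, D_CONSISTENT or D_UP_TO_DATE); while
 * C_CONNECTED the decision is deferred via D_NEGOTIATING. Any failure
 * after reaching D_ATTACHING forces the disk to D_FAILED and from there
 * to D_DISKLESS.
 */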

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	int ret;
	struct detach dt = {};

	if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		goto out;
	}

	if (dt.detach_force) {
		drbd_set_flag(mdev, FORCE_DETACH);
		drbd_force_state(mdev, NS(disk, D_FAILED));
		reply->ret_code = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	drbd_md_put_buffer(mdev);
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);

	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	reply->ret_code = retcode;
 out:
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_code retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose = 0;
	new_conf->two_primaries = 0;
	new_conf->wire_protocol = DRBD_PROT_C;
	new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
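
/*
 * Note the ordering above: every allocation and crypto handle is prepared
 * before taking req_lock; the new net_conf, hash tables and digest buffers
 * are then installed in one critical section, so concurrent users never
 * see a half-updated network configuration.
 */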

static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;
	struct disconnect dc;

	memset(&dc, 0, sizeof(struct disconnect));
	if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (dc.force) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn >= C_WF_CONNECTION)
			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
		spin_unlock_irq(&mdev->req_lock);
		goto done;
	}

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
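
/* If the roles differ, the primary seeds the resync; with two equal
 * roles the DISCARD_CONCURRENT flag (decided when the connection was
 * established) breaks the tie. */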

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			drbd_set_flag(mdev, RESIZE_PENDING);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	reply->ret_code = retcode;
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}
1782
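/**
 * drbd_nl_syncer_conf() - Handle the syncer-conf request from drbdsetup
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * Validates and applies a new syncer configuration: checksum and verify
 * algorithms, CPU mask, sync-after dependency, resync rate planning fifo
 * and activity log size.
 */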
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate = DRBD_RATE_DEF;
		sc.after = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = bitmap_parse(sc.cpu_mask, 32,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kzalloc of fifo_buffer failed\n");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

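/**
 * drbd_nl_invalidate() - Invalidate the local data, become sync target
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * Waits for pending bitmap IO to finish, then requests C_STARTING_SYNC_T.
 */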
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}
	drbd_resume_io(mdev);

	reply->ret_code = retcode;
	return 0;
}

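/* Helper for drbd_bitmap_io(): set all bits in the bitmap
 * (drbd_bmio_set_n_write()), then suspend the activity log. */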
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

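/**
 * drbd_nl_invalidate_peer() - Invalidate the peer's data, become sync source
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * Waits for pending bitmap IO to finish, then requests C_STARTING_SYNC_S.
 * On a disconnected Primary the peer's disk is marked Inconsistent and all
 * bits in the bitmap are set, turning the resync-on-connect into a full one.
 */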
static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyway.
			 * Just make that into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
						   "set_n_write from invalidate_peer",
						   BM_LOCKED_SET_ALLOWED))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}
	drbd_resume_io(mdev);

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	union drbd_state s;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}

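/**
 * drbd_nl_resume_io() - Resume suspended I/O
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * Clears all suspend reasons; requests that froze while the connection was
 * lost or the local disk failed are cleaned out or failed accordingly.
 */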
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
		drbd_uuid_new_current(mdev);
		drbd_clear_flag(mdev, NEW_CUR_UUID);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev, fail_frozen_disk_io);
	}
	drbd_resume_io(mdev);

	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

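/**
 * drbd_nl_get_config() - Report the current disk, net and syncer config
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * Returns the length of the generated tag list in bytes.
 */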
static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

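/**
 * drbd_nl_get_uuids() - Report the UUID set and flags of the local meta data
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */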
static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		unsigned long flags;
		spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

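/**
 * drbd_nl_start_ov() - Start online verify
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * Defaults to resuming from the last known position and waits for pending
 * bitmap IO to finish before requesting C_VERIFY_S.
 */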
static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args = {
		.start_sector = mdev->ov_start_sector,
		.stop_sector = ULLONG_MAX,
	};

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));

	/* w_make_ov_request expects start position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
	mdev->ov_stop_sector = args.stop_sector;
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
	return 0;
}

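/**
 * drbd_nl_new_c_uuid() - Generate a new current UUID
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * If clear_bm is set on a just-created device, the initial full sync is
 * skipped: the bitmap is cleared on disk and both disks are declared
 * D_UP_TO_DATE.
 */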
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;

	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ] = { &drbd_nl_primary, 0 },
	[ P_secondary ] = { &drbd_nl_secondary, 0 },
	[ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
	[ P_detach ] = { &drbd_nl_detach, 0 },
	[ P_net_conf ] = { &drbd_nl_net_conf, 0 },
	[ P_disconnect ] = { &drbd_nl_disconnect, 0 },
	[ P_resize ] = { &drbd_nl_resize, 0 },
	[ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
	[ P_invalidate ] = { &drbd_nl_invalidate, 0 },
	[ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
	[ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
	[ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
	[ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
	[ P_resume_io ] = { &drbd_nl_resume_io, 0 },
	[ P_outdate ] = { &drbd_nl_outdate, 0 },
	[ P_get_config ] = { &drbd_nl_get_config,
			     sizeof(struct syncer_conf_tag_len_struct) +
			     sizeof(struct disk_conf_tag_len_struct) +
			     sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ] = { &drbd_nl_get_state,
			    sizeof(struct get_state_tag_len_struct) +
			    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ] = { &drbd_nl_get_uuids,
			    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
				   sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ] = { &drbd_nl_start_ov, 0 },
	[ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
};

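/**
 * drbd_connector_callback() - Dispatch a request received via connector
 * @req:	Connector message carrying a struct drbd_nl_cfg_req
 * @nsp:	Netlink parameters of the sending socket (not used here)
 *
 * Looks up the handler in cnd_table, allocates a reply of the advertised
 * size, runs the handler and sends the reply back via cn_netlink_send().
 */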
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			   (nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet ||
	    nlp->packet_type == P_return_code_only) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kzalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_return_code_only;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
 fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

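/* Encode an integer-valued tag; the tag's type selects 32 vs. 64 bit width. */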
static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}

void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

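/**
 * drbd_bcast_ee() - Broadcast the data of an epoch entry, e.g. on digest mismatch
 * @mdev:	DRBD device.
 * @reason:	Textual reason for the broadcast
 * @dgs:	Digest size in bytes
 * @seen_hash:	Digest as received from the peer
 * @calc_hash:	Digest as calculated locally
 * @e:		Epoch entry whose data (at most the first 32k) is dumped
 */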
void drbd_bcast_ee(struct drbd_conf *mdev,
		   const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kzalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kzalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	/* dump the first 32k */
	len = min_t(unsigned, e->size, 32 << 10);
	put_unaligned(T_ee_data, tl++);
	put_unaligned(len, tl++);

	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
		if (len == 0)
			break;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

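/**
 * drbd_nl_init() - Register the connector callback
 *
 * If the configured connector index is already taken, probe further
 * indices in steps of CN_IDX_STEP, up to ten retries.
 */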
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	memset(buffer, 0, sizeof(buffer));
	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->packet_type = P_return_code_only;
	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}