drbd: reject attach of unsuitable uuids even if connected
linux-block.git: drivers/block/drbd/drbd_receiver.c
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
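
/*
 * A hedged usage sketch of the helpers above (illustration only, not part
 * of the driver):
 *
 *	struct page *head = NULL;
 *	struct page *tail, *four;
 *	int len;
 *
 *	tail = page_chain_tail(chain, &len);	// find tail outside any lock
 *	page_chain_add(&head, chain, tail);	// splice chain onto head
 *	four = page_chain_del(&head, 4);	// detach 4 pages, or NULL
 *
 * page_chain_del() either detaches exactly n pages, terminating the
 * returned list with page->private == 0, or leaves *head untouched.
 */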

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req, *tmp;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that is not finished,
	   we can stop examining the list... */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device:	DRBD device.
 * @number:		number of pages requested
 * @retry:		whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}
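
/*
 * Note on the throttle above: once one full schedule_timeout(HZ/10)
 * expires without the pool being refilled (return value 0), mxb is raised
 * to UINT_MAX, i.e. the max-buffers soft limit is abandoned rather than
 * risking the distributed deadlock described in the kernel-doc.
 */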

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (payload_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, &drbd_ee_mempool);
	return NULL;
}
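
/*
 * Worked example for the payload_size cases above (illustration only):
 * a 4 KiB write arrives with request_size == payload_size == 4096, so one
 * page is allocated; a trim/discard of the same extent arrives with
 * payload_size == 0, so peer_req->pages stays NULL and only the interval
 * [sector, sector + 8) (in 512-byte sectors) is tracked.
 */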

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
			  int is_net)
{
	might_sleep();
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
	if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}
	mempool_free(peer_req, &drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &device->net_ee;

	spin_lock_irq(&device->resource->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(device, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	wake_up(&device->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
	return sock_recvmsg(sock, &msg, msg.msg_flags);
}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv(connection, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(connection, buf, size);
	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}
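
/*
 * Setting sk_sndbuf/sk_rcvbuf plus the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK
 * bits is the in-kernel equivalent of a user-space
 * setsockopt(fd, SOL_SOCKET, SO_SNDBUF/SO_RCVBUF, ...): the lock bits keep
 * TCP buffer autotuning from overriding the explicitly configured sizes.
 */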

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(connection);
	struct packet_info pi;
	struct net_conf *nc;
	int err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
	rcu_read_unlock();

	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(connection, connection->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{
	struct net_conf *nc;
	int timeout;
	bool ok;

	if (!*sock1 || !*sock2)
		return false;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
	rcu_read_unlock();
	schedule_timeout_interruptible(timeout);

	ok = drbd_socket_okay(sock1);
	ok = drbd_socket_okay(sock2) && ok;

	return ok;
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_connection *connection)
{
	struct drbd_socket sock, msock;
	struct drbd_peer_device *peer_device;
	struct net_conf *nc;
	int vnr, timeout, h;
	bool discard_my_data, ok;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.connection = connection,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &connection->flags);
	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = connection->data.sbuf;
	sock.rbuf = connection->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = connection->meta.sbuf;
	msock.rbuf = connection->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	connection->agreed_pro_version = 80;

	if (prepare_listen_socket(connection, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(connection);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(connection, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
				msock.socket = s;
				send_first_packet(connection, &msock, P_INITIAL_META);
			} else {
				drbd_err(connection, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (connection_established(connection, &sock.socket, &msock.socket))
			break;

retry:
		s = drbd_wait_for_connect(connection, &ad);
		if (s) {
			int fp = receive_first_packet(connection, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					drbd_warn(connection, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &connection->flags);
				if (msock.socket) {
					drbd_warn(connection, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				drbd_warn(connection, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (connection->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&connection->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = connection_established(connection, &sock.socket, &msock.socket);
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	connection->data.socket = sock.socket;
	connection->meta.socket = msock.socket;
	connection->last_received = jiffies;

	h = drbd_do_features(connection);
	if (h <= 0)
		return h;

	if (connection->cram_hmac_tfm) {
		/* drbd_request_state(device, NS(conn, WFAuth)); */
		switch (drbd_do_auth(connection)) {
		case -1:
			drbd_err(connection, "Authentication of peer failed\n");
			return -1;
		case 0:
			drbd_err(connection, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	connection->data.socket->sk->sk_sndtimeo = timeout;
	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
		return -1;

	/* Prevent a race between resync-handshake and
	 * being promoted to Primary.
	 *
	 * Grab and release the state mutex, so we know that any current
	 * drbd_set_role() is finished, and any incoming drbd_set_role
	 * will see the STATE_SENT flag, and wait for it to be cleared.
	 */
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_lock(peer_device->device->state_mutex);

	/* avoid a race with conn_request_state( C_DISCONNECTING ) */
	spin_lock_irq(&connection->resource->req_lock);
	set_bit(STATE_SENT, &connection->flags);
	spin_unlock_irq(&connection->resource->req_lock);

	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_unlock(peer_device->device->state_mutex);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		kref_get(&device->kref);
		rcu_read_unlock();

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &device->flags);
		else
			clear_bit(DISCARD_MY_DATA, &device->flags);

		drbd_connected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &connection->flags);
		return 0;
	}

	drbd_thread_start(&connection->ack_receiver);
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use format string arguments */
	connection->ack_sender =
		alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
	if (!connection->ack_sender) {
		drbd_err(connection, "Failed to create workqueue ack_sender\n");
		return 0;
	}

	mutex_lock(&connection->resource->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	connection->net_conf->discard_my_data = 0;
	mutex_unlock(&connection->resource->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}
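
/*
 * Summary of the dance above: each side both connects out and accepts in,
 * until two TCP connections exist.  The first socket a side establishes
 * actively carries P_INITIAL_DATA (bulk data), the second P_INITIAL_META
 * (acks/pings).  When both sides connect simultaneously and the initial
 * packets "cross", the duplicate socket is dropped and a coin flip
 * (prandom_u32() & 1) decides whether to retry the accept path, so the
 * pair eventually becomes unambiguous.
 */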

static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}
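
/*
 * Three on-the-wire header generations are recognized above, distinguished
 * by size and magic: p_header80 (DRBD_MAGIC, 16-bit length, no volume
 * number), p_header95 (DRBD_MAGIC_BIG, 32-bit length), and p_header100
 * (DRBD_MAGIC_100, which adds a 16-bit volume number for multi-volume
 * resources).
 */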

static void drbd_unplug_all_devices(struct drbd_connection *connection)
{
	if (current->plug == &connection->receiver_plug) {
		blk_finish_plug(&connection->receiver_plug);
		blk_start_plug(&connection->receiver_plug);
	} /* else: maybe just schedule() ?? */
}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	int err;

	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
	if (err)
		return err;

	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;

	return err;
}

static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	unsigned int size = drbd_header_size(connection);
	int err;

	err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
	if (err != size) {
		/* If we have nothing in the receive buffer now, to reduce
		 * application latency, try to drain the backend queues as
		 * quickly as possible, and let remote TCP know what we have
		 * received so far. */
		if (err == -EAGAIN) {
			drbd_tcp_quickack(connection->data.socket);
			drbd_unplug_all_devices(connection);
		}
		if (err > 0) {
			buffer += err;
			size -= err;
		}
		err = drbd_recv_all_warn(connection, buffer, size);
		if (err)
			return err;
	}

	err = decode_header(connection, connection->data.rbuf, pi);
	connection->last_received = jiffies;

	return err;
}
/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

static void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_status) {
		ctx->error = blk_status_to_errno(bio->bi_status);
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
	if (!bio || !octx) {
		drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now?  disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		kfree(octx);
		if (bio)
			bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;
	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

		atomic_set(&ctx.pending, 1);
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}
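
/*
 * Note the fan-out pattern above: ctx.pending starts at 1 so completions
 * arriving while flushes are still being submitted cannot complete
 * ctx.done early; the final atomic_dec_and_test() either finds all bios
 * already done or waits for the last one_flush_endio() to signal
 * completion.
 */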

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}
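
/*
 * An epoch can only be finished once its size is non-zero (it saw at
 * least one write), no writes in it are still active, and its barrier
 * number is known (or we are cleaning up).  Finishing the current epoch
 * recycles it in place (FE_RECYCLED); finishing an older epoch frees it
 * (FE_DESTROYED) and the loop continues with the next epoch in the list.
 */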

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
	struct disk_conf *dc;

	dc = rcu_dereference(bdev->disk_conf);

	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
		wo = WO_DRAIN_IO;
	if (wo == WO_DRAIN_IO && !dc->disk_drain)
		wo = WO_NONE;

	return wo;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @resource:	DRBD resource.
 * @bdev:	backing device whose limits to honor, may be NULL.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo)
{
	struct drbd_device *device;
	enum write_ordering_e pwo;
	int vnr;
	static char *write_ordering_str[] = {
		[WO_NONE] = "none",
		[WO_DRAIN_IO] = "drain",
		[WO_BDEV_FLUSH] = "flush",
	};

	pwo = resource->write_ordering;
	if (wo != WO_BDEV_FLUSH)
		wo = min(pwo, wo);
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, vnr) {
		if (get_ldev(device)) {
			wo = max_allowed_wo(device->ldev, wo);
			if (device->ldev == bdev)
				bdev = NULL;
			put_ldev(device);
		}
	}

	if (bdev)
		wo = max_allowed_wo(bdev, wo);

	rcu_read_unlock();

	resource->write_ordering = wo;
	if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}
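
/*
 * The methods form a strict order: WO_BDEV_FLUSH > WO_DRAIN_IO > WO_NONE.
 * A method only ever gets demoted, never upgraded, except by an explicit
 * request for WO_BDEV_FLUSH, and every backing device can further cap it
 * via its disk_flushes/disk_drain settings (see max_allowed_wo() above).
 */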

static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
	struct block_device *bdev = device->ldev->backing_bdev;

	if (blkdev_issue_zeroout(bdev, peer_req->i.sector, peer_req->i.size >> 9,
			GFP_NOIO, 0))
		peer_req->flags |= EE_WAS_ERROR;

	drbd_endio_write_sec_final(peer_req);
}

static void drbd_issue_peer_wsame(struct drbd_device *device,
				  struct drbd_peer_request *peer_req)
{
	struct block_device *bdev = device->ldev->backing_bdev;
	sector_t s = peer_req->i.sector;
	sector_t nr = peer_req->i.size >> 9;
	if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
		peer_req->flags |= EE_WAS_ERROR;
	drbd_endio_write_sec_final(peer_req);
}
1514
1515
45bb912b 1516/**
fbe29dec 1517 * drbd_submit_peer_request()
b30ab791 1518 * @device: DRBD device.
db830c46 1519 * @peer_req: peer request
1eff9d32 1520 * @rw: flag field, see bio->bi_opf
10f6d992
LE
1521 *
1522 * May spread the pages to multiple bios,
1523 * depending on bio_add_page restrictions.
1524 *
1525 * Returns 0 if all bios have been submitted,
1526 * -ENOMEM if we could not allocate enough bios,
1527 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1528 * single page to an empty bio (which should never happen and likely indicates
1529 * that the lower level IO stack is in some way broken). This has been observed
 * on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
			     struct drbd_peer_request *peer_req,
			     const unsigned op, const unsigned op_flags,
			     const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned data_size = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* TRIM/DISCARD: for now, always use the helper function
	 * blkdev_issue_zeroout(..., discard=true).
	 * It's synchronous, but it does the right thing wrt. bio splitting.
	 * Correctness first, performance later.  Next step is to code an
	 * asynchronous variant of the same.
	 */
	if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) {
		/* wait for all pending IO completions, before we start
		 * zeroing things out. */
		conn_wait_active_ee_empty(peer_req->peer_device->connection);
		/* add it to the active list now,
		 * so we can find it to present it in debugfs */
		peer_req->submit_jif = jiffies;
		peer_req->flags |= EE_SUBMITTED;

		/* If this was a resync request from receive_rs_deallocated(),
		 * it is already on the sync_ee list */
		if (list_empty(&peer_req->w.list)) {
			spin_lock_irq(&device->resource->req_lock);
			list_add_tail(&peer_req->w.list, &device->active_ee);
			spin_unlock_irq(&device->resource->req_lock);
		}

		if (peer_req->flags & EE_IS_TRIM)
			drbd_issue_peer_discard(device, peer_req);
		else /* EE_WRITE_SAME */
			drbd_issue_peer_wsame(device, peer_req);
		return 0;
	}

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio_set_op_attrs(bio, op, op_flags);
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0))
			goto next_bio;
		data_size -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(device, data_size == 0);
	D_ASSERT(device, page == NULL);

	atomic_set(&peer_req->pending_bios, n_bios);
	/* for debugfs: update timestamp, mark as submitted */
	peer_req->submit_jif = jiffies;
	peer_req->flags |= EE_SUBMITTED;
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(device, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}
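/*
 * For illustration: the accumulate-and-retry pattern above (goto
 * next_bio whenever bio_add_page() refuses another page), in isolation,
 * as a stand-alone user-space sketch.  MAX_PAGES_PER_BIO and the demo_*
 * names are made up; the real limit is whatever queue restrictions
 * bio_add_page() enforces.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PAGES_PER_BIO 4

struct demo_bio {
	int n_pages;
	struct demo_bio *bi_next;
};

static int demo_add_page(struct demo_bio *bio)
{
	if (bio->n_pages >= MAX_PAGES_PER_BIO)
		return 0;	/* "does not fit": caller must open a new bio */
	bio->n_pages++;
	return 1;
}

int main(void)
{
	struct demo_bio *bios = NULL, *bio;
	int nr_pages = 10, n_bios = 0;

next_bio:
	bio = calloc(1, sizeof(*bio));
	bio->bi_next = bios;	/* prepend, like "bio->bi_next = bios; bios = bio;" above */
	bios = bio;
	n_bios++;

	while (nr_pages) {
		if (!demo_add_page(bio))
			goto next_bio;	/* the refused page is retried in the new bio */
		nr_pages--;
	}
	printf("split 10 pages into %d bios\n", n_bios);	/* prints 3 */
	return 0;
}
#endif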

static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&device->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_wait_ee_list_empty(device, &device->active_ee);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
{
	int rv;
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
	connection->current_epoch->barrier_nr = p->barrier;
	connection->current_epoch->connection = connection;
	rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (connection->resource->write_ordering) {
	case WO_NONE:
		if (rv == FE_RECYCLED)
			return 0;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
		/* Fall through */

	case WO_BDEV_FLUSH:
	case WO_DRAIN_IO:
		conn_wait_active_ee_empty(connection);
		drbd_flush(connection);

		if (atomic_read(&connection->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		return 0;
	default:
		drbd_err(connection, "Strangeness in connection->write_ordering %d\n",
			 connection->resource->write_ordering);
		return -EIO;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&connection->epoch_lock);
	if (atomic_read(&connection->current_epoch->epoch_size)) {
		list_add(&epoch->list, &connection->current_epoch->list);
		connection->current_epoch = epoch;
		connection->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&connection->epoch_lock);

	return 0;
}

/* quick wrapper in case payload size != request_size (write same) */
static void drbd_csum_ee_size(struct crypto_shash *h,
			      struct drbd_peer_request *r, void *d,
			      unsigned int payload_size)
{
	unsigned int tmp = r->i.size;
	r->i.size = payload_size;
	drbd_csum_ee(h, r, d);
	r->i.size = tmp;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data.
 * data_size: actual payload ("data in")
 *	for normal writes that is bi_size.
 *	for discards, that is zero.
 *	for write same, it is logical_block_size.
 * both trim and write same have the bi_size ("data len to be affected")
 * as extra argument in the packet header.
 */
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
	      struct packet_info *pi) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	const sector_t capacity = drbd_get_capacity(device->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int digest_size, err;
	unsigned int data_size = pi->size, ds;
	void *dig_in = peer_device->connection->int_dig_in;
	void *dig_vv = peer_device->connection->int_dig_vv;
	unsigned long *data;
	struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
	struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;

	digest_size = 0;
	if (!trim && peer_device->connection->peer_integrity_tfm) {
		digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 * here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
		if (err)
			return NULL;
		data_size -= digest_size;
	}

	/* assume request_size == data_size, but special case trim and wsame. */
	ds = data_size;
	if (trim) {
		if (!expect(data_size == 0))
			return NULL;
		ds = be32_to_cpu(trim->size);
	} else if (wsame) {
		if (data_size != queue_logical_block_size(device->rq_queue)) {
			drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
				data_size, queue_logical_block_size(device->rq_queue));
			return NULL;
		}
		if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
			drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
				data_size, bdev_logical_block_size(device->ldev->backing_bdev));
			return NULL;
		}
		ds = be32_to_cpu(wsame->size);
	}

	if (!expect(IS_ALIGNED(ds, 512)))
		return NULL;
	if (trim || wsame) {
		if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
			return NULL;
	} else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (ds>>9) > capacity) {
		drbd_err(device, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, ds);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	peer_req->flags |= EE_WRITE;
	if (trim) {
		peer_req->flags |= EE_IS_TRIM;
		return peer_req;
	}
	if (wsame)
		peer_req->flags |= EE_WRITE_SAME;

	/* receive payload size bytes into page chain */
	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		err = drbd_recv_all_warn(peer_device->connection, data, len);
		if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
			drbd_err(device, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (err) {
			drbd_free_peer_req(device, peer_req);
			return NULL;
		}
		ds -= len;
	}

	if (digest_size) {
		drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
		if (memcmp(dig_in, dig_vv, digest_size)) {
			drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(device, peer_req);
			return NULL;
		}
	}
	device->recv_cnt += data_size >> 9;
	return peer_req;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
{
	struct page *page;
	int err = 0;
	void *data;

	if (!data_size)
		return 0;

	page = drbd_alloc_pages(peer_device, 1, 1);

	data = kmap(page);
	while (data_size) {
		unsigned int len = min_t(int, data_size, PAGE_SIZE);

		err = drbd_recv_all_warn(peer_device->connection, data, len);
		if (err)
			break;
		data_size -= len;
	}
	kunmap(page);
	drbd_free_pages(peer_device->device, page, 0);
	return err;
}

static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct bio *bio;
	int digest_size, err, expect;
	void *dig_in = peer_device->connection->int_dig_in;
	void *dig_vv = peer_device->connection->int_dig_vv;

	digest_size = 0;
	if (peer_device->connection->peer_integrity_tfm) {
		digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
		if (err)
			return err;
		data_size -= digest_size;
	}

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	peer_device->device->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);

	bio_for_each_segment(bvec, bio, iter) {
		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
		expect = min_t(int, data_size, bvec.bv_len);
		err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
		kunmap(bvec.bv_page);
		if (err)
			return err;
		data_size -= expect;
	}

	if (digest_size) {
		drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, digest_size)) {
			drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
			return -EINVAL;
		}
	}

	D_ASSERT(peer_device->device, data_size == 0);
	return 0;
}

/*
 * e_end_resync_block() is called in ack_sender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
	sector_t sector = peer_req->i.sector;
	int err;

	D_ASSERT(device, drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(device, sector, peer_req->i.size);
		err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(device, sector, peer_req->i.size);

		err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
	}
	dec_unacked(device);

	return err;
}

static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
			    struct packet_info *pi) __releases(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
	if (!peer_req)
		goto fail;

	dec_rs_pending(device);

	inc_unacked(device);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	peer_req->w.cb = e_end_resync_block;
	peer_req->submit_jif = jiffies;

	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&peer_req->w.list, &device->sync_ee);
	spin_unlock_irq(&device->resource->req_lock);

	atomic_add(pi->size >> 9, &device->rs_sect_ev);
	if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
				     DRBD_FAULT_RS_WR) == 0)
		return 0;

	/* don't care for the reason here */
	drbd_err(device, "submit failed, triggering re-connect\n");
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&device->resource->req_lock);

	drbd_free_peer_req(device, peer_req);
fail:
	put_ldev(device);
	return -EIO;
}

static struct drbd_request *
find_request(struct drbd_device *device, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok) {
		drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}

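/*
 * For illustration: the block_id round-trip that find_request() relies
 * on, reduced to user space.  The sender ships a request pointer as an
 * opaque u64; when the peer echoes it back, the pointer is only
 * dereferenced after it was validated against a lookup structure (the
 * interval tree above).  All demo_* names are hypothetical.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

struct demo_req { uint64_t sector; };

static struct demo_req *registry[16];	/* stand-in for the interval tree */

static int demo_contains(struct demo_req *req)
{
	int i;

	for (i = 0; i < 16; i++)
		if (registry[i] == req)
			return 1;
	return 0;
}

int main(void)
{
	struct demo_req req = { .sector = 1234 };
	uint64_t wire_id;
	struct demo_req *found;

	registry[0] = &req;

	wire_id = (uint64_t)(uintptr_t)&req;		/* sent to the peer ... */
	found = (struct demo_req *)(uintptr_t)wire_id;	/* ... and echoed back */

	if (demo_contains(found))	/* never trust the cast alone */
		printf("found request for sector %llu\n",
		       (unsigned long long)found->sector);
	return 0;
}
#endif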
static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct drbd_request *req;
	sector_t sector;
	int err;
	struct p_data *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&device->resource->req_lock);
	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&device->resource->req_lock);
	if (unlikely(!req))
		return -EIO;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(peer_device, req, sector, pi->size);
	if (!err)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return err;
}

static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	int err;
	struct p_data *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(device, p->block_id == ID_SYNCER);

	if (get_ldev(device)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(peer_device, sector, pi);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(peer_device, pi->size);

		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
	}

	atomic_add(pi->size >> 9, &device->rs_sect_in);

	return err;
}

static void restart_conflicting_writes(struct drbd_device *device,
				       sector_t sector, int size)
{
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
			continue;
		/* as it is RQ_POSTPONED, this will cause it to
		 * be queued on the retry workqueue. */
		__req_mod(req, CONFLICT_RESOLVED, NULL);
	}
}

/*
 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
	sector_t sector = peer_req->i.sector;
	int err = 0, pcmd;

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (device->state.conn >= C_SYNC_SOURCE &&
				device->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(peer_device, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(device, sector, peer_req->i.size);
		} else {
			err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(device);
	}

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
		spin_lock_irq(&device->resource->req_lock);
		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(device, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(device, sector, peer_req->i.size);
		spin_unlock_irq(&device->resource->req_lock);
	} else
		D_ASSERT(device, drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return err;
}

static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	int err;

	err = drbd_send_ack(peer_device, ack, peer_req);
	dec_unacked(peer_device->device);

	return err;
}

static int e_send_superseded(struct drbd_work *w, int unused)
{
	return e_send_ack(w, P_SUPERSEDED);
}

static int e_send_retry_write(struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_connection *connection = peer_req->peer_device->connection;

	return e_send_ack(w, connection->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_SUPERSEDED);
}

static bool seq_greater(u32 a, u32 b)
{
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 * a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
}

static u32 seq_max(u32 a, u32 b)
{
	return seq_greater(a, b) ? a : b;
}

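/*
 * For illustration: why the signed subtraction in seq_greater() copes
 * with 32-bit wrap-around.  Stand-alone sketch; the cast form below is
 * equivalent in practice to the (s32)a - (s32)b above on two's
 * complement machines.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

static int demo_seq_greater(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	/* 1 comes "after" 0xffffffff once the counter wraps */
	printf("%d\n", demo_seq_greater(1, 0xffffffffu));	/* 1 */
	printf("%d\n", demo_seq_greater(0xffffffffu, 1));	/* 0 */
	printf("%d\n", demo_seq_greater(5, 5));			/* 0 */
	return 0;
}
#endif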
static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
{
	struct drbd_device *device = peer_device->device;
	unsigned int newest_peer_seq;

	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
		spin_lock(&device->peer_seq_lock);
		newest_peer_seq = seq_max(device->peer_seq, peer_seq);
		device->peer_seq = newest_peer_seq;
		spin_unlock(&device->peer_seq_lock);
		/* wake up only if we actually changed device->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&device->seq_wait);
	}
}

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}

/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
	struct drbd_peer_request *rs_req;
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
		if (overlaps(peer_req->i.sector, peer_req->i.size,
			     rs_req->i.sector, rs_req->i.size)) {
			rv = true;
			break;
		}
	}
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

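/*
 * For illustration: overlaps() compares (sector, byte length) pairs by
 * converting lengths to sectors (l >> 9).  Two 4 KiB requests (8
 * sectors each) overlap unless one ends at or before the start of the
 * other.  Stand-alone sketch with the same arithmetic:
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdio.h>

typedef unsigned long long sector_t;

static int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}

int main(void)
{
	printf("%d\n", overlaps(0, 4096, 4, 4096));	/* 1: [0,8) vs [4,12) */
	printf("%d\n", overlaps(0, 4096, 8, 4096));	/* 0: adjacent */
	return 0;
}
#endif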
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case peer_seq is larger than device->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update device->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
{
	struct drbd_device *device = peer_device->device;
	DEFINE_WAIT(wait);
	long timeout;
	int ret = 0, tp;

	if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
		return 0;

	spin_lock(&device->peer_seq_lock);
	for (;;) {
		if (!seq_greater(peer_seq - 1, device->peer_seq)) {
			device->peer_seq = seq_max(device->peer_seq, peer_seq);
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		rcu_read_lock();
		tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
		rcu_read_unlock();

		if (!tp)
			break;

		/* Only need to wait if two_primaries is enabled */
		prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&device->peer_seq_lock);
		rcu_read_lock();
		timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
		rcu_read_unlock();
		timeout = schedule_timeout(timeout);
		spin_lock(&device->peer_seq_lock);
		if (!timeout) {
			ret = -ETIMEDOUT;
			drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
			break;
		}
	}
	spin_unlock(&device->peer_seq_lock);
	finish_wait(&device->seq_wait, &wait);
	return ret;
}

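/*
 * For illustration: the wrap-around headroom claimed in the comment
 * above, computed explicitly.  At ~10 GBit/s (~1<<30 bytes/s, i.e.
 * ~1<<21 sectors/s), a 24-bit counter wraps after 2^24 / 2^21 = 8
 * seconds, a 32-bit counter after 2^32 / 2^21 = 2048 seconds.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdio.h>

int main(void)
{
	unsigned long long sectors_per_sec = 1ULL << 21;

	printf("24-bit wrap: %llu s\n", (1ULL << 24) / sectors_per_sec);	/* 8 */
	printf("32-bit wrap: %llu s\n", (1ULL << 32) / sectors_per_sec);	/* 2048 */
	return 0;
}
#endif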
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio_flags(u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
}

static unsigned long wire_flags_to_bio_op(u32 dpf)
{
	if (dpf & DP_DISCARD)
		return REQ_OP_WRITE_ZEROES;
	else
		return REQ_OP_WRITE;
}

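/*
 * For illustration: how a peer's data-packet flags end up as bio
 * op/flags here.  A hypothetical, never-compiled usage sketch of the
 * two helpers above:
 */
#if 0	/* illustrative sketch, not part of the driver build */
static void wire_flags_example(void)
{
	/* normal write, with FUA and flush requested by the peer */
	unsigned long op = wire_flags_to_bio_op(DP_FUA | DP_FLUSH);
						/* REQ_OP_WRITE */
	unsigned long op_flags = wire_flags_to_bio_flags(DP_FUA | DP_FLUSH);
						/* REQ_FUA | REQ_PREFLUSH */

	/* a discard selects the op, regardless of the other flags */
	unsigned long discard = wire_flags_to_bio_op(DP_DISCARD);
						/* REQ_OP_WRITE_ZEROES */
}
#endif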
static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
				    unsigned int size)
{
	struct drbd_interval *i;

    repeat:
	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
			continue;
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&device->resource->req_lock);
		if (m.bio)
			complete_master_bio(device, &m);
		spin_lock_irq(&device->resource->req_lock);
		goto repeat;
	}
}

static int handle_write_conflicts(struct drbd_device *device,
				  struct drbd_peer_request *peer_req)
{
	struct drbd_connection *connection = peer_req->peer_device->connection;
	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;
	bool equal;
	int err;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&device->write_requests, &peer_req->i);

    repeat:
	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
		if (i == &peer_req->i)
			continue;
		if (i->completed)
			continue;

		if (!i->local) {
			/*
			 * Our peer has sent a conflicting remote request; this
			 * should not happen in a two-node setup.  Wait for the
			 * earlier peer request to complete.
			 */
			err = drbd_wait_misc(device, i);
			if (err)
				goto out;
			goto repeat;
		}

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be considered overwritten
			 * and thus superseded; otherwise, it will be retried
			 * once all overlapping requests have completed.
			 */
			bool superseded = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);

			if (!equal)
				drbd_alert(device, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  superseded ? "local" : "remote");

			peer_req->w.cb = superseded ? e_send_superseded :
						   e_send_retry_write;
			list_add_tail(&peer_req->w.list, &device->done_ee);
			queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);

			err = -ENOENT;
			goto out;
		} else {
			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

			if (!equal)
				drbd_alert(device, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request has been superseded
				 * or needs to be retried.
				 * Requests that have been superseded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(device, &req->i);
				if (err) {
					_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
					fail_postponed_requests(device, sector, size);
					goto out;
				}
				goto repeat;
			}
			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;
		}
	}
	err = 0;

 out:
	if (err)
		drbd_remove_epoch_entry_interval(device, peer_req);
	return err;
}

/* mirrored write */
static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	struct net_conf *nc;
	sector_t sector;
	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);
	int op, op_flags;
	u32 dp_flags;
	int err, tp;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;

	if (!get_ldev(device)) {
		int err2;

		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
		atomic_inc(&connection->current_epoch->epoch_size);
		err2 = drbd_drain_block(peer_device, pi->size);
		if (!err)
			err = err2;
		return err;
	}

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */

	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(peer_device, p->block_id, sector, pi);
	if (!peer_req) {
		put_ldev(device);
		return -EIO;
	}

	peer_req->w.cb = e_end_block;
	peer_req->submit_jif = jiffies;
	peer_req->flags |= EE_APPLICATION;

	dp_flags = be32_to_cpu(p->dp_flags);
	op = wire_flags_to_bio_op(dp_flags);
	op_flags = wire_flags_to_bio_flags(dp_flags);
	if (pi->cmd == P_TRIM) {
		D_ASSERT(peer_device, peer_req->i.size > 0);
		D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
		D_ASSERT(peer_device, peer_req->pages == NULL);
	} else if (peer_req->pages == NULL) {
		D_ASSERT(device, peer_req->i.size == 0);
		D_ASSERT(device, dp_flags & DP_FLUSH);
	}

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&connection->epoch_lock);
	peer_req->epoch = connection->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&connection->epoch_lock);

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	tp = nc->two_primaries;
	if (peer_device->connection->agreed_pro_version < 100) {
		switch (nc->wire_protocol) {
		case DRBD_PROT_C:
			dp_flags |= DP_SEND_WRITE_ACK;
			break;
		case DRBD_PROT_B:
			dp_flags |= DP_SEND_RECEIVE_ACK;
			break;
		}
	}
	rcu_read_unlock();

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		inc_unacked(device);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
	}

	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
	}

	if (tp) {
		/* two primaries implies protocol C */
		D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
		if (err)
			goto out_interrupted;
		spin_lock_irq(&device->resource->req_lock);
		err = handle_write_conflicts(device, peer_req);
		if (err) {
			spin_unlock_irq(&device->resource->req_lock);
			if (err == -ENOENT) {
				put_ldev(device);
				return 0;
			}
			goto out_interrupted;
		}
	} else {
		update_peer_seq(peer_device, peer_seq);
		spin_lock_irq(&device->resource->req_lock);
	}
	/* TRIM and WRITE_SAME are processed synchronously,
	 * we wait for all pending requests, respectively wait for
	 * active_ee to become empty in drbd_submit_peer_request();
	 * better not add ourselves here. */
	if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0)
		list_add_tail(&peer_req->w.list, &device->active_ee);
	spin_unlock_irq(&device->resource->req_lock);

	if (device->state.conn == C_SYNC_TARGET)
		wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));

	if (device->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(device, &peer_req->i);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
	}

	err = drbd_submit_peer_request(device, peer_req, op, op_flags,
				       DRBD_FAULT_DT_WR);
	if (!err)
		return 0;

	/* don't care for the reason here */
	drbd_err(device, "submit failed, triggering re-connect\n");
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(device, peer_req);
	spin_unlock_irq(&device->resource->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}

out_interrupted:
	drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
	put_ldev(device);
	drbd_free_peer_req(device, peer_req);
	return err;
}

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
			      bool throttle_if_app_is_waiting)
{
	struct lc_element *tmp;
	bool throttle = drbd_rs_c_min_rate_throttle(device);

	if (!throttle || throttle_if_app_is_waiting)
		return throttle;

	spin_lock_irq(&device->al_lock);
	tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags))
			throttle = false;
		/* Do not slow down if app IO is already waiting for this extent,
		 * and our progress is necessary for application IO to complete. */
	}
	spin_unlock_irq(&device->al_lock);

	return throttle;
}

bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
{
	struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	unsigned int c_min_rate;
	int curr_events;

	rcu_read_lock();
	c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
	rcu_read_unlock();

	/* feature disabled? */
	if (c_min_rate == 0)
		return false;

	curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
			atomic_read(&device->rs_sect_ev);

	if (atomic_read(&device->ap_actlog_cnt)
	    || curr_events - device->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		device->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
			rs_left = device->ov_left;
		else
			rs_left = drbd_bm_total_weight(device) - device->rs_failed;

		dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = device->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > c_min_rate)
			return true;
	}
	return false;
}

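/*
 * For illustration: the short-term rate estimate above with made-up
 * numbers.  Say the sync mark [i] is 3 seconds old and 6144 bitmap
 * bits (one bit covers 4 KiB, so 24 MiB) were cleared since: dbdt
 * works out to 8192 KB/s, so a c_min_rate of 4096 KB/s would throttle.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdio.h>

#define Bit2KB(bits) ((bits) << 2)	/* one bitmap bit covers 4 KiB */

int main(void)
{
	unsigned long dt = 3;			/* seconds since mark [i] */
	unsigned long db = 6144;		/* bitmap bits cleared since */
	unsigned long dbdt = Bit2KB(db / dt);	/* 8192 KB/s */
	unsigned int c_min_rate = 4096;		/* KB/s, from disk_conf */

	printf("dbdt=%lu KB/s -> %s\n", dbdt,
	       dbdt > c_min_rate ? "throttle" : "full speed");
	return 0;
}
#endif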
static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;
	sector_t sector;
	sector_t capacity;
	struct drbd_peer_request *peer_req;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = pi->data;

	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	device = peer_device->device;
	capacity = drbd_get_capacity(device->this_bdev);

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}
	if (sector + (size>>9) > capacity) {
		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}

	if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
		verb = 1;
		switch (pi->cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
			break;
		case P_RS_THIN_REQ:
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(device);
			drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			BUG();
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(peer_device, pi->size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
			size, GFP_NOIO);
	if (!peer_req) {
		put_ldev(device);
		return -ENOMEM;
	}

	switch (pi->cmd) {
	case P_DATA_REQUEST:
		peer_req->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		peer_req->flags |= EE_APPLICATION;
		goto submit;

	case P_RS_THIN_REQ:
		/* If at some point in the future we have a smart way to
		   find out if this data block is completely deallocated,
		   then we would do something smarter here than reading
		   the block... */
		peer_req->flags |= EE_RS_THIN_REQ;
		/* fall through */
	case P_RS_DATA_REQUEST:
		peer_req->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = pi->size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		peer_req->digest = di;
		peer_req->flags |= EE_HAS_DIGEST;

		if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
			goto out_free_e;

		if (pi->cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
			peer_req->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
			/* remember to report stats in drbd_resync_finished */
			device->use_csums = true;
		} else if (pi->cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &device->rs_sect_in);
			peer_req->w.cb = w_e_end_ov_reply;
			dec_rs_pending(device);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (device->ov_start_sector == ~(sector_t)0 &&
		    peer_device->connection->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			device->ov_start_sector = sector;
			device->ov_position = sector;
			device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
			device->rs_total = device->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				device->rs_mark_left[i] = device->ov_left;
				device->rs_mark_time[i] = now;
			}
			drbd_info(device, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		peer_req->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		BUG();
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time.  For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through.  The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */

	/* Even though this may be a resync request, we do add to "read_ee";
	 * "sync_ee" is only used for resync WRITEs.
	 * Add to list early, so debugfs can find this request
	 * even if we have to sleep below. */
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&peer_req->w.list, &device->read_ee);
	spin_unlock_irq(&device->resource->req_lock);

	update_receiver_timing_details(connection, drbd_rs_should_slow_down);
	if (device->state.peer != R_PRIMARY
	    && drbd_rs_should_slow_down(device, sector, false))
		schedule_timeout_uninterruptible(HZ/10);
	update_receiver_timing_details(connection, drbd_rs_begin_io);
	if (drbd_rs_begin_io(device, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &device->rs_sect_ev);

submit:
	update_receiver_timing_details(connection, drbd_submit_peer_request);
	inc_unacked(device);
	if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
				     fault_type) == 0)
		return 0;

	/* don't care for the reason here */
	drbd_err(device, "submit failed, triggering re-connect\n");

out_free_e:
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&device->resource->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

	put_ldev(device);
	drbd_free_peer_req(device, peer_req);
	return -EIO;
}

/**
 * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
 */
static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;
	enum drbd_after_sb_p after_sb_0p;

	self = device->ldev->md.uuid[UI_BITMAP] & 1;
	peer = device->p_uuid[UI_BITMAP] & 1;

	ch_peer = device->p_uuid[UI_SIZE];
	ch_self = device->comm_bm_set;

	rcu_read_lock();
	after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
	rcu_read_unlock();
	switch (after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
	case ASB_VIOLENTLY:
		drbd_err(device, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = 1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		drbd_warn(device, "Discard younger/older primary did not find a decision\n"
			  "Using discard-least-changes instead\n");
		/* fall through */
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv = 1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
		/* else: fall through */
	case ASB_DISCARD_LEAST_CHG:
		if (ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv = 1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv = 1;
	}

	return rv;
}

/**
 * drbd_asb_recover_1p - Recover after split-brain with one remaining primary
 */
static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_1p;

	rcu_read_lock();
	after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
	rcu_read_unlock();
	switch (after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_DISCARD_ZERO_CHG:
		drbd_err(device, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(peer_device);
		if (hg == -1 && device->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1 && device->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(peer_device);
		break;
	case ASB_DISCARD_SECONDARY:
		return device->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(peer_device);
		if (hg == -1 && device->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(device, "pri-lost-after-sb");
			} else {
				drbd_warn(device, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

/**
 * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
 */
static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_2p;

	rcu_read_lock();
	after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
	rcu_read_unlock();
	switch (after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_DISCARD_ZERO_CHG:
		drbd_err(device, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(peer_device);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(peer_device);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(device, "pri-lost-after-sb");
			} else {
				drbd_warn(device, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

b30ab791 3111static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
b411b363
PR
3112 u64 bits, u64 flags)
3113{
3114 if (!uuid) {
d0180171 3115 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
b411b363
PR
3116 return;
3117 }
d0180171 3118 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
b411b363
PR
3119 text,
3120 (unsigned long long)uuid[UI_CURRENT],
3121 (unsigned long long)uuid[UI_BITMAP],
3122 (unsigned long long)uuid[UI_HISTORY_START],
3123 (unsigned long long)uuid[UI_HISTORY_END],
3124 (unsigned long long)bits,
3125 (unsigned long long)flags);
3126}
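/* Illustrative sample output of drbd_uuid_dump() (made-up values):
 *
 *   self 0123456789ABCDEE:0000000000000000:89ABCDEF01234566:0000000000000004 bits:42 flags:0
 *
 * The four fields are UI_CURRENT:UI_BITMAP:UI_HISTORY_START:UI_HISTORY_END,
 * followed by a bit count and a flags value supplied by the caller. */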
3127
3128/*
3129  100 after split brain, try auto recover
3130 2 C_SYNC_SOURCE set BitMap
3131 1 C_SYNC_SOURCE use BitMap
3132 0 no Sync
3133 -1 C_SYNC_TARGET use BitMap
3134 -2 C_SYNC_TARGET set BitMap
3135 -100 after split brain, disconnect
3136-1000 unrelated data
4a23f264
PR
3137-1091 requires proto 91
3138-1096 requires proto 96
b411b363 3139 */
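/* Illustrative sketch (not part of the original code): how a caller could
 * recover the minimum required protocol version from the negative return
 * codes listed above.  The helper name is hypothetical; the real decoding
 * is done inline in drbd_sync_handshake() further down. */
static inline int example_required_proto(int hg)
{
	if (hg < -0x10000)	/* encoded as -(0x10000 | proto | (fflags << 8)) */
		return -hg & 0xff;
	if (hg < -1000)		/* e.g. -1091, -1096 */
		return -hg - 1000;
	return 0;		/* no protocol upgrade required */
}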
f2d3d75b
LE
3140
3141static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
b411b363 3142{
44a4d551
LE
3143 struct drbd_peer_device *const peer_device = first_peer_device(device);
3144 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
b411b363
PR
3145 u64 self, peer;
3146 int i, j;
3147
b30ab791
AG
3148 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3149 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3150
3151 *rule_nr = 10;
3152 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
3153 return 0;
3154
3155 *rule_nr = 20;
3156 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
3157 peer != UUID_JUST_CREATED)
3158 return -2;
3159
3160 *rule_nr = 30;
3161 if (self != UUID_JUST_CREATED &&
3162 (peer == UUID_JUST_CREATED || peer == (u64)0))
3163 return 2;
3164
3165 if (self == peer) {
3166 int rct, dc; /* roles at crash time */
3167
b30ab791 3168 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
b411b363 3169
44a4d551 3170 if (connection->agreed_pro_version < 91)
4a23f264 3171 return -1091;
b411b363 3172
b30ab791
AG
3173 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3174 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
d0180171 3175 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
b30ab791
AG
3176 drbd_uuid_move_history(device);
3177 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3178 device->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3179
b30ab791
AG
3180 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3181 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3182 *rule_nr = 34;
3183 } else {
d0180171 3184 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
b411b363
PR
3185 *rule_nr = 36;
3186 }
3187
3188 return 1;
3189 }
3190
b30ab791 3191 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
b411b363 3192
44a4d551 3193 if (connection->agreed_pro_version < 91)
4a23f264 3194 return -1091;
b411b363 3195
b30ab791
AG
3196 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3197 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
d0180171 3198 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
b411b363 3199
b30ab791
AG
3200 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3201 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3202 device->p_uuid[UI_BITMAP] = 0UL;
b411b363 3203
b30ab791 3204 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363
PR
3205 *rule_nr = 35;
3206 } else {
d0180171 3207 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
b411b363
PR
3208 *rule_nr = 37;
3209 }
3210
3211 return -1;
3212 }
3213
3214 /* Common power [off|failure] */
b30ab791
AG
3215 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3216 (device->p_uuid[UI_FLAGS] & 2);
b411b363
PR
3217 /* lowest bit is set when we were primary,
3218 * next bit (weight 2) is set when peer was primary */
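	/* Example: rct == 2 means only the peer had been primary when it
	 * went down; the switch below then makes the peer the sync source
	 * (we return -1, i.e. we become C_SYNC_TARGET). */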
3219 *rule_nr = 40;
3220
f2d3d75b
LE
3221 /* Neither has the "crashed primary" flag set,
3222 * only a replication link hiccup. */
3223 if (rct == 0)
3224 return 0;
3225
3226 /* Current UUID equal and no bitmap uuid; does not necessarily
3227 * mean this was a "simultaneous hard crash", maybe IO was
3228 * frozen, so no UUID-bump happened.
3229 * This is a protocol change, overload DRBD_FF_WSAME as flag
3230 * for "new-enough" peer DRBD version. */
3231 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3232 *rule_nr = 41;
3233 if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3234 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3235 return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3236 }
3237 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3238 /* At least one has the "crashed primary" bit set,
3239 * both are primary now, but neither has rotated its UUIDs?
3240 * "Can not happen." */
3241 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3242 return -100;
3243 }
3244 if (device->state.role == R_PRIMARY)
3245 return 1;
3246 return -1;
3247 }
3248
3249 /* Both are secondary.
3250 * Really looks like recovery from simultaneous hard crash.
3251 * Check which had been primary before, and arbitrate. */
b411b363 3252 switch (rct) {
f2d3d75b 3253 case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
b411b363
PR
3254 case 1: /* self_pri && !peer_pri */ return 1;
3255 case 2: /* !self_pri && peer_pri */ return -1;
3256 case 3: /* self_pri && peer_pri */
44a4d551 3257 dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
b411b363
PR
3258 return dc ? -1 : 1;
3259 }
3260 }
3261
3262 *rule_nr = 50;
b30ab791 3263 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3264 if (self == peer)
3265 return -1;
3266
3267 *rule_nr = 51;
b30ab791 3268 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3269 if (self == peer) {
44a4d551 3270 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3271 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3272 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3273 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
3274 /* The last P_SYNC_UUID did not get through. Undo the peer's UUID
3275 modifications from the last start of a resync as sync source. */
3276
44a4d551 3277 if (connection->agreed_pro_version < 91)
4a23f264 3278 return -1091;
b411b363 3279
b30ab791
AG
3280 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3281 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
4a23f264 3282
d0180171 3283 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
b30ab791 3284 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
4a23f264 3285
b411b363
PR
3286 return -1;
3287 }
3288 }
3289
3290 *rule_nr = 60;
b30ab791 3291 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
b411b363 3292 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3293 peer = device->p_uuid[i] & ~((u64)1);
b411b363
PR
3294 if (self == peer)
3295 return -2;
3296 }
3297
3298 *rule_nr = 70;
b30ab791
AG
3299 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3300 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3301 if (self == peer)
3302 return 1;
3303
3304 *rule_nr = 71;
b30ab791 3305 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3306 if (self == peer) {
44a4d551 3307 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3308 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3309 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3310 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
3311 /* The last P_SYNC_UUID did not get through. Undo our own UUID
3312 modifications from the last start of a resync as sync source. */
3313
44a4d551 3314 if (connection->agreed_pro_version < 91)
4a23f264 3315 return -1091;
b411b363 3316
b30ab791
AG
3317 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3318 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 3319
d0180171 3320 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
b30ab791
AG
3321 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3322 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3323
3324 return 1;
3325 }
3326 }
3327
3328
3329 *rule_nr = 80;
b30ab791 3330 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363 3331 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3332 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363
PR
3333 if (self == peer)
3334 return 2;
3335 }
3336
3337 *rule_nr = 90;
b30ab791
AG
3338 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3339 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3340 if (self == peer && self != ((u64)0))
3341 return 100;
3342
3343 *rule_nr = 100;
3344 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3345 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363 3346 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
b30ab791 3347 peer = device->p_uuid[j] & ~((u64)1);
b411b363
PR
3348 if (self == peer)
3349 return -100;
3350 }
3351 }
3352
3353 return -1000;
3354}
3355
3356/* drbd_sync_handshake() returns the new conn state on success, or
3357 CONN_MASK (-1) on failure.
3358 */
69a22773
AG
3359static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3360 enum drbd_role peer_role,
b411b363
PR
3361 enum drbd_disk_state peer_disk) __must_hold(local)
3362{
69a22773 3363 struct drbd_device *device = peer_device->device;
b411b363
PR
3364 enum drbd_conns rv = C_MASK;
3365 enum drbd_disk_state mydisk;
44ed167d 3366 struct net_conf *nc;
d29e89e3 3367 int hg, rule_nr, rr_conflict, tentative, always_asbp;
b411b363 3368
b30ab791 3369 mydisk = device->state.disk;
b411b363 3370 if (mydisk == D_NEGOTIATING)
b30ab791 3371 mydisk = device->new_state_tmp.disk;
b411b363 3372
d0180171 3373 drbd_info(device, "drbd_sync_handshake:\n");
9f2247bb 3374
b30ab791
AG
3375 spin_lock_irq(&device->ldev->md.uuid_lock);
3376 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3377 drbd_uuid_dump(device, "peer", device->p_uuid,
3378 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363 3379
f2d3d75b 3380 hg = drbd_uuid_compare(device, peer_role, &rule_nr);
b30ab791 3381 spin_unlock_irq(&device->ldev->md.uuid_lock);
b411b363 3382
d0180171 3383 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
b411b363
PR
3384
3385 if (hg == -1000) {
d0180171 3386 drbd_alert(device, "Unrelated data, aborting!\n");
b411b363
PR
3387 return C_MASK;
3388 }
f2d3d75b
LE
3389 if (hg < -0x10000) {
3390 int proto, fflags;
3391 hg = -hg;
3392 proto = hg & 0xff;
3393 fflags = (hg >> 8) & 0xff;
3394 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3395 proto, fflags);
3396 return C_MASK;
3397 }
4a23f264 3398 if (hg < -1000) {
d0180171 3399 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
3400 return C_MASK;
3401 }
3402
3403 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3404 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
3405 int f = (hg == -100) || abs(hg) == 2;
3406 hg = mydisk > D_INCONSISTENT ? 1 : -1;
3407 if (f)
3408 hg = hg*2;
d0180171 3409 drbd_info(device, "Becoming sync %s due to disk states.\n",
b411b363
PR
3410 hg > 0 ? "source" : "target");
3411 }
3412
3a11a487 3413 if (abs(hg) == 100)
b30ab791 3414 drbd_khelper(device, "initial-split-brain");
3a11a487 3415
44ed167d 3416 rcu_read_lock();
69a22773 3417 nc = rcu_dereference(peer_device->connection->net_conf);
d29e89e3
RK
3418 always_asbp = nc->always_asbp;
3419 rr_conflict = nc->rr_conflict;
3420 tentative = nc->tentative;
3421 rcu_read_unlock();
44ed167d 3422
d29e89e3 3423 if (hg == 100 || (hg == -100 && always_asbp)) {
b30ab791 3424 int pcount = (device->state.role == R_PRIMARY)
b411b363
PR
3425 + (peer_role == R_PRIMARY);
3426 int forced = (hg == -100);
3427
3428 switch (pcount) {
3429 case 0:
69a22773 3430 hg = drbd_asb_recover_0p(peer_device);
b411b363
PR
3431 break;
3432 case 1:
69a22773 3433 hg = drbd_asb_recover_1p(peer_device);
b411b363
PR
3434 break;
3435 case 2:
69a22773 3436 hg = drbd_asb_recover_2p(peer_device);
b411b363
PR
3437 break;
3438 }
3439 if (abs(hg) < 100) {
d0180171 3440 drbd_warn(device, "Split-Brain detected, %d primaries, "
b411b363
PR
3441 "automatically solved. Sync from %s node\n",
3442 pcount, (hg < 0) ? "peer" : "this");
3443 if (forced) {
d0180171 3444 drbd_warn(device, "Doing a full sync, since"
b411b363
PR
3445 " UUIDs where ambiguous.\n");
3446 hg = hg*2;
3447 }
3448 }
3449 }
3450
3451 if (hg == -100) {
b30ab791 3452 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
b411b363 3453 hg = -1;
b30ab791 3454 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
b411b363
PR
3455 hg = 1;
3456
3457 if (abs(hg) < 100)
d0180171 3458 drbd_warn(device, "Split-Brain detected, manually solved. "
b411b363
PR
3459 "Sync from %s node\n",
3460 (hg < 0) ? "peer" : "this");
3461 }
3462
3463 if (hg == -100) {
580b9767
LE
3464 /* FIXME this log message is not correct if we end up here
3465 * after an attempted attach on a diskless node.
3466 * We just refuse to attach -- well, we drop the "connection"
3467 * to that disk, in a way... */
d0180171 3468 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
b30ab791 3469 drbd_khelper(device, "split-brain");
b411b363
PR
3470 return C_MASK;
3471 }
3472
3473 if (hg > 0 && mydisk <= D_INCONSISTENT) {
d0180171 3474 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
b411b363
PR
3475 return C_MASK;
3476 }
3477
3478 if (hg < 0 && /* by intention we do not use mydisk here. */
b30ab791 3479 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
44ed167d 3480 switch (rr_conflict) {
b411b363 3481 case ASB_CALL_HELPER:
b30ab791 3482 drbd_khelper(device, "pri-lost");
b411b363
PR
3483 /* fall through */
3484 case ASB_DISCONNECT:
d0180171 3485 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
b411b363
PR
3486 return C_MASK;
3487 case ASB_VIOLENTLY:
d0180171 3488 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
b411b363
PR
3489 "assumption\n");
3490 }
3491 }
3492
69a22773 3493 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
cf14c2e9 3494 if (hg == 0)
d0180171 3495 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
cf14c2e9 3496 else
d0180171 3497 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
cf14c2e9
PR
3498 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3499 abs(hg) >= 2 ? "full" : "bit-map based");
3500 return C_MASK;
3501 }
3502
b411b363 3503 if (abs(hg) >= 2) {
d0180171 3504 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
b30ab791 3505 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
20ceb2b2 3506 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3507 return C_MASK;
3508 }
3509
3510 if (hg > 0) { /* become sync source. */
3511 rv = C_WF_BITMAP_S;
3512 } else if (hg < 0) { /* become sync target */
3513 rv = C_WF_BITMAP_T;
3514 } else {
3515 rv = C_CONNECTED;
b30ab791 3516 if (drbd_bm_total_weight(device)) {
d0180171 3517 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
b30ab791 3518 drbd_bm_total_weight(device));
b411b363
PR
3519 }
3520 }
3521
3522 return rv;
3523}
3524
f179d76d 3525static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3526{
3527 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3528 if (peer == ASB_DISCARD_REMOTE)
3529 return ASB_DISCARD_LOCAL;
b411b363
PR
3530
3531 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3532 if (peer == ASB_DISCARD_LOCAL)
3533 return ASB_DISCARD_REMOTE;
b411b363
PR
3534
3535 /* everything else is valid if they are equal on both sides. */
f179d76d 3536 return peer;
b411b363
PR
3537}
3538
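/* Behaviour sketch (illustrative): the peer reports its policy from its
 * own point of view, so the two "discard" directions must be mirrored
 * before comparing against our local configuration:
 *
 *	convert_after_sb(ASB_DISCARD_REMOTE) == ASB_DISCARD_LOCAL
 *	convert_after_sb(ASB_DISCARD_LOCAL)  == ASB_DISCARD_REMOTE
 *	convert_after_sb(ASB_DISCONNECT)     == ASB_DISCONNECT
 *
 * receive_protocol() below relies on this when checking the after-sb
 * settings for compatibility. */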
bde89a9e 3539static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3540{
e658983a 3541 struct p_protocol *p = pi->data;
036b17ea
PR
3542 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3543 int p_proto, p_discard_my_data, p_two_primaries, cf;
3544 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3545 char integrity_alg[SHARED_SECRET_MAX] = "";
3d0e6375 3546 struct crypto_shash *peer_integrity_tfm = NULL;
7aca6c75 3547 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3548
b411b363
PR
3549 p_proto = be32_to_cpu(p->protocol);
3550 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3551 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3552 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3553 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3554 cf = be32_to_cpu(p->conn_flags);
6139f60d 3555 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3556
bde89a9e 3557 if (connection->agreed_pro_version >= 87) {
86db0618 3558 int err;
cf14c2e9 3559
88104ca4 3560 if (pi->size > sizeof(integrity_alg))
86db0618 3561 return -EIO;
bde89a9e 3562 err = drbd_recv_all(connection, integrity_alg, pi->size);
86db0618
AG
3563 if (err)
3564 return err;
036b17ea 3565 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3566 }
3567
7d4c782c 3568 if (pi->cmd != P_PROTOCOL_UPDATE) {
bde89a9e 3569 clear_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3570
fbc12f45 3571 if (cf & CF_DRY_RUN)
bde89a9e 3572 set_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3573
fbc12f45 3574 rcu_read_lock();
bde89a9e 3575 nc = rcu_dereference(connection->net_conf);
b411b363 3576
fbc12f45 3577 if (p_proto != nc->wire_protocol) {
1ec861eb 3578 drbd_err(connection, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3579 goto disconnect_rcu_unlock;
3580 }
b411b363 3581
fbc12f45 3582 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
1ec861eb 3583 drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3584 goto disconnect_rcu_unlock;
3585 }
b411b363 3586
fbc12f45 3587 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
1ec861eb 3588 drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3589 goto disconnect_rcu_unlock;
3590 }
b411b363 3591
fbc12f45 3592 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
1ec861eb 3593 drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3594 goto disconnect_rcu_unlock;
3595 }
b411b363 3596
fbc12f45 3597 if (p_discard_my_data && nc->discard_my_data) {
1ec861eb 3598 drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3599 goto disconnect_rcu_unlock;
3600 }
b411b363 3601
fbc12f45 3602 if (p_two_primaries != nc->two_primaries) {
1ec861eb 3603 drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3604 goto disconnect_rcu_unlock;
3605 }
b411b363 3606
fbc12f45 3607 if (strcmp(integrity_alg, nc->integrity_alg)) {
1ec861eb 3608 drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3609 goto disconnect_rcu_unlock;
3610 }
b411b363 3611
fbc12f45 3612 rcu_read_unlock();
b411b363
PR
3613 }
3614
7d4c782c
AG
3615 if (integrity_alg[0]) {
3616 int hash_size;
3617
3618 /*
3619 * We can only change the peer data integrity algorithm
3620 * here. Changing our own data integrity algorithm
3621 * requires that we send a P_PROTOCOL_UPDATE packet at
3622 * the same time; otherwise, the peer has no way to
3623 * tell between which packets the algorithm should
3624 * change.
3625 */
b411b363 3626
3d0e6375 3627 peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
1b57e663
LE
3628 if (IS_ERR(peer_integrity_tfm)) {
3629 peer_integrity_tfm = NULL;
1ec861eb 3630 drbd_err(connection, "peer data-integrity-alg %s not supported\n",
7d4c782c
AG
3631 integrity_alg);
3632 goto disconnect;
3633 }
b411b363 3634
3d0e6375 3635 hash_size = crypto_shash_digestsize(peer_integrity_tfm);
7d4c782c
AG
3636 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3637 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3638 if (!(int_dig_in && int_dig_vv)) {
1ec861eb 3639 drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3640 goto disconnect;
3641 }
b411b363
PR
3642 }
3643
7d4c782c
AG
3644 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3645 if (!new_net_conf) {
1ec861eb 3646 drbd_err(connection, "Allocation of new net_conf failed\n");
7d4c782c
AG
3647 goto disconnect;
3648 }
3649
bde89a9e 3650 mutex_lock(&connection->data.mutex);
0500813f 3651 mutex_lock(&connection->resource->conf_update);
bde89a9e 3652 old_net_conf = connection->net_conf;
7d4c782c
AG
3653 *new_net_conf = *old_net_conf;
3654
3655 new_net_conf->wire_protocol = p_proto;
3656 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3657 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3658 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3659 new_net_conf->two_primaries = p_two_primaries;
3660
bde89a9e 3661 rcu_assign_pointer(connection->net_conf, new_net_conf);
0500813f 3662 mutex_unlock(&connection->resource->conf_update);
bde89a9e 3663 mutex_unlock(&connection->data.mutex);
7d4c782c 3664
3d0e6375 3665 crypto_free_shash(connection->peer_integrity_tfm);
bde89a9e
AG
3666 kfree(connection->int_dig_in);
3667 kfree(connection->int_dig_vv);
3668 connection->peer_integrity_tfm = peer_integrity_tfm;
3669 connection->int_dig_in = int_dig_in;
3670 connection->int_dig_vv = int_dig_vv;
7d4c782c
AG
3671
3672 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
1ec861eb 3673 drbd_info(connection, "peer data-integrity-alg: %s\n",
7d4c782c
AG
3674 integrity_alg[0] ? integrity_alg : "(none)");
3675
3676 synchronize_rcu();
3677 kfree(old_net_conf);
82bc0194 3678 return 0;
b411b363 3679
44ed167d
PR
3680disconnect_rcu_unlock:
3681 rcu_read_unlock();
b411b363 3682disconnect:
3d0e6375 3683 crypto_free_shash(peer_integrity_tfm);
036b17ea
PR
3684 kfree(int_dig_in);
3685 kfree(int_dig_vv);
bde89a9e 3686 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3687 return -EIO;
b411b363
PR
3688}
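/* The net_conf update above follows the usual RCU replace pattern
 * (simplified sketch, not a literal excerpt):
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	*new = *old;                                    // copy current settings
 *	new->field = value;                             // apply changes
 *	rcu_assign_pointer(connection->net_conf, new);  // publish
 *	synchronize_rcu();                              // wait out existing readers
 *	kfree(old);                                     // now safe to free
 */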
3689
3690/* helper function
3691 * input: alg name, feature name
3692 * return: NULL (alg name was "")
3693 * ERR_PTR(error) if something goes wrong
3694 * or the crypto hash ptr, if it worked out ok. */
3d0e6375
KC
3695static struct crypto_shash *drbd_crypto_alloc_digest_safe(
3696 const struct drbd_device *device,
b411b363
PR
3697 const char *alg, const char *name)
3698{
3d0e6375 3699 struct crypto_shash *tfm;
b411b363
PR
3700
3701 if (!alg[0])
3702 return NULL;
3703
3d0e6375 3704 tfm = crypto_alloc_shash(alg, 0, 0);
b411b363 3705 if (IS_ERR(tfm)) {
d0180171 3706 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
b411b363
PR
3707 alg, name, PTR_ERR(tfm));
3708 return tfm;
3709 }
b411b363
PR
3710 return tfm;
3711}
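/* Caller pattern sketch (illustrative): the three-way return value
 * (NULL for an empty name, ERR_PTR() on failure, a valid tfm on success)
 * is typically consumed the way receive_SyncParam() does below:
 *
 *	tfm = drbd_crypto_alloc_digest_safe(device, alg, "verify-alg");
 *	if (IS_ERR(tfm)) {
 *		tfm = NULL;	// don't free an ERR_PTR later
 *		goto disconnect;
 *	}
 *	// tfm may still be NULL here, meaning "no algorithm configured"
 */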
3712
bde89a9e 3713static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3714{
bde89a9e 3715 void *buffer = connection->data.rbuf;
4a76b161
AG
3716 int size = pi->size;
3717
3718 while (size) {
3719 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
bde89a9e 3720 s = drbd_recv(connection, buffer, s);
4a76b161
AG
3721 if (s <= 0) {
3722 if (s < 0)
3723 return s;
3724 break;
3725 }
3726 size -= s;
3727 }
3728 if (size)
3729 return -EIO;
3730 return 0;
3731}
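/* Note: draining the remaining pi->size payload bytes in
 * DRBD_SOCKET_BUFFER_SIZE chunks keeps the receive stream aligned on
 * packet boundaries even for commands we choose to ignore; a short read
 * is reported as -EIO to the caller. */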
3732
3733/*
3734 * config_unknown_volume - device configuration command for unknown volume
3735 *
3736 * When a device is added to an existing connection, the node on which the
3737 * device is added first will send configuration commands to its peer but the
3738 * peer will not know about the device yet. It will warn and ignore these
3739 * commands. Once the device is added on the second node, the second node will
3740 * send the same device configuration commands, but in the other direction.
3741 *
3742 * (We can also end up here if drbd is misconfigured.)
3743 */
bde89a9e 3744static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3745{
1ec861eb 3746 drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
2fcb8f30 3747 cmdname(pi->cmd), pi->vnr);
bde89a9e 3748 return ignore_remaining_packet(connection, pi);
4a76b161
AG
3749}
3750
bde89a9e 3751static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3752{
9f4fe9ad 3753 struct drbd_peer_device *peer_device;
b30ab791 3754 struct drbd_device *device;
e658983a 3755 struct p_rs_param_95 *p;
b411b363 3756 unsigned int header_size, data_size, exp_max_sz;
3d0e6375
KC
3757 struct crypto_shash *verify_tfm = NULL;
3758 struct crypto_shash *csums_tfm = NULL;
2ec91e0e 3759 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3760 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
bde89a9e 3761 const int apv = connection->agreed_pro_version;
813472ce 3762 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
778f271d 3763 int fifo_size = 0;
82bc0194 3764 int err;
b411b363 3765
9f4fe9ad
AG
3766 peer_device = conn_peer_device(connection, pi->vnr);
3767 if (!peer_device)
bde89a9e 3768 return config_unknown_volume(connection, pi);
9f4fe9ad 3769 device = peer_device->device;
b411b363
PR
3770
3771 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3772 : apv == 88 ? sizeof(struct p_rs_param)
3773 + SHARED_SECRET_MAX
8e26f9cc
PR
3774 : apv <= 94 ? sizeof(struct p_rs_param_89)
3775 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3776
e2857216 3777 if (pi->size > exp_max_sz) {
d0180171 3778 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3779 pi->size, exp_max_sz);
82bc0194 3780 return -EIO;
b411b363
PR
3781 }
3782
3783 if (apv <= 88) {
e658983a 3784 header_size = sizeof(struct p_rs_param);
e2857216 3785 data_size = pi->size - header_size;
8e26f9cc 3786 } else if (apv <= 94) {
e658983a 3787 header_size = sizeof(struct p_rs_param_89);
e2857216 3788 data_size = pi->size - header_size;
0b0ba1ef 3789 D_ASSERT(device, data_size == 0);
8e26f9cc 3790 } else {
e658983a 3791 header_size = sizeof(struct p_rs_param_95);
e2857216 3792 data_size = pi->size - header_size;
0b0ba1ef 3793 D_ASSERT(device, data_size == 0);
b411b363
PR
3794 }
3795
3796 /* initialize verify_alg and csums_alg */
e658983a 3797 p = pi->data;
b411b363
PR
3798 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3799
9f4fe9ad 3800 err = drbd_recv_all(peer_device->connection, p, header_size);
82bc0194
AG
3801 if (err)
3802 return err;
b411b363 3803
0500813f 3804 mutex_lock(&connection->resource->conf_update);
9f4fe9ad 3805 old_net_conf = peer_device->connection->net_conf;
b30ab791 3806 if (get_ldev(device)) {
813472ce
PR
3807 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3808 if (!new_disk_conf) {
b30ab791 3809 put_ldev(device);
0500813f 3810 mutex_unlock(&connection->resource->conf_update);
d0180171 3811 drbd_err(device, "Allocation of new disk_conf failed\n");
813472ce
PR
3812 return -ENOMEM;
3813 }
daeda1cc 3814
b30ab791 3815 old_disk_conf = device->ldev->disk_conf;
813472ce 3816 *new_disk_conf = *old_disk_conf;
b411b363 3817
6394b935 3818 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3819 }
b411b363
PR
3820
3821 if (apv >= 88) {
3822 if (apv == 88) {
5de73827 3823 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
d0180171 3824 drbd_err(device, "verify-alg of wrong size, "
5de73827
PR
3825 "peer wants %u, accepting only up to %u byte\n",
3826 data_size, SHARED_SECRET_MAX);
813472ce
PR
3827 err = -EIO;
3828 goto reconnect;
b411b363
PR
3829 }
3830
9f4fe9ad 3831 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
813472ce
PR
3832 if (err)
3833 goto reconnect;
b411b363
PR
3834 /* we expect NUL terminated string */
3835 /* but just in case someone tries to be evil */
0b0ba1ef 3836 D_ASSERT(device, p->verify_alg[data_size-1] == 0);
b411b363
PR
3837 p->verify_alg[data_size-1] = 0;
3838
3839 } else /* apv >= 89 */ {
3840 /* we still expect NUL terminated strings */
3841 /* but just in case someone tries to be evil */
0b0ba1ef
AG
3842 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3843 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
b411b363
PR
3844 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3845 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3846 }
3847
2ec91e0e 3848 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b30ab791 3849 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3850 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3851 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3852 goto disconnect;
3853 }
b30ab791 3854 verify_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3855 p->verify_alg, "verify-alg");
3856 if (IS_ERR(verify_tfm)) {
3857 verify_tfm = NULL;
3858 goto disconnect;
3859 }
3860 }
3861
2ec91e0e 3862 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b30ab791 3863 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3864 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3865 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3866 goto disconnect;
3867 }
b30ab791 3868 csums_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3869 p->csums_alg, "csums-alg");
3870 if (IS_ERR(csums_tfm)) {
3871 csums_tfm = NULL;
3872 goto disconnect;
3873 }
3874 }
3875
813472ce 3876 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3877 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3878 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3879 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3880 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3881
daeda1cc 3882 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
b30ab791 3883 if (fifo_size != device->rs_plan_s->size) {
813472ce
PR
3884 new_plan = fifo_alloc(fifo_size);
3885 if (!new_plan) {
d0180171 3886 drbd_err(device, "kmalloc of fifo_buffer failed");
b30ab791 3887 put_ldev(device);
778f271d
PR
3888 goto disconnect;
3889 }
3890 }
8e26f9cc 3891 }
b411b363 3892
91fd4dad 3893 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
3894 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3895 if (!new_net_conf) {
d0180171 3896 drbd_err(device, "Allocation of new net_conf failed\n");
91fd4dad
PR
3897 goto disconnect;
3898 }
3899
2ec91e0e 3900 *new_net_conf = *old_net_conf;
91fd4dad
PR
3901
3902 if (verify_tfm) {
2ec91e0e
PR
3903 strcpy(new_net_conf->verify_alg, p->verify_alg);
3904 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3d0e6375 3905 crypto_free_shash(peer_device->connection->verify_tfm);
9f4fe9ad 3906 peer_device->connection->verify_tfm = verify_tfm;
d0180171 3907 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
91fd4dad
PR
3908 }
3909 if (csums_tfm) {
2ec91e0e
PR
3910 strcpy(new_net_conf->csums_alg, p->csums_alg);
3911 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3d0e6375 3912 crypto_free_shash(peer_device->connection->csums_tfm);
9f4fe9ad 3913 peer_device->connection->csums_tfm = csums_tfm;
d0180171 3914 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
91fd4dad 3915 }
bde89a9e 3916 rcu_assign_pointer(connection->net_conf, new_net_conf);
778f271d 3917 }
b411b363
PR
3918 }
3919
813472ce 3920 if (new_disk_conf) {
b30ab791
AG
3921 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3922 put_ldev(device);
813472ce
PR
3923 }
3924
3925 if (new_plan) {
b30ab791
AG
3926 old_plan = device->rs_plan_s;
3927 rcu_assign_pointer(device->rs_plan_s, new_plan);
b411b363 3928 }
daeda1cc 3929
0500813f 3930 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
3931 synchronize_rcu();
3932 if (new_net_conf)
3933 kfree(old_net_conf);
3934 kfree(old_disk_conf);
813472ce 3935 kfree(old_plan);
daeda1cc 3936
82bc0194 3937 return 0;
b411b363 3938
813472ce
PR
3939reconnect:
3940 if (new_disk_conf) {
b30ab791 3941 put_ldev(device);
813472ce
PR
3942 kfree(new_disk_conf);
3943 }
0500813f 3944 mutex_unlock(&connection->resource->conf_update);
813472ce
PR
3945 return -EIO;
3946
b411b363 3947disconnect:
813472ce
PR
3948 kfree(new_plan);
3949 if (new_disk_conf) {
b30ab791 3950 put_ldev(device);
813472ce
PR
3951 kfree(new_disk_conf);
3952 }
0500813f 3953 mutex_unlock(&connection->resource->conf_update);
b411b363
PR
3954 /* just for completeness: actually not needed,
3955 * as this is not reached if csums_tfm was ok. */
3d0e6375 3956 crypto_free_shash(csums_tfm);
b411b363 3957 /* but free the verify_tfm again, if csums_tfm did not work out */
3d0e6375 3958 crypto_free_shash(verify_tfm);
9f4fe9ad 3959 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3960 return -EIO;
b411b363
PR
3961}
3962
b411b363 3963/* warn if the arguments differ by more than 12.5% */
b30ab791 3964static void warn_if_differ_considerably(struct drbd_device *device,
b411b363
PR
3965 const char *s, sector_t a, sector_t b)
3966{
3967 sector_t d;
3968 if (a == 0 || b == 0)
3969 return;
3970 d = (a > b) ? (a - b) : (b - a);
3971 if (d > (a>>3) || d > (b>>3))
d0180171 3972 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
b411b363
PR
3973 (unsigned long long)a, (unsigned long long)b);
3974}
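/* Worked example: a >> 3 is a/8 == 12.5%.  For a = 1000 and b = 1200
 * sectors, d = 200 > (1000 >> 3) = 125, so the sizes are considered
 * considerably different and the warning above is printed. */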
3975
bde89a9e 3976static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3977{
9f4fe9ad 3978 struct drbd_peer_device *peer_device;
b30ab791 3979 struct drbd_device *device;
e658983a 3980 struct p_sizes *p = pi->data;
9104d31a 3981 struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
e96c9633 3982 enum determine_dev_size dd = DS_UNCHANGED;
6a8d68b1 3983 sector_t p_size, p_usize, p_csize, my_usize;
94c43a13 3984 sector_t new_size, cur_size;
b411b363 3985 int ldsc = 0; /* local disk size changed */
e89b591c 3986 enum dds_flags ddsf;
b411b363 3987
9f4fe9ad
AG
3988 peer_device = conn_peer_device(connection, pi->vnr);
3989 if (!peer_device)
bde89a9e 3990 return config_unknown_volume(connection, pi);
9f4fe9ad 3991 device = peer_device->device;
94c43a13 3992 cur_size = drbd_get_capacity(device->this_bdev);
4a76b161 3993
b411b363
PR
3994 p_size = be64_to_cpu(p->d_size);
3995 p_usize = be64_to_cpu(p->u_size);
6a8d68b1 3996 p_csize = be64_to_cpu(p->c_size);
b411b363 3997
b411b363
PR
3998 /* just store the peer's disk size for now.
3999 * we still need to figure out whether we accept that. */
b30ab791 4000 device->p_size = p_size;
b411b363 4001
b30ab791 4002 if (get_ldev(device)) {
daeda1cc 4003 rcu_read_lock();
b30ab791 4004 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
daeda1cc
PR
4005 rcu_read_unlock();
4006
b30ab791
AG
4007 warn_if_differ_considerably(device, "lower level device sizes",
4008 p_size, drbd_get_max_capacity(device->ldev));
4009 warn_if_differ_considerably(device, "user requested size",
daeda1cc 4010 p_usize, my_usize);
b411b363
PR
4011
4012 /* if this is the first connect, or an otherwise expected
4013 * param exchange, choose the minimum */
b30ab791 4014 if (device->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 4015 p_usize = min_not_zero(my_usize, p_usize);
b411b363 4016
ad6e8979
LE
4017 /* Never shrink a device with usable data during connect,
4018 * or "attach" on the peer.
4019 * But allow online shrinking if we are connected. */
60bac040 4020 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
60bac040 4021 if (new_size < cur_size &&
b30ab791 4022 device->state.disk >= D_OUTDATED &&
ad6e8979 4023 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
60bac040
LE
4024 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4025 (unsigned long long)new_size, (unsigned long long)cur_size);
9f4fe9ad 4026 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
b30ab791 4027 put_ldev(device);
82bc0194 4028 return -EIO;
b411b363 4029 }
daeda1cc
PR
4030
4031 if (my_usize != p_usize) {
4032 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
4033
4034 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
4035 if (!new_disk_conf) {
d0180171 4036 drbd_err(device, "Allocation of new disk_conf failed\n");
b30ab791 4037 put_ldev(device);
daeda1cc
PR
4038 return -ENOMEM;
4039 }
4040
0500813f 4041 mutex_lock(&connection->resource->conf_update);
b30ab791 4042 old_disk_conf = device->ldev->disk_conf;
daeda1cc
PR
4043 *new_disk_conf = *old_disk_conf;
4044 new_disk_conf->disk_size = p_usize;
4045
b30ab791 4046 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
0500813f 4047 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
4048 synchronize_rcu();
4049 kfree(old_disk_conf);
4050
ad6e8979
LE
4051 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
4052 (unsigned long)p_usize, (unsigned long)my_usize);
b411b363 4053 }
daeda1cc 4054
b30ab791 4055 put_ldev(device);
b411b363 4056 }
b411b363 4057
20c68fde 4058 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
dd4f699d 4059 /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
20c68fde 4060 In case we cleared the QUEUE_FLAG_DISCARD from our queue in
dd4f699d 4061 drbd_reconsider_queue_parameters(), we can be sure that after
20c68fde
LE
4062 drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
4063
e89b591c 4064 ddsf = be16_to_cpu(p->dds_flags);
b30ab791 4065 if (get_ldev(device)) {
9104d31a 4066 drbd_reconsider_queue_parameters(device, device->ldev, o);
b30ab791
AG
4067 dd = drbd_determine_dev_size(device, ddsf, NULL);
4068 put_ldev(device);
e96c9633 4069 if (dd == DS_ERROR)
82bc0194 4070 return -EIO;
b30ab791 4071 drbd_md_sync(device);
b411b363 4072 } else {
6a8d68b1
LE
4073 /*
4074 * I am diskless, need to accept the peer's *current* size.
4075 * I must NOT accept the peer's backing disk size,
4076 * it may have been larger than mine all along...
4077 *
4078 * At this point, the peer knows more about my disk, or at
4079 * least about what we last agreed upon, than myself.
4080 * So if his c_size is less than his d_size, the most likely
4081 * reason is that *my* d_size was smaller last time we checked.
4082 *
4083 * However, if he sends a zero current size,
4084 * take his (user-capped or) backing disk size anyway.
94c43a13
LE
4085 *
4086 * Unless of course he does not have a disk himself.
4087 * In which case we ignore this completely.
6a8d68b1 4088 */
94c43a13 4089 sector_t new_size = p_csize ?: p_usize ?: p_size;
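		/* Note: gcc's "a ?: b" extension evaluates to the first
		 * non-zero operand, i.e. the peer's current size if set,
		 * else its user-requested size, else its backing size. */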
9104d31a 4090 drbd_reconsider_queue_parameters(device, NULL, o);
94c43a13
LE
4091 if (new_size == 0) {
4092 /* Ignore, the peer knows nothing (yet). */
4093 } else if (new_size == cur_size) {
4094 /* nothing to do */
4095 } else if (cur_size != 0 && p_size == 0) {
4096 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
4097 (unsigned long long)new_size, (unsigned long long)cur_size);
4098 } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
4099 drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
4100 (unsigned long long)new_size, (unsigned long long)cur_size);
4101 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4102 return -EIO;
4103 } else {
4104 /* I believe the peer, if
4105 * - I don't have a current size myself
4106 * - we agree on the size anyways
4107 * - I do have a current size, am Secondary,
4108 * and he has the only disk
4109 * - I do have a current size, am Primary,
4110 * and he has the only disk,
4111 * which is larger than my current size
4112 */
4113 drbd_set_my_capacity(device, new_size);
4114 }
b411b363
PR
4115 }
4116
b30ab791
AG
4117 if (get_ldev(device)) {
4118 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4119 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
b411b363
PR
4120 ldsc = 1;
4121 }
4122
b30ab791 4123 put_ldev(device);
b411b363
PR
4124 }
4125
b30ab791 4126 if (device->state.conn > C_WF_REPORT_PARAMS) {
b411b363 4127 if (be64_to_cpu(p->c_size) !=
b30ab791 4128 drbd_get_capacity(device->this_bdev) || ldsc) {
b411b363
PR
4129 /* we have different sizes, probably peer
4130 * needs to know my new size... */
69a22773 4131 drbd_send_sizes(peer_device, 0, ddsf);
b411b363 4132 }
b30ab791
AG
4133 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4134 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4135 if (device->state.pdsk >= D_INCONSISTENT &&
4136 device->state.disk >= D_INCONSISTENT) {
e89b591c 4137 if (ddsf & DDSF_NO_RESYNC)
d0180171 4138 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
e89b591c 4139 else
b30ab791 4140 resync_after_online_grow(device);
e89b591c 4141 } else
b30ab791 4142 set_bit(RESYNC_AFTER_NEG, &device->flags);
b411b363
PR
4143 }
4144 }
4145
82bc0194 4146 return 0;
b411b363
PR
4147}
4148
bde89a9e 4149static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4150{
9f4fe9ad 4151 struct drbd_peer_device *peer_device;
b30ab791 4152 struct drbd_device *device;
e658983a 4153 struct p_uuids *p = pi->data;
b411b363 4154 u64 *p_uuid;
62b0da3a 4155 int i, updated_uuids = 0;
b411b363 4156
9f4fe9ad
AG
4157 peer_device = conn_peer_device(connection, pi->vnr);
4158 if (!peer_device)
bde89a9e 4159 return config_unknown_volume(connection, pi);
9f4fe9ad 4160 device = peer_device->device;
4a76b161 4161
365cf663 4162 p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
063eacf8 4163 if (!p_uuid) {
d0180171 4164 drbd_err(device, "kmalloc of p_uuid failed\n");
063eacf8
JW
4165 return false;
4166 }
b411b363
PR
4167
4168 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
4169 p_uuid[i] = be64_to_cpu(p->uuid[i]);
4170
b30ab791
AG
4171 kfree(device->p_uuid);
4172 device->p_uuid = p_uuid;
b411b363 4173
b17b5960 4174 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
b30ab791
AG
4175 device->state.disk < D_INCONSISTENT &&
4176 device->state.role == R_PRIMARY &&
4177 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
d0180171 4178 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
b30ab791 4179 (unsigned long long)device->ed_uuid);
9f4fe9ad 4180 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4181 return -EIO;
b411b363
PR
4182 }
4183
b30ab791 4184 if (get_ldev(device)) {
b411b363 4185 int skip_initial_sync =
b30ab791 4186 device->state.conn == C_CONNECTED &&
9f4fe9ad 4187 peer_device->connection->agreed_pro_version >= 90 &&
b30ab791 4188 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
b411b363
PR
4189 (p_uuid[UI_FLAGS] & 8);
4190 if (skip_initial_sync) {
d0180171 4191 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
b30ab791 4192 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
20ceb2b2
LE
4193 "clear_n_write from receive_uuids",
4194 BM_LOCKED_TEST_ALLOWED);
b30ab791
AG
4195 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4196 _drbd_uuid_set(device, UI_BITMAP, 0);
4197 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
b411b363 4198 CS_VERBOSE, NULL);
b30ab791 4199 drbd_md_sync(device);
62b0da3a 4200 updated_uuids = 1;
b411b363 4201 }
b30ab791
AG
4202 put_ldev(device);
4203 } else if (device->state.disk < D_INCONSISTENT &&
4204 device->state.role == R_PRIMARY) {
18a50fa2
PR
4205 /* I am a diskless primary, the peer just created a new current UUID
4206 for me. */
b30ab791 4207 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
b411b363
PR
4208 }
4209
4210 /* Before we test for the disk state, we should wait until a possibly
4211 ongoing cluster-wide state change has finished. That is important if
4212 we are primary and are detaching from our disk. We need to see the
4213 new disk state... */
b30ab791
AG
4214 mutex_lock(device->state_mutex);
4215 mutex_unlock(device->state_mutex);
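	/* The lock/unlock pair above is used purely as a barrier: it blocks
	 * until whoever currently holds state_mutex has completed their
	 * state change, without us holding the mutex any longer ourselves. */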
4216 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4217 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
62b0da3a
LE
4218
4219 if (updated_uuids)
b30ab791 4220 drbd_print_uuids(device, "receiver updated UUIDs to");
b411b363 4221
82bc0194 4222 return 0;
b411b363
PR
4223}
4224
4225/**
4226 * convert_state() - Converts the peer's view of the cluster state to our point of view
4227 * @ps: The state as seen by the peer.
4228 */
4229static union drbd_state convert_state(union drbd_state ps)
4230{
4231 union drbd_state ms;
4232
4233 static enum drbd_conns c_tab[] = {
369bea63 4234 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
4235 [C_CONNECTED] = C_CONNECTED,
4236
4237 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
4238 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
4239 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
4240 [C_VERIFY_S] = C_VERIFY_T,
4241 [C_MASK] = C_MASK,
4242 };
4243
4244 ms.i = ps.i;
4245
4246 ms.conn = c_tab[ps.conn];
4247 ms.peer = ps.role;
4248 ms.role = ps.peer;
4249 ms.pdsk = ps.disk;
4250 ms.disk = ps.pdsk;
4251 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
4252
4253 return ms;
4254}
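/* Worked example (illustrative): a peer state of
 *	{ role=R_PRIMARY, peer=R_SECONDARY, disk=D_UP_TO_DATE, pdsk=D_INCONSISTENT }
 * converts, from our point of view, to
 *	{ role=R_SECONDARY, peer=R_PRIMARY, disk=D_INCONSISTENT, pdsk=D_UP_TO_DATE }
 * with directional connection states such as C_STARTING_SYNC_S/T mirrored
 * via c_tab above. */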
4255
bde89a9e 4256static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4257{
9f4fe9ad 4258 struct drbd_peer_device *peer_device;
b30ab791 4259 struct drbd_device *device;
e658983a 4260 struct p_req_state *p = pi->data;
b411b363 4261 union drbd_state mask, val;
bf885f8a 4262 enum drbd_state_rv rv;
b411b363 4263
9f4fe9ad
AG
4264 peer_device = conn_peer_device(connection, pi->vnr);
4265 if (!peer_device)
4a76b161 4266 return -EIO;
9f4fe9ad 4267 device = peer_device->device;
4a76b161 4268
b411b363
PR
4269 mask.i = be32_to_cpu(p->mask);
4270 val.i = be32_to_cpu(p->val);
4271
9f4fe9ad 4272 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
b30ab791 4273 mutex_is_locked(device->state_mutex)) {
69a22773 4274 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
82bc0194 4275 return 0;
b411b363
PR
4276 }
4277
4278 mask = convert_state(mask);
4279 val = convert_state(val);
4280
b30ab791 4281 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
69a22773 4282 drbd_send_sr_reply(peer_device, rv);
b411b363 4283
b30ab791 4284 drbd_md_sync(device);
b411b363 4285
82bc0194 4286 return 0;
b411b363
PR
4287}
4288
bde89a9e 4289static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4290{
e658983a 4291 struct p_req_state *p = pi->data;
b411b363 4292 union drbd_state mask, val;
bf885f8a 4293 enum drbd_state_rv rv;
b411b363 4294
b411b363
PR
4295 mask.i = be32_to_cpu(p->mask);
4296 val.i = be32_to_cpu(p->val);
4297
bde89a9e
AG
4298 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
4299 mutex_is_locked(&connection->cstate_mutex)) {
4300 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
82bc0194 4301 return 0;
b411b363
PR
4302 }
4303
4304 mask = convert_state(mask);
4305 val = convert_state(val);
4306
bde89a9e
AG
4307 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
4308 conn_send_sr_reply(connection, rv);
b411b363 4309
82bc0194 4310 return 0;
b411b363
PR
4311}
4312
bde89a9e 4313static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4314{
9f4fe9ad 4315 struct drbd_peer_device *peer_device;
b30ab791 4316 struct drbd_device *device;
e658983a 4317 struct p_state *p = pi->data;
4ac4aada 4318 union drbd_state os, ns, peer_state;
b411b363 4319 enum drbd_disk_state real_peer_disk;
65d922c3 4320 enum chg_state_flags cs_flags;
b411b363
PR
4321 int rv;
4322
9f4fe9ad
AG
4323 peer_device = conn_peer_device(connection, pi->vnr);
4324 if (!peer_device)
bde89a9e 4325 return config_unknown_volume(connection, pi);
9f4fe9ad 4326 device = peer_device->device;
4a76b161 4327
b411b363
PR
4328 peer_state.i = be32_to_cpu(p->state);
4329
4330 real_peer_disk = peer_state.disk;
4331 if (peer_state.disk == D_NEGOTIATING) {
b30ab791 4332 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
d0180171 4333 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
b411b363
PR
4334 }
4335
0500813f 4336 spin_lock_irq(&device->resource->req_lock);
b411b363 4337 retry:
b30ab791 4338 os = ns = drbd_read_state(device);
0500813f 4339 spin_unlock_irq(&device->resource->req_lock);
b411b363 4340
668700b4 4341 /* If some other part of the code (ack_receiver thread, timeout)
545752d5
LE
4342 * already decided to close the connection again,
4343 * we must not "re-establish" it here. */
4344 if (os.conn <= C_TEAR_DOWN)
58ffa580 4345 return -ECONNRESET;
545752d5 4346
40424e4a
LE
4347 /* If this is the "end of sync" confirmation, usually the peer disk
4348 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
4349 * set) resync started in PausedSyncT, or if the timing of pause-/
4350 * unpause-sync events has been "just right", the peer disk may
4351 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
4352 */
4353 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
4354 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
4355 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
4356 /* If we are (becoming) SyncSource, but peer is still in sync
4357 * preparation, ignore its uptodate-ness to avoid flapping, it
4358 * will change to inconsistent once the peer reaches active
4359 * syncing states.
4360 * It may have changed syncer-paused flags, however, so we
4361 * cannot ignore this completely. */
4362 if (peer_state.conn > C_CONNECTED &&
4363 peer_state.conn < C_SYNC_SOURCE)
4364 real_peer_disk = D_INCONSISTENT;
4365
4366 /* if peer_state changes to connected at the same time,
4367 * it explicitly notifies us that it finished resync.
4368 * Maybe we should finish it up, too? */
4369 else if (os.conn >= C_SYNC_SOURCE &&
4370 peer_state.conn == C_CONNECTED) {
b30ab791
AG
4371 if (drbd_bm_total_weight(device) <= device->rs_failed)
4372 drbd_resync_finished(device);
82bc0194 4373 return 0;
e9ef7bb6
LE
4374 }
4375 }
4376
02b91b55
LE
4377 /* explicit verify finished notification, stop sector reached. */
4378 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
4379 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
b30ab791
AG
4380 ov_out_of_sync_print(device);
4381 drbd_resync_finished(device);
58ffa580 4382 return 0;
02b91b55
LE
4383 }
4384
e9ef7bb6
LE
4385 /* peer says his disk is inconsistent, while we think it is uptodate,
4386 * and this happens while the peer still thinks we have a sync going on,
4387 * but we think we are already done with the sync.
4388 * We ignore this to avoid flapping pdsk.
4389 * This should not happen, if the peer is a recent version of drbd. */
4390 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
4391 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
4392 real_peer_disk = D_UP_TO_DATE;
4393
4ac4aada
LE
4394 if (ns.conn == C_WF_REPORT_PARAMS)
4395 ns.conn = C_CONNECTED;
b411b363 4396
67531718
PR
4397 if (peer_state.conn == C_AHEAD)
4398 ns.conn = C_BEHIND;
4399
fe43ed97
LE
4400 /* TODO:
4401 * if (primary and diskless and peer uuid != effective uuid)
4402 * abort attach on peer;
4403 *
4404 * If this node does not have good data, was already connected, but
4405 * the peer did a late attach only now, trying to "negotiate" with me,
4406 * AND I am currently Primary, possibly frozen, with some specific
4407 * "effective" uuid, this should never be reached, really, because
4408 * we first send the uuids, then the current state.
4409 *
4410 * In this scenario, we already dropped the connection hard
4411 * when we received the unsuitable uuids (receive_uuids()).
4412 *
4413 * Should we want to change this, that is: not drop the connection in
4414 * receive_uuids() already, then we would need to add a branch here
4415 * that aborts the attach of "unsuitable uuids" on the peer in case
4416 * this node is currently Diskless Primary.
4417 */
4418
b30ab791
AG
4419 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4420 get_ldev_if_state(device, D_NEGOTIATING)) {
b411b363
PR
4421 int cr; /* consider resync */
4422
4423 /* if we established a new connection */
4ac4aada 4424 cr = (os.conn < C_CONNECTED);
b411b363
PR
4425 /* if we had an established connection
4426 * and one of the nodes newly attaches a disk */
4ac4aada 4427 cr |= (os.conn == C_CONNECTED &&
b411b363 4428 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 4429 os.disk == D_NEGOTIATING));
b411b363
PR
4430 /* if we have both been inconsistent, and the peer has been
4431 * forced to be UpToDate with --overwrite-data */
b30ab791 4432 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4433 /* if we had been plain connected, and the admin requested to
4434 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 4435 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
4436 (peer_state.conn >= C_STARTING_SYNC_S &&
4437 peer_state.conn <= C_WF_BITMAP_T));
4438
4439 if (cr)
69a22773 4440 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
b411b363 4441
b30ab791 4442 put_ldev(device);
4ac4aada
LE
4443 if (ns.conn == C_MASK) {
4444 ns.conn = C_CONNECTED;
b30ab791
AG
4445 if (device->state.disk == D_NEGOTIATING) {
4446 drbd_force_state(device, NS(disk, D_FAILED));
b411b363 4447 } else if (peer_state.disk == D_NEGOTIATING) {
d0180171 4448 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
b411b363 4449 peer_state.disk = D_DISKLESS;
580b9767 4450 real_peer_disk = D_DISKLESS;
b411b363 4451 } else {
9f4fe9ad 4452 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
82bc0194 4453 return -EIO;
0b0ba1ef 4454 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
9f4fe9ad 4455 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4456 return -EIO;
b411b363
PR
4457 }
4458 }
4459 }
4460
0500813f 4461 spin_lock_irq(&device->resource->req_lock);
b30ab791 4462 if (os.i != drbd_read_state(device).i)
b411b363 4463 goto retry;
b30ab791 4464 clear_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4465 ns.peer = peer_state.role;
4466 ns.pdsk = real_peer_disk;
4467 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 4468 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b30ab791 4469 ns.disk = device->new_state_tmp.disk;
4ac4aada 4470 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
b30ab791
AG
4471 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4472 test_bit(NEW_CUR_UUID, &device->flags)) {
8554df1c 4473 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 4474 for temporary network outages! */
0500813f 4475 spin_unlock_irq(&device->resource->req_lock);
d0180171 4476 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
9f4fe9ad 4477 tl_clear(peer_device->connection);
b30ab791
AG
4478 drbd_uuid_new_current(device);
4479 clear_bit(NEW_CUR_UUID, &device->flags);
9f4fe9ad 4480 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 4481 return -EIO;
481c6f50 4482 }
b30ab791
AG
4483 rv = _drbd_set_state(device, ns, cs_flags, NULL);
4484 ns = drbd_read_state(device);
0500813f 4485 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
4486
4487 if (rv < SS_SUCCESS) {
9f4fe9ad 4488 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4489 return -EIO;
b411b363
PR
4490 }
4491
4ac4aada
LE
4492 if (os.conn > C_WF_REPORT_PARAMS) {
4493 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
4494 peer_state.disk != D_NEGOTIATING) {
4495 /* we want resync, peer has not yet decided to sync... */
4496 /* Nowadays only used when forcing a node into primary role and
4497 setting its disk to UpToDate with that */
69a22773
AG
4498 drbd_send_uuids(peer_device);
4499 drbd_send_current_state(peer_device);
b411b363
PR
4500 }
4501 }
4502
b30ab791 4503 clear_bit(DISCARD_MY_DATA, &device->flags);
b411b363 4504
b30ab791 4505 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
b411b363 4506
82bc0194 4507 return 0;
b411b363
PR
4508}
4509
bde89a9e 4510static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4511{
9f4fe9ad 4512 struct drbd_peer_device *peer_device;
b30ab791 4513 struct drbd_device *device;
e658983a 4514 struct p_rs_uuid *p = pi->data;
4a76b161 4515
9f4fe9ad
AG
4516 peer_device = conn_peer_device(connection, pi->vnr);
4517 if (!peer_device)
4a76b161 4518 return -EIO;
9f4fe9ad 4519 device = peer_device->device;
b411b363 4520
b30ab791
AG
4521 wait_event(device->misc_wait,
4522 device->state.conn == C_WF_SYNC_UUID ||
4523 device->state.conn == C_BEHIND ||
4524 device->state.conn < C_CONNECTED ||
4525 device->state.disk < D_NEGOTIATING);
b411b363 4526
0b0ba1ef 4527 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
b411b363 4528
b411b363
PR
4529 /* Here the _drbd_uuid_ functions are right, current should
4530 _not_ be rotated into the history */
b30ab791
AG
4531 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4532 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4533 _drbd_uuid_set(device, UI_BITMAP, 0UL);
b411b363 4534
b30ab791
AG
4535 drbd_print_uuids(device, "updated sync uuid");
4536 drbd_start_resync(device, C_SYNC_TARGET);
b411b363 4537
b30ab791 4538 put_ldev(device);
b411b363 4539 } else
d0180171 4540 drbd_err(device, "Ignoring SyncUUID packet!\n");
b411b363 4541
82bc0194 4542 return 0;
b411b363
PR
4543}
4544
2c46407d
AG
4545/**
4546 * receive_bitmap_plain - receive an uncompressed chunk of the peer's bitmap
4547 *
4548 * Return 0 when done, 1 when another iteration is needed, and a negative error
4549 * code upon failure.
4550 */
4551static int
69a22773 4552receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
e658983a 4553 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4554{
50d0b1ad 4555 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
69a22773 4556 drbd_header_size(peer_device->connection);
e658983a 4557 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4558 c->bm_words - c->word_offset);
e658983a 4559 unsigned int want = num_words * sizeof(*p);
2c46407d 4560 int err;
b411b363 4561
50d0b1ad 4562 if (want != size) {
69a22773 4563 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4564 return -EIO;
b411b363
PR
4565 }
4566 if (want == 0)
2c46407d 4567 return 0;
69a22773 4568 err = drbd_recv_all(peer_device->connection, p, want);
82bc0194 4569 if (err)
2c46407d 4570 return err;
b411b363 4571
69a22773 4572 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
b411b363
PR
4573
4574 c->word_offset += num_words;
4575 c->bit_offset = c->word_offset * BITS_PER_LONG;
4576 if (c->bit_offset > c->bm_bits)
4577 c->bit_offset = c->bm_bits;
4578
2c46407d 4579 return 1;
b411b363
PR
4580}
4581
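/*
 * Editorial sketch, not part of the original file: the per-packet word
 * budget computed in receive_bitmap_plain() above, with illustrative
 * numbers. Assumes the usual 4096 byte socket buffer and a 16 byte
 * header; the real header size depends on the agreed protocol version.
 */
#if 0
static unsigned int example_words_per_packet(void)
{
        unsigned int data_size = 4096 - 16;     /* buffer minus header */

        /* 510 words of 8 bytes on 64 bit, i.e. 4080 payload bytes */
        return data_size / sizeof(unsigned long);
}
/* A 1 GiB volume has 1 GiB / 4 KiB = 262144 bitmap bits = 4096 words,
 * so it takes DIV_ROUND_UP(4096, 510) = 9 plain bitmap packets. */
#endif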
a02d1240
AG
4582static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4583{
4584 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4585}
4586
4587static int dcbp_get_start(struct p_compressed_bm *p)
4588{
4589 return (p->encoding & 0x80) != 0;
4590}
4591
4592static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4593{
4594 return (p->encoding >> 4) & 0x7;
4595}
4596
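/*
 * Editorial note on the three accessors above: they unpack a single
 * header byte of struct p_compressed_bm. As implemented, bit 7 is the
 * start toggle, bits 6..4 are the pad bit count, bits 3..0 the encoding
 * code. A worked example, for illustration only:
 */
#if 0
static void dcbp_example(void)
{
        u8 encoding = 0x93;
        int start = (encoding & 0x80) != 0;     /* 1 */
        int pad   = (encoding >> 4) & 0x7;      /* (0x9 & 0x7) == 1 */
        int code  = encoding & 0x0f;            /* 3 */

        (void)start; (void)pad; (void)code;     /* illustration only */
}
#endif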
2c46407d
AG
4597/**
4598 * recv_bm_rle_bits - decode one VLI/RLE encoded bitmap packet
4599 *
4600 * Return 0 when done, 1 when another iteration is needed, and a negative error
4601 * code upon failure.
4602 */
4603static int
69a22773 4604recv_bm_rle_bits(struct drbd_peer_device *peer_device,
b411b363 4605 struct p_compressed_bm *p,
c6d25cfe
PR
4606 struct bm_xfer_ctx *c,
4607 unsigned int len)
b411b363
PR
4608{
4609 struct bitstream bs;
4610 u64 look_ahead;
4611 u64 rl;
4612 u64 tmp;
4613 unsigned long s = c->bit_offset;
4614 unsigned long e;
a02d1240 4615 int toggle = dcbp_get_start(p);
b411b363
PR
4616 int have;
4617 int bits;
4618
a02d1240 4619 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4620
4621 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4622 if (bits < 0)
2c46407d 4623 return -EIO;
b411b363
PR
4624
4625 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4626 bits = vli_decode_bits(&rl, look_ahead);
4627 if (bits <= 0)
2c46407d 4628 return -EIO;
b411b363
PR
4629
4630 if (toggle) {
4631 e = s + rl - 1;
4632 if (e >= c->bm_bits) {
69a22773 4633 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4634 return -EIO;
b411b363 4635 }
69a22773 4636 _drbd_bm_set_bits(peer_device->device, s, e);
b411b363
PR
4637 }
4638
4639 if (have < bits) {
69a22773 4640 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
b411b363
PR
4641 have, bits, look_ahead,
4642 (unsigned int)(bs.cur.b - p->code),
4643 (unsigned int)bs.buf_len);
2c46407d 4644 return -EIO;
b411b363 4645 }
d2da5b0c
LE
4646 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4647 if (likely(bits < 64))
4648 look_ahead >>= bits;
4649 else
4650 look_ahead = 0;
b411b363
PR
4651 have -= bits;
4652
4653 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4654 if (bits < 0)
2c46407d 4655 return -EIO;
b411b363
PR
4656 look_ahead |= tmp << have;
4657 have += bits;
4658 }
4659
4660 c->bit_offset = s;
4661 bm_xfer_ctx_bit_to_word_offset(c);
4662
2c46407d 4663 return (s != c->bm_bits);
b411b363
PR
4664}
4665
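/*
 * Editorial sketch of the toggle/run-length idea implemented above,
 * using hypothetical plain u64 runs instead of the real VLI bit code:
 * with start toggle 0 and run lengths 3, 5, 2, the stream means
 * "3 bits unchanged, 5 bits set, 2 bits unchanged"; only toggled runs
 * reach _drbd_bm_set_bits().
 */
#if 0
static unsigned long example_apply_runs(unsigned long s, const u64 *runs,
                                        int n, int toggle)
{
        int i;

        for (i = 0; i < n; i++, toggle = !toggle) {
                if (toggle) {
                        /* would set bits [s, s + runs[i] - 1] here */
                }
                s += runs[i];
        }
        return s;       /* next bit offset, cf. c->bit_offset */
}
#endif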
2c46407d
AG
4666/**
4667 * decode_bitmap_c - dispatch on the encoding of a compressed bitmap packet
4668 *
4669 * Return 0 when done, 1 when another iteration is needed, and a negative error
4670 * code upon failure.
4671 */
4672static int
69a22773 4673decode_bitmap_c(struct drbd_peer_device *peer_device,
b411b363 4674 struct p_compressed_bm *p,
c6d25cfe
PR
4675 struct bm_xfer_ctx *c,
4676 unsigned int len)
b411b363 4677{
a02d1240 4678 if (dcbp_get_code(p) == RLE_VLI_Bits)
69a22773 4679 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
b411b363
PR
4680
4681 /* other variants had been implemented for evaluation,
4682 * but have been dropped as this one turned out to be "best"
4683 * during all our tests. */
4684
69a22773
AG
4685 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4686 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4687 return -EIO;
b411b363
PR
4688}
4689
b30ab791 4690void INFO_bm_xfer_stats(struct drbd_device *device,
b411b363
PR
4691 const char *direction, struct bm_xfer_ctx *c)
4692{
4693 /* what would it take to transfer it "plaintext" */
a6b32bc3 4694 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
50d0b1ad
AG
4695 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4696 unsigned int plain =
4697 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4698 c->bm_words * sizeof(unsigned long);
4699 unsigned int total = c->bytes[0] + c->bytes[1];
4700 unsigned int r;
b411b363
PR
4701
4702 /* total cannot be zero, but just in case: */
4703 if (total == 0)
4704 return;
4705
4706 /* don't report if not compressed */
4707 if (total >= plain)
4708 return;
4709
4710 /* total < plain. check for overflow, still */
4711 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4712 : (1000 * total / plain);
4713
4714 if (r > 1000)
4715 r = 1000;
4716
4717 r = 1000 - r;
d0180171 4718 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
b411b363
PR
4719 "total %u; compression: %u.%u%%\n",
4720 direction,
4721 c->bytes[1], c->packets[1],
4722 c->bytes[0], c->packets[0],
4723 total, r/10, r % 10);
4724}
4725
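/*
 * Editorial worked example for the permille computation above
 * (illustrative numbers): plain = 32768 bytes, total = 1024 bytes.
 * The UINT_MAX/1000 branch merely avoids 32 bit overflow of
 * 1000 * total for huge bitmaps.
 */
#if 0
static unsigned int example_compression_permille(void)
{
        unsigned int plain = 32768, total = 1024;
        unsigned int r = (total > UINT_MAX/1000) ? total / (plain/1000)
                                                 : 1000 * total / plain;

        return 1000 - r;        /* 969, printed as "compression: 96.9%" */
}
#endif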
4726/* Since we process the bitfield from lower addresses to higher,
4727   it does not matter whether we process it in 32 bit or 64 bit chunks,
4728   as long as it is little endian. (Understand it as a byte stream,
4729   beginning with the lowest byte...) If we used big endian,
4730   we would need to process it from the highest address to the lowest
4731   in order to be agnostic to the 32 vs 64 bit issue.
4732
4733   Returns 0 on failure, 1 if we successfully received it. */
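/*
 * Editorial sketch of the byte-stream view described above: bit N of
 * the bitmap lives in byte N / 8 at mask 1 << (N % 8), no matter
 * whether the longs merged by drbd_bm_merge_lel() are 32 or 64 bit.
 */
#if 0
static int example_test_bit_le(const u8 *bm, unsigned long n)
{
        return (bm[n / 8] >> (n % 8)) & 1;
}
#endif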
bde89a9e 4734static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4735{
9f4fe9ad 4736 struct drbd_peer_device *peer_device;
b30ab791 4737 struct drbd_device *device;
b411b363 4738 struct bm_xfer_ctx c;
2c46407d 4739 int err;
4a76b161 4740
9f4fe9ad
AG
4741 peer_device = conn_peer_device(connection, pi->vnr);
4742 if (!peer_device)
4a76b161 4743 return -EIO;
9f4fe9ad 4744 device = peer_device->device;
b411b363 4745
b30ab791 4746 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
20ceb2b2
LE
4747 /* you are supposed to send additional out-of-sync information
4748 * if you actually set bits during this phase */
b411b363 4749
b411b363 4750 c = (struct bm_xfer_ctx) {
b30ab791
AG
4751 .bm_bits = drbd_bm_bits(device),
4752 .bm_words = drbd_bm_words(device),
b411b363
PR
4753 };
4754
2c46407d 4755 for(;;) {
e658983a 4756 if (pi->cmd == P_BITMAP)
69a22773 4757 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
e658983a 4758 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4759 /* MAYBE: sanity check that we speak proto >= 90,
4760 * and the feature is enabled! */
e658983a 4761 struct p_compressed_bm *p = pi->data;
b411b363 4762
bde89a9e 4763 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
d0180171 4764 drbd_err(device, "ReportCBitmap packet too large\n");
82bc0194 4765 err = -EIO;
b411b363
PR
4766 goto out;
4767 }
e658983a 4768 if (pi->size <= sizeof(*p)) {
d0180171 4769 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4770 err = -EIO;
78fcbdae 4771 goto out;
b411b363 4772 }
9f4fe9ad 4773 err = drbd_recv_all(peer_device->connection, p, pi->size);
e658983a
AG
4774 if (err)
4775 goto out;
69a22773 4776 err = decode_bitmap_c(peer_device, p, &c, pi->size);
b411b363 4777 } else {
d0180171 4778 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
82bc0194 4779 err = -EIO;
b411b363
PR
4780 goto out;
4781 }
4782
e2857216 4783 c.packets[pi->cmd == P_BITMAP]++;
bde89a9e 4784 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
b411b363 4785
2c46407d
AG
4786 if (err <= 0) {
4787 if (err < 0)
4788 goto out;
b411b363 4789 break;
2c46407d 4790 }
9f4fe9ad 4791 err = drbd_recv_header(peer_device->connection, pi);
82bc0194 4792 if (err)
b411b363 4793 goto out;
2c46407d 4794 }
b411b363 4795
b30ab791 4796 INFO_bm_xfer_stats(device, "receive", &c);
b411b363 4797
b30ab791 4798 if (device->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4799 enum drbd_state_rv rv;
4800
b30ab791 4801 err = drbd_send_bitmap(device);
82bc0194 4802 if (err)
b411b363
PR
4803 goto out;
4804 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
b30ab791 4805 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
0b0ba1ef 4806 D_ASSERT(device, rv == SS_SUCCESS);
b30ab791 4807 } else if (device->state.conn != C_WF_BITMAP_S) {
b411b363
PR
4808 /* admin may have requested C_DISCONNECTING,
4809 * other threads may have noticed network errors */
d0180171 4810 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
b30ab791 4811 drbd_conn_str(device->state.conn));
b411b363 4812 }
82bc0194 4813 err = 0;
b411b363 4814
b411b363 4815 out:
b30ab791
AG
4816 drbd_bm_unlock(device);
4817 if (!err && device->state.conn == C_WF_BITMAP_S)
4818 drbd_start_resync(device, C_SYNC_SOURCE);
82bc0194 4819 return err;
b411b363
PR
4820}
4821
bde89a9e 4822static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4823{
1ec861eb 4824 drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4825 pi->cmd, pi->size);
b411b363 4826
bde89a9e 4827 return ignore_remaining_packet(connection, pi);
b411b363
PR
4828}
4829
bde89a9e 4830static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 4831{
e7f52dfb
LE
4832 /* Make sure we've acked all the TCP data associated
4833 * with the data requests being unplugged */
bde89a9e 4834 drbd_tcp_quickack(connection->data.socket);
0ced55a3 4835
82bc0194 4836 return 0;
0ced55a3
PR
4837}
4838
bde89a9e 4839static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
73a01a18 4840{
9f4fe9ad 4841 struct drbd_peer_device *peer_device;
b30ab791 4842 struct drbd_device *device;
e658983a 4843 struct p_block_desc *p = pi->data;
4a76b161 4844
9f4fe9ad
AG
4845 peer_device = conn_peer_device(connection, pi->vnr);
4846 if (!peer_device)
4a76b161 4847 return -EIO;
9f4fe9ad 4848 device = peer_device->device;
73a01a18 4849
b30ab791 4850 switch (device->state.conn) {
f735e363
LE
4851 case C_WF_SYNC_UUID:
4852 case C_WF_BITMAP_T:
4853 case C_BEHIND:
4854 break;
4855 default:
d0180171 4856 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
b30ab791 4857 drbd_conn_str(device->state.conn));
f735e363
LE
4858 }
4859
b30ab791 4860 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
73a01a18 4861
82bc0194 4862 return 0;
73a01a18
PR
4863}
4864
700ca8c0
PR
4865static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
4866{
4867 struct drbd_peer_device *peer_device;
4868 struct p_block_desc *p = pi->data;
4869 struct drbd_device *device;
4870 sector_t sector;
4871 int size, err = 0;
4872
4873 peer_device = conn_peer_device(connection, pi->vnr);
4874 if (!peer_device)
4875 return -EIO;
4876 device = peer_device->device;
4877
4878 sector = be64_to_cpu(p->sector);
4879 size = be32_to_cpu(p->blksize);
4880
4881 dec_rs_pending(device);
4882
4883 if (get_ldev(device)) {
4884 struct drbd_peer_request *peer_req;
45c21793 4885 const int op = REQ_OP_WRITE_ZEROES;
700ca8c0
PR
4886
4887 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
9104d31a 4888 size, 0, GFP_NOIO);
700ca8c0
PR
4889 if (!peer_req) {
4890 put_ldev(device);
4891 return -ENOMEM;
4892 }
4893
4894 peer_req->w.cb = e_end_resync_block;
4895 peer_req->submit_jif = jiffies;
4896 peer_req->flags |= EE_IS_TRIM;
4897
4898 spin_lock_irq(&device->resource->req_lock);
4899 list_add_tail(&peer_req->w.list, &device->sync_ee);
4900 spin_unlock_irq(&device->resource->req_lock);
4901
4902 atomic_add(pi->size >> 9, &device->rs_sect_ev);
4903 err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
4904
4905 if (err) {
4906 spin_lock_irq(&device->resource->req_lock);
4907 list_del(&peer_req->w.list);
4908 spin_unlock_irq(&device->resource->req_lock);
4909
4910 drbd_free_peer_req(device, peer_req);
4911 put_ldev(device);
4912 err = 0;
4913 goto fail;
4914 }
4915
4916 inc_unacked(device);
4917
4918 /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
4919 as well as drbd_rs_complete_io() */
4920 } else {
4921 fail:
4922 drbd_rs_complete_io(device, sector);
4923 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
4924 }
4925
4926 atomic_add(size >> 9, &device->rs_sect_in);
4927
4928 return err;
4929}
4930
02918be2
PR
4931struct data_cmd {
4932 int expect_payload;
9104d31a 4933 unsigned int pkt_size;
bde89a9e 4934 int (*fn)(struct drbd_connection *, struct packet_info *);
02918be2
PR
4935};
4936
4937static struct data_cmd drbd_cmd_handler[] = {
4938 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4939 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4940 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4941 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
4942 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4943 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4944 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
4945 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4946 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
4947 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4948 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
4949 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4950 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4951 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4952 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4953 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4954 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4955 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4956 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4957 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
700ca8c0 4958 [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
02918be2 4959 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 4960 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 4961 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 4962 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
a0fb3c47 4963 [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
700ca8c0 4964 [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
9104d31a 4965 [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data },
b411b363
PR
4966};
4967
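/*
 * Editorial note on the dispatch table above: pi.cmd indexes straight
 * into drbd_cmd_handler[]; pkt_size is the fixed sub-header drbdd()
 * reads before calling fn, and expect_payload says whether pi.size may
 * exceed it. A hypothetical no-payload packet type would be wired up as
 * { 0, sizeof(struct p_example), receive_example }, with a handler
 * shaped like the sketch below (p_example and receive_example are
 * made-up names, for illustration only).
 */
#if 0
static int receive_example(struct drbd_connection *connection,
                           struct packet_info *pi)
{
        struct p_example *p = pi->data; /* sub-header already received */

        /* ... act on p ... */
        return 0;       /* nonzero makes drbdd() bail out with a protocol error */
}
#endif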
bde89a9e 4968static void drbdd(struct drbd_connection *connection)
b411b363 4969{
77351055 4970 struct packet_info pi;
02918be2 4971 size_t shs; /* sub header size */
82bc0194 4972 int err;
b411b363 4973
bde89a9e 4974 while (get_t_state(&connection->receiver) == RUNNING) {
9104d31a 4975 struct data_cmd const *cmd;
b411b363 4976
bde89a9e 4977 drbd_thread_current_set_cpu(&connection->receiver);
c51a0ef3
LE
4978 update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
4979 if (drbd_recv_header_maybe_unplug(connection, &pi))
02918be2 4980 goto err_out;
b411b363 4981
deebe195 4982 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 4983 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
1ec861eb 4984 drbd_err(connection, "Unexpected data packet %s (0x%04x)",
2fcb8f30 4985 cmdname(pi.cmd), pi.cmd);
02918be2 4986 goto err_out;
0b33a916 4987 }
b411b363 4988
e658983a 4989 shs = cmd->pkt_size;
9104d31a
LE
4990 if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
4991 shs += sizeof(struct o_qlim);
e658983a 4992 if (pi.size > shs && !cmd->expect_payload) {
1ec861eb 4993 drbd_err(connection, "No payload expected %s l:%d\n",
2fcb8f30 4994 cmdname(pi.cmd), pi.size);
02918be2 4995 goto err_out;
b411b363 4996 }
9104d31a
LE
4997 if (pi.size < shs) {
4998 drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
4999 cmdname(pi.cmd), (int)shs, pi.size);
5000 goto err_out;
5001 }
b411b363 5002
c13f7e1a 5003 if (shs) {
944410e9 5004 update_receiver_timing_details(connection, drbd_recv_all_warn);
bde89a9e 5005 err = drbd_recv_all_warn(connection, pi.data, shs);
a5c31904 5006 if (err)
c13f7e1a 5007 goto err_out;
e2857216 5008 pi.size -= shs;
c13f7e1a
LE
5009 }
5010
944410e9 5011 update_receiver_timing_details(connection, cmd->fn);
bde89a9e 5012 err = cmd->fn(connection, &pi);
4a76b161 5013 if (err) {
1ec861eb 5014 drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
9f5bdc33 5015 cmdname(pi.cmd), err, pi.size);
02918be2 5016 goto err_out;
b411b363
PR
5017 }
5018 }
82bc0194 5019 return;
b411b363 5020
82bc0194 5021 err_out:
bde89a9e 5022 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
5023}
5024
bde89a9e 5025static void conn_disconnect(struct drbd_connection *connection)
b411b363 5026{
c06ece6b 5027 struct drbd_peer_device *peer_device;
bbeb641c 5028 enum drbd_conns oc;
376694a0 5029 int vnr;
b411b363 5030
bde89a9e 5031 if (connection->cstate == C_STANDALONE)
b411b363 5032 return;
b411b363 5033
545752d5
LE
5034 /* We are about to start the cleanup after connection loss.
5035 * Make sure drbd_make_request knows about that.
5036 * Usually we should be in some network failure state already,
5037 * but just in case we are not, we fix it up here.
5038 */
bde89a9e 5039 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 5040
668700b4 5041 /* ack_receiver does not clean up anything. it must not interfere, either */
1c03e520 5042 drbd_thread_stop(&connection->ack_receiver);
668700b4
PR
5043 if (connection->ack_sender) {
5044 destroy_workqueue(connection->ack_sender);
5045 connection->ack_sender = NULL;
5046 }
bde89a9e 5047 drbd_free_sock(connection);
360cc740 5048
c141ebda 5049 rcu_read_lock();
c06ece6b
AG
5050 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5051 struct drbd_device *device = peer_device->device;
b30ab791 5052 kref_get(&device->kref);
c141ebda 5053 rcu_read_unlock();
69a22773 5054 drbd_disconnected(peer_device);
c06ece6b 5055 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
5056 rcu_read_lock();
5057 }
5058 rcu_read_unlock();
5059
bde89a9e 5060 if (!list_empty(&connection->current_epoch->list))
1ec861eb 5061 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
12038a3a 5062 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
bde89a9e
AG
5063 atomic_set(&connection->current_epoch->epoch_size, 0);
5064 connection->send.seen_any_write_yet = false;
12038a3a 5065
1ec861eb 5066 drbd_info(connection, "Connection closed\n");
360cc740 5067
bde89a9e
AG
5068 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
5069 conn_try_outdate_peer_async(connection);
cb703454 5070
0500813f 5071 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 5072 oc = connection->cstate;
bbeb641c 5073 if (oc >= C_UNCONNECTED)
bde89a9e 5074 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 5075
0500813f 5076 spin_unlock_irq(&connection->resource->req_lock);
360cc740 5077
f3dfa40a 5078 if (oc == C_DISCONNECTING)
bde89a9e 5079 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
5080}
5081
69a22773 5082static int drbd_disconnected(struct drbd_peer_device *peer_device)
360cc740 5083{
69a22773 5084 struct drbd_device *device = peer_device->device;
360cc740 5085 unsigned int i;
b411b363 5086
85719573 5087 /* wait for current activity to cease. */
0500813f 5088 spin_lock_irq(&device->resource->req_lock);
b30ab791
AG
5089 _drbd_wait_ee_list_empty(device, &device->active_ee);
5090 _drbd_wait_ee_list_empty(device, &device->sync_ee);
5091 _drbd_wait_ee_list_empty(device, &device->read_ee);
0500813f 5092 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5093
5094 /* We do not have data structures that would allow us to
5095 * get the rs_pending_cnt down to 0 again.
5096 * * On C_SYNC_TARGET we do not have any data structures describing
5097 * the pending RSDataRequest's we have sent.
5098 * * On C_SYNC_SOURCE there is no data structure that tracks
5099 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
5100 * And no, it is not the sum of the reference counts in the
5101 * resync_LRU. The resync_LRU tracks the whole operation including
5102 * the disk-IO, while the rs_pending_cnt only tracks the blocks
5103 * on the fly. */
b30ab791
AG
5104 drbd_rs_cancel_all(device);
5105 device->rs_total = 0;
5106 device->rs_failed = 0;
5107 atomic_set(&device->rs_pending_cnt, 0);
5108 wake_up(&device->misc_wait);
b411b363 5109
b30ab791 5110 del_timer_sync(&device->resync_timer);
2bccef39 5111 resync_timer_fn(&device->resync_timer);
b411b363 5112
b411b363
PR
5113 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
5114 * w_make_resync_request etc. which may still be on the worker queue
5115 * to be "canceled" */
b5043c5e 5116 drbd_flush_workqueue(&peer_device->connection->sender_work);
b411b363 5117
b30ab791 5118 drbd_finish_peer_reqs(device);
b411b363 5119
d10b4ea3
PR
5120 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
5121 might have queued work again. The one before drbd_finish_peer_reqs() is
5122 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
b5043c5e 5123 drbd_flush_workqueue(&peer_device->connection->sender_work);
d10b4ea3 5124
08332d73
LE
5125 /* need to do it again, drbd_finish_peer_reqs() may have populated it
5126 * again via drbd_try_clear_on_disk_bm(). */
b30ab791 5127 drbd_rs_cancel_all(device);
b411b363 5128
b30ab791
AG
5129 kfree(device->p_uuid);
5130 device->p_uuid = NULL;
b411b363 5131
b30ab791 5132 if (!drbd_suspended(device))
69a22773 5133 tl_clear(peer_device->connection);
b411b363 5134
b30ab791 5135 drbd_md_sync(device);
b411b363 5136
be115b69
LE
5137 if (get_ldev(device)) {
5138 drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5139 "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
5140 put_ldev(device);
5141 }
20ceb2b2 5142
b411b363
PR
5143 /* tcp_close and release of sendpage pages can be deferred. I don't
5144 * want to use SO_LINGER, because apparently it can be deferred for
5145 * more than 20 seconds (longest time I checked).
5146 *
5147 * Actually we don't care for exactly when the network stack does its
5148 * put_page(), but release our reference on these pages right here.
5149 */
b30ab791 5150 i = drbd_free_peer_reqs(device, &device->net_ee);
b411b363 5151 if (i)
d0180171 5152 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
b30ab791 5153 i = atomic_read(&device->pp_in_use_by_net);
435f0740 5154 if (i)
d0180171 5155 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
b30ab791 5156 i = atomic_read(&device->pp_in_use);
b411b363 5157 if (i)
d0180171 5158 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
b411b363 5159
0b0ba1ef
AG
5160 D_ASSERT(device, list_empty(&device->read_ee));
5161 D_ASSERT(device, list_empty(&device->active_ee));
5162 D_ASSERT(device, list_empty(&device->sync_ee));
5163 D_ASSERT(device, list_empty(&device->done_ee));
b411b363 5164
360cc740 5165 return 0;
b411b363
PR
5166}
5167
5168/*
5169 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
5170 * we can agree on is stored in agreed_pro_version.
5171 *
5172 * feature flags and the reserved array should be enough room for future
5173 * enhancements of the handshake protocol, and possible plugins...
5174 *
5175 * for now, they are expected to be zero, but ignored.
5176 */
bde89a9e 5177static int drbd_send_features(struct drbd_connection *connection)
b411b363 5178{
9f5bdc33
AG
5179 struct drbd_socket *sock;
5180 struct p_connection_features *p;
b411b363 5181
bde89a9e
AG
5182 sock = &connection->data;
5183 p = conn_prepare_command(connection, sock);
9f5bdc33 5184 if (!p)
e8d17b01 5185 return -EIO;
b411b363
PR
5186 memset(p, 0, sizeof(*p));
5187 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
5188 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
20c68fde 5189 p->feature_flags = cpu_to_be32(PRO_FEATURES);
bde89a9e 5190 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
5191}
5192
5193/*
5194 * return values:
5195 * 1 yes, we have a valid connection
5196 * 0 oops, did not work out, please try again
5197 * -1 peer talks different language,
5198 * no point in trying again, please go standalone.
5199 */
bde89a9e 5200static int drbd_do_features(struct drbd_connection *connection)
b411b363 5201{
bde89a9e 5202 /* ASSERT current == connection->receiver ... */
e658983a
AG
5203 struct p_connection_features *p;
5204 const int expect = sizeof(struct p_connection_features);
77351055 5205 struct packet_info pi;
a5c31904 5206 int err;
b411b363 5207
bde89a9e 5208 err = drbd_send_features(connection);
e8d17b01 5209 if (err)
b411b363
PR
5210 return 0;
5211
bde89a9e 5212 err = drbd_recv_header(connection, &pi);
69bc7bc3 5213 if (err)
b411b363
PR
5214 return 0;
5215
6038178e 5216 if (pi.cmd != P_CONNECTION_FEATURES) {
1ec861eb 5217 drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 5218 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5219 return -1;
5220 }
5221
77351055 5222 if (pi.size != expect) {
1ec861eb 5223 drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 5224 expect, pi.size);
b411b363
PR
5225 return -1;
5226 }
5227
e658983a 5228 p = pi.data;
bde89a9e 5229 err = drbd_recv_all_warn(connection, p, expect);
a5c31904 5230 if (err)
b411b363 5231 return 0;
b411b363 5232
b411b363
PR
5233 p->protocol_min = be32_to_cpu(p->protocol_min);
5234 p->protocol_max = be32_to_cpu(p->protocol_max);
5235 if (p->protocol_max == 0)
5236 p->protocol_max = p->protocol_min;
5237
5238 if (PRO_VERSION_MAX < p->protocol_min ||
5239 PRO_VERSION_MIN > p->protocol_max)
5240 goto incompat;
5241
bde89a9e 5242 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
20c68fde 5243 connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
b411b363 5244
1ec861eb 5245 drbd_info(connection, "Handshake successful: "
bde89a9e 5246 "Agreed network protocol version %d\n", connection->agreed_pro_version);
b411b363 5247
9104d31a
LE
5248 drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n",
5249 connection->agreed_features,
5250 connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
5251 connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
5252 connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" :
5253 connection->agreed_features ? "" : " none");
92d94ae6 5254
b411b363
PR
5255 return 1;
5256
5257 incompat:
1ec861eb 5258 drbd_err(connection, "incompatible DRBD dialects: "
b411b363
PR
5259 "I support %d-%d, peer supports %d-%d\n",
5260 PRO_VERSION_MIN, PRO_VERSION_MAX,
5261 p->protocol_min, p->protocol_max);
5262 return -1;
5263}
5264
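/*
 * Editorial worked example for the negotiation below (illustrative
 * numbers): if we support 86..101 and the peer announces 86..96, the
 * ranges overlap and agreed_pro_version = min(101, 96) = 96. Feature
 * flags are ANDed, so a feature is only used when both sides offer it.
 */
#if 0
static int example_agree_version(int my_min, int my_max,
                                 int peer_min, int peer_max)
{
        if (my_max < peer_min || my_min > peer_max)
                return -1;      /* incompatible dialects */
        return my_max < peer_max ? my_max : peer_max;
}
#endif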
5265#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
bde89a9e 5266static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5267{
1ec861eb
AG
5268 drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
5269 drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 5270 return -1;
b411b363
PR
5271}
5272#else
5273#define CHALLENGE_LEN 64
b10d96cb
JT
5274
5275/* Return value:
5276 1 - auth succeeded,
5277 0 - failed, try again (network error),
5278 -1 - auth failed, don't try again.
5279*/
5280
bde89a9e 5281static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5282{
9f5bdc33 5283 struct drbd_socket *sock;
b411b363 5284 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
b411b363
PR
5285 char *response = NULL;
5286 char *right_response = NULL;
5287 char *peers_ch = NULL;
44ed167d
PR
5288 unsigned int key_len;
5289 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363 5290 unsigned int resp_size;
9534d671 5291 SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
77351055 5292 struct packet_info pi;
44ed167d 5293 struct net_conf *nc;
69bc7bc3 5294 int err, rv;
b411b363 5295
9f5bdc33 5296 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 5297
44ed167d 5298 rcu_read_lock();
bde89a9e 5299 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
5300 key_len = strlen(nc->shared_secret);
5301 memcpy(secret, nc->shared_secret, key_len);
5302 rcu_read_unlock();
5303
9534d671
HX
5304 desc->tfm = connection->cram_hmac_tfm;
5305 desc->flags = 0;
b411b363 5306
9534d671 5307 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 5308 if (rv) {
9534d671 5309 drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
b10d96cb 5310 rv = -1;
b411b363
PR
5311 goto fail;
5312 }
5313
5314 get_random_bytes(my_challenge, CHALLENGE_LEN);
5315
bde89a9e
AG
5316 sock = &connection->data;
5317 if (!conn_prepare_command(connection, sock)) {
9f5bdc33
AG
5318 rv = 0;
5319 goto fail;
5320 }
bde89a9e 5321 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 5322 my_challenge, CHALLENGE_LEN);
b411b363
PR
5323 if (!rv)
5324 goto fail;
5325
bde89a9e 5326 err = drbd_recv_header(connection, &pi);
69bc7bc3
AG
5327 if (err) {
5328 rv = 0;
b411b363 5329 goto fail;
69bc7bc3 5330 }
b411b363 5331
77351055 5332 if (pi.cmd != P_AUTH_CHALLENGE) {
1ec861eb 5333 drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 5334 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5335 rv = 0;
5336 goto fail;
5337 }
5338
77351055 5339 if (pi.size > CHALLENGE_LEN * 2) {
1ec861eb 5340 drbd_err(connection, "expected AuthChallenge payload too big.\n");
b10d96cb 5341 rv = -1;
b411b363
PR
5342 goto fail;
5343 }
5344
67cca286
PR
5345 if (pi.size < CHALLENGE_LEN) {
5346 drbd_err(connection, "AuthChallenge payload too small.\n");
5347 rv = -1;
5348 goto fail;
5349 }
5350
77351055 5351 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 5352 if (peers_ch == NULL) {
1ec861eb 5353 drbd_err(connection, "kmalloc of peers_ch failed\n");
b10d96cb 5354 rv = -1;
b411b363
PR
5355 goto fail;
5356 }
5357
bde89a9e 5358 err = drbd_recv_all_warn(connection, peers_ch, pi.size);
a5c31904 5359 if (err) {
b411b363
PR
5360 rv = 0;
5361 goto fail;
5362 }
5363
67cca286
PR
5364 if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
5365 drbd_err(connection, "Peer presented the same challenge!\n");
5366 rv = -1;
5367 goto fail;
5368 }
5369
9534d671 5370 resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
b411b363
PR
5371 response = kmalloc(resp_size, GFP_NOIO);
5372 if (response == NULL) {
1ec861eb 5373 drbd_err(connection, "kmalloc of response failed\n");
b10d96cb 5374 rv = -1;
b411b363
PR
5375 goto fail;
5376 }
5377
9534d671 5378 rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
b411b363 5379 if (rv) {
1ec861eb 5380 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5381 rv = -1;
b411b363
PR
5382 goto fail;
5383 }
5384
bde89a9e 5385 if (!conn_prepare_command(connection, sock)) {
9f5bdc33 5386 rv = 0;
b411b363 5387 goto fail;
9f5bdc33 5388 }
bde89a9e 5389 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 5390 response, resp_size);
b411b363
PR
5391 if (!rv)
5392 goto fail;
5393
bde89a9e 5394 err = drbd_recv_header(connection, &pi);
69bc7bc3 5395 if (err) {
b411b363
PR
5396 rv = 0;
5397 goto fail;
5398 }
5399
77351055 5400 if (pi.cmd != P_AUTH_RESPONSE) {
1ec861eb 5401 drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 5402 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5403 rv = 0;
5404 goto fail;
5405 }
5406
77351055 5407 if (pi.size != resp_size) {
1ec861eb 5408 drbd_err(connection, "AuthResponse payload of wrong size\n");
b411b363
PR
5409 rv = 0;
5410 goto fail;
5411 }
b411b363 5412
bde89a9e 5413 err = drbd_recv_all_warn(connection, response , resp_size);
a5c31904 5414 if (err) {
b411b363
PR
5415 rv = 0;
5416 goto fail;
5417 }
5418
5419 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 5420 if (right_response == NULL) {
1ec861eb 5421 drbd_err(connection, "kmalloc of right_response failed\n");
b10d96cb 5422 rv = -1;
b411b363
PR
5423 goto fail;
5424 }
5425
9534d671
HX
5426 rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
5427 right_response);
b411b363 5428 if (rv) {
1ec861eb 5429 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5430 rv = -1;
b411b363
PR
5431 goto fail;
5432 }
5433
5434 rv = !memcmp(response, right_response, resp_size);
5435
5436 if (rv)
1ec861eb 5437 drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
44ed167d 5438 resp_size);
b10d96cb
JT
5439 else
5440 rv = -1;
b411b363
PR
5441
5442 fail:
5443 kfree(peers_ch);
5444 kfree(response);
5445 kfree(right_response);
9534d671 5446 shash_desc_zero(desc);
b411b363
PR
5447
5448 return rv;
5449}
5450#endif
5451
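/*
 * Editorial summary of the exchange implemented above, as one side
 * sees it (both peers run the same steps concurrently):
 *
 *   send P_AUTH_CHALLENGE  64 random bytes C_mine
 *   recv P_AUTH_CHALLENGE  C_peer (reject if C_peer == C_mine)
 *   send P_AUTH_RESPONSE   HMAC(shared_secret, C_peer)
 *   recv P_AUTH_RESPONSE   and memcmp() it against
 *                          HMAC(shared_secret, C_mine)
 *
 * Rejecting a mirrored challenge keeps a peer from simply replaying
 * our own response back at us.
 */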
8fe60551 5452int drbd_receiver(struct drbd_thread *thi)
b411b363 5453{
bde89a9e 5454 struct drbd_connection *connection = thi->connection;
b411b363
PR
5455 int h;
5456
1ec861eb 5457 drbd_info(connection, "receiver (re)started\n");
b411b363
PR
5458
5459 do {
bde89a9e 5460 h = conn_connect(connection);
b411b363 5461 if (h == 0) {
bde89a9e 5462 conn_disconnect(connection);
20ee6390 5463 schedule_timeout_interruptible(HZ);
b411b363
PR
5464 }
5465 if (h == -1) {
1ec861eb 5466 drbd_warn(connection, "Discarding network configuration.\n");
bde89a9e 5467 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
5468 }
5469 } while (h == 0);
5470
c51a0ef3
LE
5471 if (h > 0) {
5472 blk_start_plug(&connection->receiver_plug);
bde89a9e 5473 drbdd(connection);
c51a0ef3
LE
5474 blk_finish_plug(&connection->receiver_plug);
5475 }
b411b363 5476
bde89a9e 5477 conn_disconnect(connection);
b411b363 5478
1ec861eb 5479 drbd_info(connection, "receiver terminated\n");
b411b363
PR
5480 return 0;
5481}
5482
5483/* ********* acknowledge sender ******** */
5484
bde89a9e 5485static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5486{
e658983a 5487 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
5488 int retcode = be32_to_cpu(p->retcode);
5489
5490 if (retcode >= SS_SUCCESS) {
bde89a9e 5491 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
e4f78ede 5492 } else {
bde89a9e 5493 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
1ec861eb 5494 drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
e4f78ede
PR
5495 drbd_set_st_err_str(retcode), retcode);
5496 }
bde89a9e 5497 wake_up(&connection->ping_wait);
e4f78ede 5498
2735a594 5499 return 0;
e4f78ede 5500}
b411b363 5501
bde89a9e 5502static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5503{
9f4fe9ad 5504 struct drbd_peer_device *peer_device;
b30ab791 5505 struct drbd_device *device;
e658983a 5506 struct p_req_state_reply *p = pi->data;
b411b363
PR
5507 int retcode = be32_to_cpu(p->retcode);
5508
9f4fe9ad
AG
5509 peer_device = conn_peer_device(connection, pi->vnr);
5510 if (!peer_device)
2735a594 5511 return -EIO;
9f4fe9ad 5512 device = peer_device->device;
1952e916 5513
bde89a9e 5514 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
0b0ba1ef 5515 D_ASSERT(device, connection->agreed_pro_version < 100);
bde89a9e 5516 return got_conn_RqSReply(connection, pi);
4d0fc3fd
PR
5517 }
5518
b411b363 5519 if (retcode >= SS_SUCCESS) {
b30ab791 5520 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
b411b363 5521 } else {
b30ab791 5522 set_bit(CL_ST_CHG_FAIL, &device->flags);
d0180171 5523 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 5524 drbd_set_st_err_str(retcode), retcode);
b411b363 5525 }
b30ab791 5526 wake_up(&device->state_wait);
b411b363 5527
2735a594 5528 return 0;
b411b363
PR
5529}
5530
bde89a9e 5531static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5532{
bde89a9e 5533 return drbd_send_ping_ack(connection);
b411b363
PR
5534
5535}
5536
bde89a9e 5537static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363
PR
5538{
5539 /* restore idle timeout */
bde89a9e
AG
5540 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
5541 if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
5542 wake_up(&connection->ping_wait);
b411b363 5543
2735a594 5544 return 0;
b411b363
PR
5545}
5546
bde89a9e 5547static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5548{
9f4fe9ad 5549 struct drbd_peer_device *peer_device;
b30ab791 5550 struct drbd_device *device;
e658983a 5551 struct p_block_ack *p = pi->data;
b411b363
PR
5552 sector_t sector = be64_to_cpu(p->sector);
5553 int blksize = be32_to_cpu(p->blksize);
5554
9f4fe9ad
AG
5555 peer_device = conn_peer_device(connection, pi->vnr);
5556 if (!peer_device)
2735a594 5557 return -EIO;
9f4fe9ad 5558 device = peer_device->device;
1952e916 5559
9f4fe9ad 5560 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
b411b363 5561
69a22773 5562 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5563
b30ab791
AG
5564 if (get_ldev(device)) {
5565 drbd_rs_complete_io(device, sector);
5566 drbd_set_in_sync(device, sector, blksize);
1d53f09e 5567 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
b30ab791
AG
5568 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5569 put_ldev(device);
1d53f09e 5570 }
b30ab791
AG
5571 dec_rs_pending(device);
5572 atomic_add(blksize >> 9, &device->rs_sect_in);
b411b363 5573
2735a594 5574 return 0;
b411b363
PR
5575}
5576
bc9c5c41 5577static int
b30ab791 5578validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
bc9c5c41
AG
5579 struct rb_root *root, const char *func,
5580 enum drbd_req_event what, bool missing_ok)
b411b363
PR
5581{
5582 struct drbd_request *req;
5583 struct bio_and_error m;
5584
0500813f 5585 spin_lock_irq(&device->resource->req_lock);
b30ab791 5586 req = find_request(device, root, id, sector, missing_ok, func);
b411b363 5587 if (unlikely(!req)) {
0500813f 5588 spin_unlock_irq(&device->resource->req_lock);
85997675 5589 return -EIO;
b411b363
PR
5590 }
5591 __req_mod(req, what, &m);
0500813f 5592 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5593
5594 if (m.bio)
b30ab791 5595 complete_master_bio(device, &m);
85997675 5596 return 0;
b411b363
PR
5597}
5598
bde89a9e 5599static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5600{
9f4fe9ad 5601 struct drbd_peer_device *peer_device;
b30ab791 5602 struct drbd_device *device;
e658983a 5603 struct p_block_ack *p = pi->data;
b411b363
PR
5604 sector_t sector = be64_to_cpu(p->sector);
5605 int blksize = be32_to_cpu(p->blksize);
5606 enum drbd_req_event what;
5607
9f4fe9ad
AG
5608 peer_device = conn_peer_device(connection, pi->vnr);
5609 if (!peer_device)
2735a594 5610 return -EIO;
9f4fe9ad 5611 device = peer_device->device;
1952e916 5612
69a22773 5613 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5614
579b57ed 5615 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5616 drbd_set_in_sync(device, sector, blksize);
5617 dec_rs_pending(device);
2735a594 5618 return 0;
b411b363 5619 }
e05e1e59 5620 switch (pi->cmd) {
b411b363 5621 case P_RS_WRITE_ACK:
8554df1c 5622 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
5623 break;
5624 case P_WRITE_ACK:
8554df1c 5625 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5626 break;
5627 case P_RECV_ACK:
8554df1c 5628 what = RECV_ACKED_BY_PEER;
b411b363 5629 break;
d4dabbe2
LE
5630 case P_SUPERSEDED:
5631 what = CONFLICT_RESOLVED;
b411b363 5632 break;
7be8da07 5633 case P_RETRY_WRITE:
7be8da07 5634 what = POSTPONE_WRITE;
b411b363
PR
5635 break;
5636 default:
2735a594 5637 BUG();
b411b363
PR
5638 }
5639
b30ab791
AG
5640 return validate_req_change_req_state(device, p->block_id, sector,
5641 &device->write_requests, __func__,
2735a594 5642 what, false);
b411b363
PR
5643}
5644
bde89a9e 5645static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5646{
9f4fe9ad 5647 struct drbd_peer_device *peer_device;
b30ab791 5648 struct drbd_device *device;
e658983a 5649 struct p_block_ack *p = pi->data;
b411b363 5650 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5651 int size = be32_to_cpu(p->blksize);
85997675 5652 int err;
b411b363 5653
9f4fe9ad
AG
5654 peer_device = conn_peer_device(connection, pi->vnr);
5655 if (!peer_device)
2735a594 5656 return -EIO;
9f4fe9ad 5657 device = peer_device->device;
b411b363 5658
69a22773 5659 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5660
579b57ed 5661 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5662 dec_rs_pending(device);
5663 drbd_rs_failed_io(device, sector, size);
2735a594 5664 return 0;
b411b363 5665 }
2deb8336 5666
b30ab791
AG
5667 err = validate_req_change_req_state(device, p->block_id, sector,
5668 &device->write_requests, __func__,
303d1448 5669 NEG_ACKED, true);
85997675 5670 if (err) {
c3afd8f5
AG
5671 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5672 The master bio might already be completed, therefore the
5673 request is no longer in the collision hash. */
5674 /* In Protocol B we might already have got a P_RECV_ACK
5675 but then get a P_NEG_ACK afterwards. */
b30ab791 5676 drbd_set_out_of_sync(device, sector, size);
2deb8336 5677 }
2735a594 5678 return 0;
b411b363
PR
5679}
5680
bde89a9e 5681static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5682{
9f4fe9ad 5683 struct drbd_peer_device *peer_device;
b30ab791 5684 struct drbd_device *device;
e658983a 5685 struct p_block_ack *p = pi->data;
b411b363
PR
5686 sector_t sector = be64_to_cpu(p->sector);
5687
9f4fe9ad
AG
5688 peer_device = conn_peer_device(connection, pi->vnr);
5689 if (!peer_device)
2735a594 5690 return -EIO;
9f4fe9ad 5691 device = peer_device->device;
1952e916 5692
69a22773 5693 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
7be8da07 5694
d0180171 5695 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5696 (unsigned long long)sector, be32_to_cpu(p->blksize));
5697
b30ab791
AG
5698 return validate_req_change_req_state(device, p->block_id, sector,
5699 &device->read_requests, __func__,
2735a594 5700 NEG_ACKED, false);
b411b363
PR
5701}
5702
bde89a9e 5703static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5704{
9f4fe9ad 5705 struct drbd_peer_device *peer_device;
b30ab791 5706 struct drbd_device *device;
b411b363
PR
5707 sector_t sector;
5708 int size;
e658983a 5709 struct p_block_ack *p = pi->data;
1952e916 5710
9f4fe9ad
AG
5711 peer_device = conn_peer_device(connection, pi->vnr);
5712 if (!peer_device)
2735a594 5713 return -EIO;
9f4fe9ad 5714 device = peer_device->device;
b411b363
PR
5715
5716 sector = be64_to_cpu(p->sector);
5717 size = be32_to_cpu(p->blksize);
b411b363 5718
69a22773 5719 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5720
b30ab791 5721 dec_rs_pending(device);
b411b363 5722
b30ab791
AG
5723 if (get_ldev_if_state(device, D_FAILED)) {
5724 drbd_rs_complete_io(device, sector);
e05e1e59 5725 switch (pi->cmd) {
d612d309 5726 case P_NEG_RS_DREPLY:
b30ab791 5727 drbd_rs_failed_io(device, sector, size); /* fall through */
d612d309
PR
5728 case P_RS_CANCEL:
5729 break;
5730 default:
2735a594 5731 BUG();
d612d309 5732 }
b30ab791 5733 put_ldev(device);
b411b363
PR
5734 }
5735
2735a594 5736 return 0;
b411b363
PR
5737}
5738
bde89a9e 5739static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5740{
e658983a 5741 struct p_barrier_ack *p = pi->data;
c06ece6b 5742 struct drbd_peer_device *peer_device;
9ed57dcb 5743 int vnr;
1952e916 5744
bde89a9e 5745 tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
b411b363 5746
9ed57dcb 5747 rcu_read_lock();
c06ece6b
AG
5748 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5749 struct drbd_device *device = peer_device->device;
5750
b30ab791
AG
5751 if (device->state.conn == C_AHEAD &&
5752 atomic_read(&device->ap_in_flight) == 0 &&
5753 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5754 device->start_resync_timer.expires = jiffies + HZ;
5755 add_timer(&device->start_resync_timer);
9ed57dcb 5756 }
c4752ef1 5757 }
9ed57dcb 5758 rcu_read_unlock();
c4752ef1 5759
2735a594 5760 return 0;
b411b363
PR
5761}
5762
bde89a9e 5763static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5764{
9f4fe9ad 5765 struct drbd_peer_device *peer_device;
b30ab791 5766 struct drbd_device *device;
e658983a 5767 struct p_block_ack *p = pi->data;
84b8c06b 5768 struct drbd_device_work *dw;
b411b363
PR
5769 sector_t sector;
5770 int size;
5771
9f4fe9ad
AG
5772 peer_device = conn_peer_device(connection, pi->vnr);
5773 if (!peer_device)
2735a594 5774 return -EIO;
9f4fe9ad 5775 device = peer_device->device;
1952e916 5776
b411b363
PR
5777 sector = be64_to_cpu(p->sector);
5778 size = be32_to_cpu(p->blksize);
5779
69a22773 5780 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363
PR
5781
5782 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
b30ab791 5783 drbd_ov_out_of_sync_found(device, sector, size);
b411b363 5784 else
b30ab791 5785 ov_out_of_sync_print(device);
b411b363 5786
b30ab791 5787 if (!get_ldev(device))
2735a594 5788 return 0;
1d53f09e 5789
b30ab791
AG
5790 drbd_rs_complete_io(device, sector);
5791 dec_rs_pending(device);
b411b363 5792
b30ab791 5793 --device->ov_left;
ea5442af
LE
5794
5795 /* let's advance progress step marks only for every other megabyte */
b30ab791
AG
5796 if ((device->ov_left & 0x200) == 0x200)
5797 drbd_advance_rs_marks(device, device->ov_left);
ea5442af 5798
b30ab791 5799 if (device->ov_left == 0) {
84b8c06b
AG
5800 dw = kmalloc(sizeof(*dw), GFP_NOIO);
5801 if (dw) {
5802 dw->w.cb = w_ov_finished;
5803 dw->device = device;
5804 drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
b411b363 5805 } else {
84b8c06b 5806 drbd_err(device, "kmalloc(dw) failed.");
b30ab791
AG
5807 ov_out_of_sync_print(device);
5808 drbd_resync_finished(device);
b411b363
PR
5809 }
5810 }
b30ab791 5811 put_ldev(device);
2735a594 5812 return 0;
b411b363
PR
5813}
5814
bde89a9e 5815static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 5816{
2735a594 5817 return 0;
b411b363
PR
5818}
5819
668700b4
PR
5820struct meta_sock_cmd {
5821 size_t pkt_size;
5822 int (*fn)(struct drbd_connection *connection, struct packet_info *);
5823};
5824
5825static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
0ced55a3 5826{
668700b4
PR
5827 long t;
5828 struct net_conf *nc;
32862ec7 5829
668700b4
PR
5830 rcu_read_lock();
5831 nc = rcu_dereference(connection->net_conf);
5832 t = ping_timeout ? nc->ping_timeo : nc->ping_int;
5833 rcu_read_unlock();
c141ebda 5834
668700b4
PR
5835 t *= HZ;
5836 if (ping_timeout)
5837 t /= 10;
082a3439 5838
668700b4
PR
5839 connection->meta.socket->sk->sk_rcvtimeo = t;
5840}
32862ec7 5841
668700b4
PR
5842static void set_ping_timeout(struct drbd_connection *connection)
5843{
5844 set_rcvtimeo(connection, 1);
0ced55a3
PR
5845}
5846
668700b4
PR
5847static void set_idle_timeout(struct drbd_connection *connection)
5848{
5849 set_rcvtimeo(connection, 0);
5850}
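/*
 * Editorial note on units: ping_int is configured in seconds,
 * ping_timeo in tenths of a second, hence the extra t /= 10 above.
 * Example with the usual defaults (illustrative): ping_int = 10 gives
 * a 10 s idle timeout, ping_timeo = 5 gives 0.5 s to wait for the
 * P_PING_ACK after a P_PING was sent.
 */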
b411b363 5851
668700b4 5852static struct meta_sock_cmd ack_receiver_tbl[] = {
e658983a
AG
5853 [P_PING] = { 0, got_Ping },
5854 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5855 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5856 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5857 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5858 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5859 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5860 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5861 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5862 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5863 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5864 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5865 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5866 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5867 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5868 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5869 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5870};
b411b363 5871
1c03e520 5872int drbd_ack_receiver(struct drbd_thread *thi)
b411b363 5873{
bde89a9e 5874 struct drbd_connection *connection = thi->connection;
668700b4 5875 struct meta_sock_cmd *cmd = NULL;
77351055 5876 struct packet_info pi;
668700b4 5877 unsigned long pre_recv_jif;
257d0af6 5878 int rv;
bde89a9e 5879 void *buf = connection->meta.rbuf;
b411b363 5880 int received = 0;
bde89a9e 5881 unsigned int header_size = drbd_header_size(connection);
52b061a4 5882 int expect = header_size;
44ed167d 5883 bool ping_timeout_active = false;
3990e04d 5884 struct sched_param param = { .sched_priority = 2 };
b411b363 5885
3990e04d
PR
5886 rv = sched_setscheduler(current, SCHED_RR, &param);
5887 if (rv < 0)
668700b4 5888 drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv);
b411b363 5889
e77a0a5c 5890 while (get_t_state(thi) == RUNNING) {
80822284 5891 drbd_thread_current_set_cpu(thi);
b411b363 5892
668700b4 5893 conn_reclaim_net_peer_reqs(connection);
44ed167d 5894
bde89a9e
AG
5895 if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5896 if (drbd_send_ping(connection)) {
1ec861eb 5897 drbd_err(connection, "drbd_send_ping has failed\n");
b411b363 5898 goto reconnect;
841ce241 5899 }
668700b4 5900 set_ping_timeout(connection);
44ed167d 5901 ping_timeout_active = true;
b411b363
PR
5902 }
5903
668700b4 5904 pre_recv_jif = jiffies;
bde89a9e 5905 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
b411b363
PR
5906
5907 /* Note:
5908 * -EINTR (on meta) we got a signal
5909 * -EAGAIN (on meta) rcvtimeo expired
5910 * -ECONNRESET other side closed the connection
5911 * -ERESTARTSYS (on data) we got a signal
5912 * rv < 0 other than above: unexpected error!
5913 * rv == expected: full header or command
5914 * rv < expected: "woken" by signal during receive
5915 * rv == 0 : "connection shut down by peer"
5916 */
5917 if (likely(rv > 0)) {
5918 received += rv;
5919 buf += rv;
5920 } else if (rv == 0) {
bde89a9e 5921 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
b66623e3
PR
5922 long t;
5923 rcu_read_lock();
bde89a9e 5924 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
b66623e3
PR
5925 rcu_read_unlock();
5926
bde89a9e
AG
5927 t = wait_event_timeout(connection->ping_wait,
5928 connection->cstate < C_WF_REPORT_PARAMS,
b66623e3 5929 t);
599377ac
PR
5930 if (t)
5931 break;
5932 }
1ec861eb 5933 drbd_err(connection, "meta connection shut down by peer.\n");
b411b363
PR
5934 goto reconnect;
5935 } else if (rv == -EAGAIN) {
cb6518cb
LE
5936 /* If the data socket received something meanwhile,
5937 * that is good enough: peer is still alive. */
668700b4 5938 if (time_after(connection->last_received, pre_recv_jif))
cb6518cb 5939 continue;
f36af18c 5940 if (ping_timeout_active) {
1ec861eb 5941 drbd_err(connection, "PingAck did not arrive in time.\n");
b411b363
PR
5942 goto reconnect;
5943 }
bde89a9e 5944 set_bit(SEND_PING, &connection->flags);
b411b363
PR
5945 continue;
5946 } else if (rv == -EINTR) {
668700b4
PR
5947 /* maybe drbd_thread_stop(): the while condition will notice.
5948 * maybe woken for send_ping: we'll send a ping above,
5949 * and change the rcvtimeo */
5950 flush_signals(current);
b411b363
PR
5951 continue;
5952 } else {
1ec861eb 5953 drbd_err(connection, "sock_recvmsg returned %d\n", rv);
b411b363
PR
5954 goto reconnect;
5955 }
5956
5957 if (received == expect && cmd == NULL) {
bde89a9e 5958 if (decode_header(connection, connection->meta.rbuf, &pi))
b411b363 5959 goto reconnect;
668700b4
PR
5960 cmd = &ack_receiver_tbl[pi.cmd];
5961 if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
1ec861eb 5962 drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
2fcb8f30 5963 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5964 goto disconnect;
5965 }
e658983a 5966 expect = header_size + cmd->pkt_size;
52b061a4 5967 if (pi.size != expect - header_size) {
1ec861eb 5968 drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 5969 pi.cmd, pi.size);
b411b363 5970 goto reconnect;
257d0af6 5971 }
b411b363
PR
5972 }
5973 if (received == expect) {
2735a594 5974 bool err;
a4fbda8e 5975
bde89a9e 5976 err = cmd->fn(connection, &pi);
2735a594 5977 if (err) {
1ec861eb 5978 drbd_err(connection, "%pf failed\n", cmd->fn);
b411b363 5979 goto reconnect;
1952e916 5980 }
b411b363 5981
bde89a9e 5982 connection->last_received = jiffies;
f36af18c 5983
668700b4
PR
5984 if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
5985 set_idle_timeout(connection);
44ed167d
PR
5986 ping_timeout_active = false;
5987 }
f36af18c 5988
bde89a9e 5989 buf = connection->meta.rbuf;
b411b363 5990 received = 0;
52b061a4 5991 expect = header_size;
b411b363
PR
5992 cmd = NULL;
5993 }
5994 }
5995
5996 if (0) {
5997reconnect:
bde89a9e
AG
5998 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5999 conn_md_sync(connection);
b411b363
PR
6000 }
6001 if (0) {
6002disconnect:
bde89a9e 6003 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 6004 }
b411b363 6005
668700b4 6006 drbd_info(connection, "ack_receiver terminated\n");
b411b363
PR
6007
6008 return 0;
6009}
668700b4
PR
6010
6011void drbd_send_acks_wf(struct work_struct *ws)
6012{
6013 struct drbd_peer_device *peer_device =
6014 container_of(ws, struct drbd_peer_device, send_acks_work);
6015 struct drbd_connection *connection = peer_device->connection;
6016 struct drbd_device *device = peer_device->device;
6017 struct net_conf *nc;
6018 int tcp_cork, err;
6019
6020 rcu_read_lock();
6021 nc = rcu_dereference(connection->net_conf);
6022 tcp_cork = nc->tcp_cork;
6023 rcu_read_unlock();
6024
6025 if (tcp_cork)
6026 drbd_tcp_cork(connection->meta.socket);
6027
6028 err = drbd_finish_peer_reqs(device);
6029 kref_put(&device->kref, drbd_destroy_device);
6030 /* get is in drbd_endio_write_sec_final(). That is necessary to keep the
6031 struct work_struct send_acks_work alive, which is in the peer_device object */
6032
6033 if (err) {
6034 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6035 return;
6036 }
6037
6038 if (tcp_cork)
6039 drbd_tcp_uncork(connection->meta.socket);
6040
6041 return;
6042}