use less confusing names for iov_iter direction initializers
drivers/block/drbd/drbd_receiver.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */


#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME|DRBD_FF_WZEROES)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page))) {
		++i;
		page = tmp;
	}
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
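
/* Illustration (added for clarity, not part of the original sources): a chain
 * of three pages handed around by these helpers looks like
 *
 *	*head -> A -> B -> C -> (0)
 *
 * where each "->" is the page's ->private field interpreted as a pointer to
 * the next page, and the last page carries 0 as the end-of-chain marker set
 * by page_chain_del(). */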

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req, *tmp;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not finished,
	   we can stop examining the list... */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device:	DRBD device.
 * @number:		number of pages requested
 * @retry:		whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}
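
/* Usage sketch (illustrative only, not copied from a real caller): a receive
 * path that needs room for one 4 KiB payload page would do roughly
 *
 *	struct page *page = drbd_alloc_pages(peer_device, 1, true);
 *	if (!page)
 *		return -EINTR;	(we were signalled while waiting for pool pages)
 *	...fill the page chain from the socket...
 *	drbd_free_pages(device, page, 0);
 *
 * The main caller in this file is drbd_alloc_peer_req() below, which releases
 * the pages again via drbd_free_peer_req()/drbd_free_pages(). */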

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned int nr_pages = PFN_UP(payload_size);

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, &drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
		       int is_net)
{
	might_sleep();
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
	if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}
	mempool_free(peer_req, &drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &device->net_ee;

	spin_lock_irq(&device->resource->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(device, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	wake_up(&device->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
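	/* Note (added for clarity): ITER_DEST marks msg.msg_iter as the
	 * destination of a receive, i.e. data is copied *into* iov; send paths
	 * use ITER_SOURCE.  These direction initializers replaced the older,
	 * easily confused READ/WRITE constants, per the commit named at the
	 * top of this listing. */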
	return sock_recvmsg(sock, &msg, msg.msg_flags);
}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv(connection, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(connection, buf, size);
	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
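	/* (timeo/7 is about 14.3% in either direction; the ~28.5% quoted above
	 * is the total spread between the shortest and longest result.) */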

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(connection);
	struct packet_info pi;
	struct net_conf *nc;
	int err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
	rcu_read_unlock();

	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(connection, connection->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

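	/* Non-blocking MSG_PEEK probe (note added for clarity): this detects
	 * whether the peer has already shut down or reset the connection,
	 * without consuming any bytes that a later real receive still needs. */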
	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{
	struct net_conf *nc;
	int timeout;
	bool ok;

	if (!*sock1 || !*sock2)
		return false;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
	rcu_read_unlock();
	schedule_timeout_interruptible(timeout);

	ok = drbd_socket_okay(sock1);
	ok = drbd_socket_okay(sock2) && ok;

	return ok;
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
b411b363 925{
7da35862 926 struct drbd_socket sock, msock;
c06ece6b 927 struct drbd_peer_device *peer_device;
44ed167d 928 struct net_conf *nc;
5d0b17f1
PR
929 int vnr, timeout, h;
930 bool discard_my_data, ok;
197296ff 931 enum drbd_state_rv rv;
7a426fd8 932 struct accept_wait_data ad = {
bde89a9e 933 .connection = connection,
7a426fd8
PR
934 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
935 };
b411b363 936
bde89a9e
AG
937 clear_bit(DISCONNECT_SENT, &connection->flags);
938 if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
939 return -2;
940
7da35862 941 mutex_init(&sock.mutex);
bde89a9e
AG
942 sock.sbuf = connection->data.sbuf;
943 sock.rbuf = connection->data.rbuf;
7da35862
PR
944 sock.socket = NULL;
945 mutex_init(&msock.mutex);
bde89a9e
AG
946 msock.sbuf = connection->meta.sbuf;
947 msock.rbuf = connection->meta.rbuf;
7da35862
PR
948 msock.socket = NULL;
949
0916e0e3 950 /* Assume that the peer only understands protocol 80 until we know better. */
bde89a9e 951 connection->agreed_pro_version = 80;
b411b363 952
bde89a9e 953 if (prepare_listen_socket(connection, &ad))
7a426fd8 954 return 0;
b411b363
PR
955
956 do {
2bf89621 957 struct socket *s;
b411b363 958
bde89a9e 959 s = drbd_try_connect(connection);
b411b363 960 if (s) {
7da35862
PR
961 if (!sock.socket) {
962 sock.socket = s;
bde89a9e 963 send_first_packet(connection, &sock, P_INITIAL_DATA);
7da35862 964 } else if (!msock.socket) {
bde89a9e 965 clear_bit(RESOLVE_CONFLICTS, &connection->flags);
7da35862 966 msock.socket = s;
bde89a9e 967 send_first_packet(connection, &msock, P_INITIAL_META);
b411b363 968 } else {
1ec861eb 969 drbd_err(connection, "Logic error in conn_connect()\n");
b411b363
PR
970 goto out_release_sockets;
971 }
972 }
973
5d0b17f1
PR
974 if (connection_established(connection, &sock.socket, &msock.socket))
975 break;
b411b363
PR
976
977retry:
bde89a9e 978 s = drbd_wait_for_connect(connection, &ad);
b411b363 979 if (s) {
bde89a9e 980 int fp = receive_first_packet(connection, s);
7da35862
PR
981 drbd_socket_okay(&sock.socket);
982 drbd_socket_okay(&msock.socket);
92f14951 983 switch (fp) {
e5d6f33a 984 case P_INITIAL_DATA:
7da35862 985 if (sock.socket) {
1ec861eb 986 drbd_warn(connection, "initial packet S crossed\n");
7da35862 987 sock_release(sock.socket);
80c6eed4
PR
988 sock.socket = s;
989 goto randomize;
b411b363 990 }
7da35862 991 sock.socket = s;
b411b363 992 break;
e5d6f33a 993 case P_INITIAL_META:
bde89a9e 994 set_bit(RESOLVE_CONFLICTS, &connection->flags);
7da35862 995 if (msock.socket) {
1ec861eb 996 drbd_warn(connection, "initial packet M crossed\n");
7da35862 997 sock_release(msock.socket);
80c6eed4
PR
998 msock.socket = s;
999 goto randomize;
b411b363 1000 }
7da35862 1001 msock.socket = s;
b411b363
PR
1002 break;
1003 default:
1ec861eb 1004 drbd_warn(connection, "Error receiving initial packet\n");
b411b363 1005 sock_release(s);
80c6eed4 1006randomize:
81895a65 1007 if (prandom_u32_max(2))
b411b363
PR
1008 goto retry;
1009 }
1010 }
1011
bde89a9e 1012 if (connection->cstate <= C_DISCONNECTING)
b411b363
PR
1013 goto out_release_sockets;
1014 if (signal_pending(current)) {
1015 flush_signals(current);
1016 smp_rmb();
bde89a9e 1017 if (get_t_state(&connection->receiver) == EXITING)
b411b363
PR
1018 goto out_release_sockets;
1019 }
1020
5d0b17f1 1021 ok = connection_established(connection, &sock.socket, &msock.socket);
b666dbf8 1022 } while (!ok);
b411b363 1023
7a426fd8
PR
1024 if (ad.s_listen)
1025 sock_release(ad.s_listen);
b411b363 1026
98683650
PR
1027 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
1028 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 1029
7da35862
PR
1030 sock.socket->sk->sk_allocation = GFP_NOIO;
1031 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 1032
7da35862
PR
1033 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1034 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 1035
b411b363 1036 /* NOT YET ...
bde89a9e 1037 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
7da35862 1038 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 1039 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 1040 * which we set to 4x the configured ping_timeout. */
44ed167d 1041 rcu_read_lock();
bde89a9e 1042 nc = rcu_dereference(connection->net_conf);
44ed167d 1043
7da35862
PR
1044 sock.socket->sk->sk_sndtimeo =
1045 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 1046
7da35862 1047 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 1048 timeout = nc->timeout * HZ / 10;
08b165ba 1049 discard_my_data = nc->discard_my_data;
44ed167d 1050 rcu_read_unlock();
b411b363 1051
7da35862 1052 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1053
1054 /* we don't want delays.
25985edc 1055 * we use TCP_CORK where appropriate, though */
12abc5ee
CH
1056 tcp_sock_set_nodelay(sock.socket->sk);
1057 tcp_sock_set_nodelay(msock.socket->sk);
b411b363 1058
bde89a9e
AG
1059 connection->data.socket = sock.socket;
1060 connection->meta.socket = msock.socket;
1061 connection->last_received = jiffies;
b411b363 1062
bde89a9e 1063 h = drbd_do_features(connection);
b411b363
PR
1064 if (h <= 0)
1065 return h;
1066
bde89a9e 1067 if (connection->cram_hmac_tfm) {
b30ab791 1068 /* drbd_request_state(device, NS(conn, WFAuth)); */
bde89a9e 1069 switch (drbd_do_auth(connection)) {
b10d96cb 1070 case -1:
1ec861eb 1071 drbd_err(connection, "Authentication of peer failed\n");
b411b363 1072 return -1;
b10d96cb 1073 case 0:
1ec861eb 1074 drbd_err(connection, "Authentication of peer failed, trying again.\n");
b10d96cb 1075 return 0;
b411b363
PR
1076 }
1077 }
1078
bde89a9e
AG
1079 connection->data.socket->sk->sk_sndtimeo = timeout;
1080 connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1081
bde89a9e 1082 if (drbd_send_protocol(connection) == -EOPNOTSUPP)
7e2455c1 1083 return -1;
b411b363 1084
31007745
PR
1085 /* Prevent a race between resync-handshake and
1086 * being promoted to Primary.
1087 *
1088 * Grab and release the state mutex, so we know that any current
1089 * drbd_set_role() is finished, and any incoming drbd_set_role
1090 * will see the STATE_SENT flag, and wait for it to be cleared.
1091 */
1092 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1093 mutex_lock(peer_device->device->state_mutex);
1094
cde81d99
LE
1095 /* avoid a race with conn_request_state( C_DISCONNECTING ) */
1096 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 1097 set_bit(STATE_SENT, &connection->flags);
cde81d99 1098 spin_unlock_irq(&connection->resource->req_lock);
a1096a6e 1099
31007745
PR
1100 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1101 mutex_unlock(peer_device->device->state_mutex);
1102
c141ebda 1103 rcu_read_lock();
c06ece6b
AG
1104 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1105 struct drbd_device *device = peer_device->device;
b30ab791 1106 kref_get(&device->kref);
26ea8f92
AG
1107 rcu_read_unlock();
1108
08b165ba 1109 if (discard_my_data)
b30ab791 1110 set_bit(DISCARD_MY_DATA, &device->flags);
08b165ba 1111 else
b30ab791 1112 clear_bit(DISCARD_MY_DATA, &device->flags);
08b165ba 1113
69a22773 1114 drbd_connected(peer_device);
05a10ec7 1115 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
1116 rcu_read_lock();
1117 }
1118 rcu_read_unlock();
1119
bde89a9e
AG
1120 rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1121 if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
1122 clear_bit(STATE_SENT, &connection->flags);
1e86ac48 1123 return 0;
a1096a6e 1124 }
1e86ac48 1125
1c03e520 1126 drbd_thread_start(&connection->ack_receiver);
39e91a60
LE
1127 /* opencoded create_singlethread_workqueue(),
1128 * to be able to use format string arguments */
1129 connection->ack_sender =
1130 alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
668700b4
PR
1131 if (!connection->ack_sender) {
1132 drbd_err(connection, "Failed to create workqueue ack_sender\n");
1133 return 0;
1134 }
b411b363 1135
0500813f 1136 mutex_lock(&connection->resource->conf_update);
08b165ba
PR
1137 /* The discard_my_data flag is a single-shot modifier to the next
1138 * connection attempt, the handshake of which is now well underway.
1139 * No need for rcu style copying of the whole struct
1140 * just to clear a single value. */
bde89a9e 1141 connection->net_conf->discard_my_data = 0;
0500813f 1142 mutex_unlock(&connection->resource->conf_update);
08b165ba 1143
d3fcb490 1144 return h;
b411b363
PR
1145
1146out_release_sockets:
7a426fd8
PR
1147 if (ad.s_listen)
1148 sock_release(ad.s_listen);
7da35862
PR
1149 if (sock.socket)
1150 sock_release(sock.socket);
1151 if (msock.socket)
1152 sock_release(msock.socket);
b411b363
PR
1153 return -1;
1154}
1155
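/* On-the-wire header formats handled below (note added for clarity):
 * protocol >= 100 uses struct p_header100 (32-bit magic DRBD_MAGIC_100,
 * carries a volume number), protocols 95..99 use struct p_header95 (16-bit
 * magic DRBD_MAGIC_BIG, 32-bit length), and protocol 80 uses struct
 * p_header80 (32-bit magic DRBD_MAGIC, 16-bit length). */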
static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static void drbd_unplug_all_devices(struct drbd_connection *connection)
{
	if (current->plug == &connection->receiver_plug) {
		blk_finish_plug(&connection->receiver_plug);
		blk_start_plug(&connection->receiver_plug);
	} /* else: maybe just schedule() ?? */
}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	int err;

	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
	if (err)
		return err;

	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;

	return err;
}

static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	unsigned int size = drbd_header_size(connection);
	int err;

	err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
	if (err != size) {
		/* If we have nothing in the receive buffer now, to reduce
		 * application latency, try to drain the backend queues as
		 * quickly as possible, and let remote TCP know what we have
		 * received so far. */
		if (err == -EAGAIN) {
			tcp_sock_set_quickack(connection->data.socket->sk, 2);
			drbd_unplug_all_devices(connection);
		}
		if (err > 0) {
			buffer += err;
			size -= err;
		}
		err = drbd_recv_all_warn(connection, buffer, size);
		if (err)
			return err;
	}

	err = decode_header(connection, connection->data.rbuf, pi);
	connection->last_received = jiffies;

	return err;
}
/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

static void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_status) {
		ctx->error = blk_status_to_errno(bio->bi_status);
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
				    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);

	if (!octx) {
		drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now?  disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

		atomic_set(&ctx.pending, 1);
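		/* Note (added for clarity): pending starts at 1 as a bias held
		 * by this function; each submit_one_flush() adds one and each
		 * completed flush bio drops one.  The atomic_dec_and_test()
		 * further down drops the bias, so ctx.done is only waited for
		 * (and completed) once every flush actually submitted has
		 * ended. */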
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
	struct disk_conf *dc;

	dc = rcu_dereference(bdev->disk_conf);

	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
		wo = WO_DRAIN_IO;
	if (wo == WO_DRAIN_IO && !dc->disk_drain)
		wo = WO_NONE;

	return wo;
}

/*
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo)
{
	struct drbd_device *device;
	enum write_ordering_e pwo;
	int vnr;
	static char *write_ordering_str[] = {
		[WO_NONE] = "none",
		[WO_DRAIN_IO] = "drain",
		[WO_BDEV_FLUSH] = "flush",
	};

	pwo = resource->write_ordering;
	if (wo != WO_BDEV_FLUSH)
		wo = min(pwo, wo);
	rcu_read_lock();
	idr_for_each_entry(&resource->devices, device, vnr) {
		if (get_ldev(device)) {
			wo = max_allowed_wo(device->ldev, wo);
			if (device->ldev == bdev)
				bdev = NULL;
			put_ldev(device);
		}
	}

	if (bdev)
		wo = max_allowed_wo(bdev, wo);

	rcu_read_unlock();

	resource->write_ordering = wo;
	if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}

/*
 * Mapping "discard" to ZEROOUT with UNMAP does not work for us:
 * Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it
 * will directly go to fallback mode, submitting normal writes, and
 * never even try to UNMAP.
 *
 * And dm-thin does not do this (yet), mostly because in general it has
 * to assume that "skip_block_zeroing" is set.  See also:
 * https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html
 * https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html
 *
 * We *may* ignore the discard-zeroes-data setting, if so configured.
 *
 * Assumption is that this "discard_zeroes_data=0" is only because the backend
 * may ignore partial unaligned discards.
 *
 * LVM/DM thin as of at least
 *   LVM version:     2.02.115(2)-RHEL7 (2015-01-28)
 *   Library version: 1.02.93-RHEL7 (2015-01-28)
 *   Driver version:  4.29.0
 * still behaves this way.
 *
 * For unaligned (wrt. alignment and granularity) or too small discards,
 * we zero-out the initial (and/or) trailing unaligned partial chunks,
 * but discard all the aligned full chunks.
 *
 * At least for LVM/DM thin, with skip_block_zeroing=false,
 * the result is effectively "discard_zeroes_data=1".
 */
/* flags: EE_TRIM|EE_ZEROOUT */
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
{
	struct block_device *bdev = device->ldev->backing_bdev;
	sector_t tmp, nr;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	int err = 0;

	if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM))
		goto zero_out;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(bdev_discard_granularity(bdev) >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		goto zero_out;

	if (nr_sectors < granularity)
		goto zero_out;

	tmp = start;
	if (sector_div(tmp, granularity) != alignment) {
		if (nr_sectors < 2*granularity)
			goto zero_out;
		/* start + gran - (start + gran - align) % gran */
		tmp = start + granularity - alignment;
		tmp = start + granularity - sector_div(tmp, granularity);
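		/* Worked example (illustrative): granularity = 8, alignment = 0,
		 * start = 5.  Then tmp ends up as 8, nr below becomes 3, the
		 * three sectors 5..7 are zeroed out, and the discard loop
		 * continues at the aligned sector 8. */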
1541
1542 nr = tmp - start;
1543 /* don't flag BLKDEV_ZERO_NOUNMAP, we don't know how many
1544 * layers are below us, some may have smaller granularity */
1545 err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
1546 nr_sectors -= nr;
1547 start = tmp;
1548 }
1549 while (nr_sectors >= max_discard_sectors) {
44abff2c
CH
1550 err |= blkdev_issue_discard(bdev, start, max_discard_sectors,
1551 GFP_NOIO);
f31e583a
LE
1552 nr_sectors -= max_discard_sectors;
1553 start += max_discard_sectors;
1554 }
1555 if (nr_sectors) {
1556 /* max_discard_sectors is unsigned int (and a multiple of
1557 * granularity, we made sure of that above already);
1558 * nr is < max_discard_sectors;
1559 * I don't need sector_div here, even though nr is sector_t */
1560 nr = nr_sectors;
1561 nr -= (unsigned int)nr % granularity;
1562 if (nr) {
44abff2c 1563 err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO);
f31e583a
LE
1564 nr_sectors -= nr;
1565 start += nr;
1566 }
1567 }
1568 zero_out:
1569 if (nr_sectors) {
1570 err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
1571 (flags & EE_TRIM) ? 0 : BLKDEV_ZERO_NOUNMAP);
1572 }
1573 return err != 0;
1574}
0dbed96a 1575
f31e583a
LE
1576static bool can_do_reliable_discards(struct drbd_device *device)
1577{
f31e583a
LE
1578 struct disk_conf *dc;
1579 bool can_do;
0dbed96a 1580
70200574 1581 if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
f31e583a
LE
1582 return false;
1583
1584 rcu_read_lock();
1585 dc = rcu_dereference(device->ldev->disk_conf);
1586 can_do = dc->discard_zeroes_if_aligned;
1587 rcu_read_unlock();
1588 return can_do;
1589}
1590
1591static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1592{
1593 /* If the backend cannot discard, or does not guarantee
1594 * read-back zeroes in discarded ranges, we fall back to
1595 * zero-out. Unless configuration specifically requested
1596 * otherwise. */
1597 if (!can_do_reliable_discards(device))
1598 peer_req->flags |= EE_ZEROOUT;
1599
1600 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1601 peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
1602 peer_req->flags |= EE_WAS_ERROR;
dd4f699d
LE
1603 drbd_endio_write_sec_final(peer_req);
1604}
1605
a34592ff 1606/**
fbe29dec 1607 * drbd_submit_peer_request()
b30ab791 1608 * @device: DRBD device.
db830c46 1609 * @peer_req: peer request
10f6d992
LE
1610 *
1611 * May spread the pages to multiple bios,
1612 * depending on bio_add_page restrictions.
1613 *
1614 * Returns 0 if all bios have been submitted,
1615 * -ENOMEM if we could not allocate enough bios,
1616 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1617 * single page to an empty bio (which should never happen and likely indicates
1618 * that the lower level IO stack is in some way broken). This has been observed
1619 * on certain Xen deployments.
45bb912b
LE
1620 */
1621/* TODO allocate from our own bio_set. */
b30ab791 1622int drbd_submit_peer_request(struct drbd_device *device,
fbe29dec 1623 struct drbd_peer_request *peer_req,
86563de8 1624 const blk_opf_t opf, const int fault_type)
45bb912b
LE
1625{
1626 struct bio *bios = NULL;
1627 struct bio *bio;
db830c46
AG
1628 struct page *page = peer_req->pages;
1629 sector_t sector = peer_req->i.sector;
e6be38a1
CH
1630 unsigned int data_size = peer_req->i.size;
1631 unsigned int n_bios = 0;
1632 unsigned int nr_pages = PFN_UP(data_size);
45bb912b 1633
dd4f699d
LE
1634 /* TRIM/DISCARD: for now, always use the helper function
 1635 * drbd_issue_peer_discard_or_zero_out().
1636 * It's synchronous, but it does the right thing wrt. bio splitting.
1637 * Correctness first, performance later. Next step is to code an
1638 * asynchronous variant of the same.
1639 */
a34592ff 1640 if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
a0fb3c47
LE
1641 /* wait for all pending IO completions, before we start
1642 * zeroing things out. */
5dd2ca19 1643 conn_wait_active_ee_empty(peer_req->peer_device->connection);
45d2933c
LE
1644 /* add it to the active list now,
1645 * so we can find it to present it in debugfs */
21ae5d7f
LE
1646 peer_req->submit_jif = jiffies;
1647 peer_req->flags |= EE_SUBMITTED;
700ca8c0
PR
1648
1649 /* If this was a resync request from receive_rs_deallocated(),
1650 * it is already on the sync_ee list */
1651 if (list_empty(&peer_req->w.list)) {
1652 spin_lock_irq(&device->resource->req_lock);
1653 list_add_tail(&peer_req->w.list, &device->active_ee);
1654 spin_unlock_irq(&device->resource->req_lock);
1655 }
1656
a34592ff 1657 drbd_issue_peer_discard_or_zero_out(device, peer_req);
a0fb3c47
LE
1658 return 0;
1659 }
1660
45bb912b
LE
1661 /* In most cases, we will only need one bio. But in case the lower
1662 * level restrictions happen to be different at this offset on this
1663 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1664 * request in more than one bio.
1665 *
1666 * Plain bio_alloc is good enough here, this is no DRBD internally
1667 * generated bio, but a bio allocated on behalf of the peer.
1668 */
45bb912b 1669next_bio:
86563de8 1670 bio = bio_alloc(device->ldev->backing_bdev, nr_pages, opf, GFP_NOIO);
db830c46 1671 /* > peer_req->i.sector, unless this is the first bio */
4f024f37 1672 bio->bi_iter.bi_sector = sector;
db830c46 1673 bio->bi_private = peer_req;
fcefa62e 1674 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1675
1676 bio->bi_next = bios;
1677 bios = bio;
1678 ++n_bios;
1679
1680 page_chain_for_each(page) {
11f8b2b6 1681 unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
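		/* if this bio is already full (lower level limits differ from the
		 * peer's), chain another bio via next_bio and retry this page */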
06efffda 1682 if (!bio_add_page(bio, page, len, 0))
45bb912b 1683 goto next_bio;
11f8b2b6 1684 data_size -= len;
45bb912b
LE
1685 sector += len >> 9;
1686 --nr_pages;
1687 }
11f8b2b6 1688 D_ASSERT(device, data_size == 0);
a0fb3c47 1689 D_ASSERT(device, page == NULL);
45bb912b 1690
db830c46 1691 atomic_set(&peer_req->pending_bios, n_bios);
21ae5d7f
LE
1692 /* for debugfs: update timestamp, mark as submitted */
1693 peer_req->submit_jif = jiffies;
1694 peer_req->flags |= EE_SUBMITTED;
45bb912b
LE
1695 do {
1696 bio = bios;
1697 bios = bios->bi_next;
1698 bio->bi_next = NULL;
1699
ed00aabd 1700 drbd_submit_bio_noacct(device, fault_type, bio);
45bb912b 1701 } while (bios);
45bb912b 1702 return 0;
45bb912b
LE
1703}
1704
b30ab791 1705static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
db830c46 1706 struct drbd_peer_request *peer_req)
53840641 1707{
db830c46 1708 struct drbd_interval *i = &peer_req->i;
53840641 1709
b30ab791 1710 drbd_remove_interval(&device->write_requests, i);
53840641
AG
1711 drbd_clear_interval(i);
1712
6c852bec 1713 /* Wake up any processes waiting for this peer request to complete. */
53840641 1714 if (i->waiting)
b30ab791 1715 wake_up(&device->misc_wait);
53840641
AG
1716}
1717
bde89a9e 1718static void conn_wait_active_ee_empty(struct drbd_connection *connection)
77fede51 1719{
c06ece6b 1720 struct drbd_peer_device *peer_device;
77fede51
PR
1721 int vnr;
1722
1723 rcu_read_lock();
c06ece6b
AG
1724 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1725 struct drbd_device *device = peer_device->device;
1726
b30ab791 1727 kref_get(&device->kref);
77fede51 1728 rcu_read_unlock();
b30ab791 1729 drbd_wait_ee_list_empty(device, &device->active_ee);
05a10ec7 1730 kref_put(&device->kref, drbd_destroy_device);
77fede51
PR
1731 rcu_read_lock();
1732 }
1733 rcu_read_unlock();
1734}
1735
bde89a9e 1736static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
b411b363 1737{
2451fc3b 1738 int rv;
e658983a 1739 struct p_barrier *p = pi->data;
b411b363
PR
1740 struct drbd_epoch *epoch;
1741
9ed57dcb
LE
1742 /* FIXME these are unacked on connection,
1743 * not a specific (peer)device.
1744 */
bde89a9e
AG
1745 connection->current_epoch->barrier_nr = p->barrier;
1746 connection->current_epoch->connection = connection;
1747 rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1748
1749 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1750 * the activity log, which means it would not be resynced in case the
1751 * R_PRIMARY crashes now.
1752 * Therefore we must send the barrier_ack after the barrier request was
1753 * completed. */
e9526580 1754 switch (connection->resource->write_ordering) {
f6ba8636 1755 case WO_NONE:
b411b363 1756 if (rv == FE_RECYCLED)
82bc0194 1757 return 0;
2451fc3b
PR
1758
1759 /* receiver context, in the writeout path of the other node.
1760 * avoid potential distributed deadlock */
1761 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1762 if (epoch)
1763 break;
1764 else
1ec861eb 1765 drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
df561f66 1766 fallthrough;
b411b363 1767
f6ba8636
AG
1768 case WO_BDEV_FLUSH:
1769 case WO_DRAIN_IO:
bde89a9e
AG
1770 conn_wait_active_ee_empty(connection);
1771 drbd_flush(connection);
2451fc3b 1772
bde89a9e 1773 if (atomic_read(&connection->current_epoch->epoch_size)) {
2451fc3b
PR
1774 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1775 if (epoch)
1776 break;
b411b363
PR
1777 }
1778
82bc0194 1779 return 0;
2451fc3b 1780 default:
e9526580
PR
1781 drbd_err(connection, "Strangeness in connection->write_ordering %d\n",
1782 connection->resource->write_ordering);
82bc0194 1783 return -EIO;
b411b363
PR
1784 }
1785
1786 epoch->flags = 0;
1787 atomic_set(&epoch->epoch_size, 0);
1788 atomic_set(&epoch->active, 0);
1789
bde89a9e
AG
1790 spin_lock(&connection->epoch_lock);
1791 if (atomic_read(&connection->current_epoch->epoch_size)) {
1792 list_add(&epoch->list, &connection->current_epoch->list);
1793 connection->current_epoch = epoch;
1794 connection->epochs++;
b411b363
PR
1795 } else {
1796 /* The current_epoch got recycled while we allocated this one... */
1797 kfree(epoch);
1798 }
bde89a9e 1799 spin_unlock(&connection->epoch_lock);
b411b363 1800
82bc0194 1801 return 0;
b411b363
PR
1802}
1803
9104d31a 1804/* quick wrapper in case payload size != request_size (write same) */
3d0e6375 1805static void drbd_csum_ee_size(struct crypto_shash *h,
9104d31a
LE
1806 struct drbd_peer_request *r, void *d,
1807 unsigned int payload_size)
1808{
1809 unsigned int tmp = r->i.size;
1810 r->i.size = payload_size;
1811 drbd_csum_ee(h, r, d);
1812 r->i.size = tmp;
1813}
1814
b411b363 1815/* used from receive_RSDataReply (recv_resync_read)
9104d31a
LE
1816 * and from receive_Data.
1817 * data_size: actual payload ("data in")
1818 * for normal writes that is bi_size.
1819 * for discards, that is zero.
1820 * for write same, it is logical_block_size.
1821 * both trim and write same have the bi_size ("data len to be affected")
1822 * as extra argument in the packet header.
1823 */
f6ffca9f 1824static struct drbd_peer_request *
69a22773 1825read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
a0fb3c47 1826 struct packet_info *pi) __must_hold(local)
b411b363 1827{
69a22773 1828 struct drbd_device *device = peer_device->device;
155bd9d1 1829 const sector_t capacity = get_capacity(device->vdisk);
db830c46 1830 struct drbd_peer_request *peer_req;
b411b363 1831 struct page *page;
11f8b2b6
AG
1832 int digest_size, err;
1833 unsigned int data_size = pi->size, ds;
69a22773
AG
1834 void *dig_in = peer_device->connection->int_dig_in;
1835 void *dig_vv = peer_device->connection->int_dig_vv;
6b4388ac 1836 unsigned long *data;
a0fb3c47 1837 struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
f31e583a 1838 struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL;
b411b363 1839
11f8b2b6 1840 digest_size = 0;
a0fb3c47 1841 if (!trim && peer_device->connection->peer_integrity_tfm) {
3d0e6375 1842 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
9f5bdc33
AG
1843 /*
1844 * FIXME: Receive the incoming digest into the receive buffer
1845 * here, together with its struct p_data?
1846 */
11f8b2b6 1847 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
a5c31904 1848 if (err)
b411b363 1849 return NULL;
11f8b2b6 1850 data_size -= digest_size;
b411b363
PR
1851 }
1852
a34592ff 1853 /* assume request_size == data_size, but special case trim and zeroes. */
9104d31a 1854 ds = data_size;
a0fb3c47 1855 if (trim) {
9104d31a
LE
1856 if (!expect(data_size == 0))
1857 return NULL;
1858 ds = be32_to_cpu(trim->size);
f31e583a
LE
1859 } else if (zeroes) {
1860 if (!expect(data_size == 0))
1861 return NULL;
1862 ds = be32_to_cpu(zeroes->size);
a0fb3c47
LE
1863 }
1864
9104d31a 1865 if (!expect(IS_ALIGNED(ds, 512)))
841ce241 1866 return NULL;
a34592ff 1867 if (trim || zeroes) {
9104d31a
LE
1868 if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1869 return NULL;
1870 } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
841ce241 1871 return NULL;
b411b363 1872
6666032a
LE
 1873 /* even though we trust our peer,
1874 * we sometimes have to double check. */
9104d31a 1875 if (sector + (ds>>9) > capacity) {
d0180171 1876 drbd_err(device, "request from peer beyond end of local disk: "
fdda6544 1877 "capacity: %llus < sector: %llus + size: %u\n",
6666032a 1878 (unsigned long long)capacity,
9104d31a 1879 (unsigned long long)sector, ds);
6666032a
LE
1880 return NULL;
1881 }
1882
b411b363
PR
1883 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1884 * "criss-cross" setup, that might cause write-out on some other DRBD,
1885 * which in turn might block on the other node at this very place. */
9104d31a 1886 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
db830c46 1887 if (!peer_req)
b411b363 1888 return NULL;
45bb912b 1889
21ae5d7f 1890 peer_req->flags |= EE_WRITE;
9104d31a 1891 if (trim) {
f31e583a
LE
1892 peer_req->flags |= EE_TRIM;
1893 return peer_req;
1894 }
1895 if (zeroes) {
1896 peer_req->flags |= EE_ZEROOUT;
81a3537a 1897 return peer_req;
9104d31a 1898 }
a73ff323 1899
9104d31a 1900 /* receive payload size bytes into page chain */
b411b363 1901 ds = data_size;
db830c46 1902 page = peer_req->pages;
45bb912b
LE
1903 page_chain_for_each(page) {
1904 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1905 data = kmap(page);
69a22773 1906 err = drbd_recv_all_warn(peer_device->connection, data, len);
b30ab791 1907 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
d0180171 1908 drbd_err(device, "Fault injection: Corrupting data on receive\n");
6b4388ac
PR
1909 data[0] = data[0] ^ (unsigned long)-1;
1910 }
b411b363 1911 kunmap(page);
a5c31904 1912 if (err) {
b30ab791 1913 drbd_free_peer_req(device, peer_req);
b411b363
PR
1914 return NULL;
1915 }
a5c31904 1916 ds -= len;
b411b363
PR
1917 }
1918
11f8b2b6 1919 if (digest_size) {
9104d31a 1920 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
11f8b2b6 1921 if (memcmp(dig_in, dig_vv, digest_size)) {
d0180171 1922 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
470be44a 1923 (unsigned long long)sector, data_size);
b30ab791 1924 drbd_free_peer_req(device, peer_req);
b411b363
PR
1925 return NULL;
1926 }
1927 }
11f8b2b6 1928 device->recv_cnt += data_size >> 9;
db830c46 1929 return peer_req;
b411b363
PR
1930}
1931
1932/* drbd_drain_block() just takes a data block
1933 * out of the socket input buffer, and discards it.
1934 */
69a22773 1935static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
b411b363
PR
1936{
1937 struct page *page;
a5c31904 1938 int err = 0;
b411b363
PR
1939 void *data;
1940
c3470cde 1941 if (!data_size)
fc5be839 1942 return 0;
c3470cde 1943
69a22773 1944 page = drbd_alloc_pages(peer_device, 1, 1);
b411b363
PR
1945
1946 data = kmap(page);
1947 while (data_size) {
fc5be839
AG
1948 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1949
69a22773 1950 err = drbd_recv_all_warn(peer_device->connection, data, len);
a5c31904 1951 if (err)
b411b363 1952 break;
a5c31904 1953 data_size -= len;
b411b363
PR
1954 }
1955 kunmap(page);
69a22773 1956 drbd_free_pages(peer_device->device, page, 0);
fc5be839 1957 return err;
b411b363
PR
1958}
1959
69a22773 1960static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
b411b363
PR
1961 sector_t sector, int data_size)
1962{
7988613b
KO
1963 struct bio_vec bvec;
1964 struct bvec_iter iter;
b411b363 1965 struct bio *bio;
11f8b2b6 1966 int digest_size, err, expect;
69a22773
AG
1967 void *dig_in = peer_device->connection->int_dig_in;
1968 void *dig_vv = peer_device->connection->int_dig_vv;
b411b363 1969
11f8b2b6 1970 digest_size = 0;
69a22773 1971 if (peer_device->connection->peer_integrity_tfm) {
3d0e6375 1972 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
11f8b2b6 1973 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
a5c31904
AG
1974 if (err)
1975 return err;
11f8b2b6 1976 data_size -= digest_size;
b411b363
PR
1977 }
1978
b411b363
PR
1979 /* optimistically update recv_cnt. if receiving fails below,
1980 * we disconnect anyways, and counters will be reset. */
69a22773 1981 peer_device->device->recv_cnt += data_size>>9;
b411b363
PR
1982
1983 bio = req->master_bio;
69a22773 1984 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
b411b363 1985
7988613b 1986 bio_for_each_segment(bvec, bio, iter) {
3eddaa60 1987 void *mapped = bvec_kmap_local(&bvec);
7988613b 1988 expect = min_t(int, data_size, bvec.bv_len);
69a22773 1989 err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
3eddaa60 1990 kunmap_local(mapped);
a5c31904
AG
1991 if (err)
1992 return err;
1993 data_size -= expect;
b411b363
PR
1994 }
1995
11f8b2b6 1996 if (digest_size) {
69a22773 1997 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
11f8b2b6 1998 if (memcmp(dig_in, dig_vv, digest_size)) {
69a22773 1999 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 2000 return -EINVAL;
b411b363
PR
2001 }
2002 }
2003
69a22773 2004 D_ASSERT(peer_device->device, data_size == 0);
28284cef 2005 return 0;
b411b363
PR
2006}
2007
a990be46 2008/*
668700b4 2009 * e_end_resync_block() is called in ack_sender context via
a990be46
AG
2010 * drbd_finish_peer_reqs().
2011 */
99920dc5 2012static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 2013{
8050e6d0 2014 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2015 container_of(w, struct drbd_peer_request, w);
2016 struct drbd_peer_device *peer_device = peer_req->peer_device;
2017 struct drbd_device *device = peer_device->device;
db830c46 2018 sector_t sector = peer_req->i.sector;
99920dc5 2019 int err;
b411b363 2020
0b0ba1ef 2021 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
b411b363 2022
db830c46 2023 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b30ab791 2024 drbd_set_in_sync(device, sector, peer_req->i.size);
a8cd15ba 2025 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
b411b363
PR
2026 } else {
2027 /* Record failure to sync */
b30ab791 2028 drbd_rs_failed_io(device, sector, peer_req->i.size);
b411b363 2029
a8cd15ba 2030 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
b411b363 2031 }
b30ab791 2032 dec_unacked(device);
b411b363 2033
99920dc5 2034 return err;
b411b363
PR
2035}
2036
69a22773 2037static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
a0fb3c47 2038 struct packet_info *pi) __releases(local)
b411b363 2039{
69a22773 2040 struct drbd_device *device = peer_device->device;
db830c46 2041 struct drbd_peer_request *peer_req;
b411b363 2042
a0fb3c47 2043 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
db830c46 2044 if (!peer_req)
45bb912b 2045 goto fail;
b411b363 2046
b30ab791 2047 dec_rs_pending(device);
b411b363 2048
b30ab791 2049 inc_unacked(device);
b411b363
PR
2050 /* corresponding dec_unacked() in e_end_resync_block()
 2051 * or in _drbd_clear_done_ee, respectively */
2052
a8cd15ba 2053 peer_req->w.cb = e_end_resync_block;
21ae5d7f 2054 peer_req->submit_jif = jiffies;
45bb912b 2055
0500813f 2056 spin_lock_irq(&device->resource->req_lock);
b9ed7080 2057 list_add_tail(&peer_req->w.list, &device->sync_ee);
0500813f 2058 spin_unlock_irq(&device->resource->req_lock);
b411b363 2059
a0fb3c47 2060 atomic_add(pi->size >> 9, &device->rs_sect_ev);
86563de8 2061 if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE,
bb3cc85e 2062 DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 2063 return 0;
b411b363 2064
10f6d992 2065 /* don't care for the reason here */
d0180171 2066 drbd_err(device, "submit failed, triggering re-connect\n");
0500813f 2067 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2068 list_del(&peer_req->w.list);
0500813f 2069 spin_unlock_irq(&device->resource->req_lock);
22cc37a9 2070
b30ab791 2071 drbd_free_peer_req(device, peer_req);
45bb912b 2072fail:
b30ab791 2073 put_ldev(device);
e1c1b0fc 2074 return -EIO;
b411b363
PR
2075}
2076
668eebc6 2077static struct drbd_request *
b30ab791 2078find_request(struct drbd_device *device, struct rb_root *root, u64 id,
bc9c5c41 2079 sector_t sector, bool missing_ok, const char *func)
51624585 2080{
51624585
AG
2081 struct drbd_request *req;
2082
bc9c5c41
AG
2083 /* Request object according to our peer */
2084 req = (struct drbd_request *)(unsigned long)id;
5e472264 2085 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 2086 return req;
c3afd8f5 2087 if (!missing_ok) {
d0180171 2088 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
2089 (unsigned long)id, (unsigned long long)sector);
2090 }
51624585 2091 return NULL;
b411b363
PR
2092}
2093
bde89a9e 2094static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2095{
9f4fe9ad 2096 struct drbd_peer_device *peer_device;
b30ab791 2097 struct drbd_device *device;
b411b363
PR
2098 struct drbd_request *req;
2099 sector_t sector;
82bc0194 2100 int err;
e658983a 2101 struct p_data *p = pi->data;
4a76b161 2102
9f4fe9ad
AG
2103 peer_device = conn_peer_device(connection, pi->vnr);
2104 if (!peer_device)
4a76b161 2105 return -EIO;
9f4fe9ad 2106 device = peer_device->device;
b411b363
PR
2107
2108 sector = be64_to_cpu(p->sector);
2109
0500813f 2110 spin_lock_irq(&device->resource->req_lock);
b30ab791 2111 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
0500813f 2112 spin_unlock_irq(&device->resource->req_lock);
c3afd8f5 2113 if (unlikely(!req))
82bc0194 2114 return -EIO;
b411b363 2115
69a22773 2116 err = recv_dless_read(peer_device, req, sector, pi->size);
82bc0194 2117 if (!err)
8554df1c 2118 req_mod(req, DATA_RECEIVED);
b411b363
PR
2119 /* else: nothing. handled from drbd_disconnect...
2120 * I don't think we may complete this just yet
2121 * in case we are "on-disconnect: freeze" */
2122
82bc0194 2123 return err;
b411b363
PR
2124}
2125
bde89a9e 2126static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2127{
9f4fe9ad 2128 struct drbd_peer_device *peer_device;
b30ab791 2129 struct drbd_device *device;
b411b363 2130 sector_t sector;
82bc0194 2131 int err;
e658983a 2132 struct p_data *p = pi->data;
4a76b161 2133
9f4fe9ad
AG
2134 peer_device = conn_peer_device(connection, pi->vnr);
2135 if (!peer_device)
4a76b161 2136 return -EIO;
9f4fe9ad 2137 device = peer_device->device;
b411b363
PR
2138
2139 sector = be64_to_cpu(p->sector);
0b0ba1ef 2140 D_ASSERT(device, p->block_id == ID_SYNCER);
b411b363 2141
b30ab791 2142 if (get_ldev(device)) {
b411b363
PR
2143 /* data is submitted to disk within recv_resync_read.
2144 * corresponding put_ldev done below on error,
fcefa62e 2145 * or in drbd_peer_request_endio. */
a0fb3c47 2146 err = recv_resync_read(peer_device, sector, pi);
b411b363
PR
2147 } else {
2148 if (__ratelimit(&drbd_ratelimit_state))
d0180171 2149 drbd_err(device, "Can not write resync data to local disk.\n");
b411b363 2150
69a22773 2151 err = drbd_drain_block(peer_device, pi->size);
b411b363 2152
69a22773 2153 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
b411b363
PR
2154 }
2155
b30ab791 2156 atomic_add(pi->size >> 9, &device->rs_sect_in);
778f271d 2157
82bc0194 2158 return err;
b411b363
PR
2159}
2160
b30ab791 2161static void restart_conflicting_writes(struct drbd_device *device,
7be8da07 2162 sector_t sector, int size)
b411b363 2163{
7be8da07
AG
2164 struct drbd_interval *i;
2165 struct drbd_request *req;
2166
b30ab791 2167 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2168 if (!i->local)
2169 continue;
2170 req = container_of(i, struct drbd_request, i);
2171 if (req->rq_state & RQ_LOCAL_PENDING ||
2172 !(req->rq_state & RQ_POSTPONED))
2173 continue;
2312f0b3
LE
2174 /* as it is RQ_POSTPONED, this will cause it to
2175 * be queued on the retry workqueue. */
d4dabbe2 2176 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
2177 }
2178}
b411b363 2179
a990be46 2180/*
668700b4 2181 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
b411b363 2182 */
99920dc5 2183static int e_end_block(struct drbd_work *w, int cancel)
b411b363 2184{
8050e6d0 2185 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2186 container_of(w, struct drbd_peer_request, w);
2187 struct drbd_peer_device *peer_device = peer_req->peer_device;
2188 struct drbd_device *device = peer_device->device;
db830c46 2189 sector_t sector = peer_req->i.sector;
99920dc5 2190 int err = 0, pcmd;
b411b363 2191
303d1448 2192 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 2193 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b30ab791
AG
2194 pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2195 device->state.conn <= C_PAUSED_SYNC_T &&
db830c46 2196 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 2197 P_RS_WRITE_ACK : P_WRITE_ACK;
a8cd15ba 2198 err = drbd_send_ack(peer_device, pcmd, peer_req);
b411b363 2199 if (pcmd == P_RS_WRITE_ACK)
b30ab791 2200 drbd_set_in_sync(device, sector, peer_req->i.size);
b411b363 2201 } else {
a8cd15ba 2202 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
b411b363
PR
2203 /* we expect it to be marked out of sync anyways...
2204 * maybe assert this? */
2205 }
b30ab791 2206 dec_unacked(device);
b411b363 2207 }
08d0dabf 2208
b411b363
PR
2209 /* we delete from the conflict detection hash _after_ we sent out the
2210 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 2211 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
0500813f 2212 spin_lock_irq(&device->resource->req_lock);
0b0ba1ef 2213 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
b30ab791 2214 drbd_remove_epoch_entry_interval(device, peer_req);
7be8da07 2215 if (peer_req->flags & EE_RESTART_REQUESTS)
b30ab791 2216 restart_conflicting_writes(device, sector, peer_req->i.size);
0500813f 2217 spin_unlock_irq(&device->resource->req_lock);
bb3bfe96 2218 } else
0b0ba1ef 2219 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
b411b363 2220
5dd2ca19 2221 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 2222
99920dc5 2223 return err;
b411b363
PR
2224}
2225
a8cd15ba 2226static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 2227{
8050e6d0 2228 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2229 container_of(w, struct drbd_peer_request, w);
2230 struct drbd_peer_device *peer_device = peer_req->peer_device;
99920dc5 2231 int err;
b411b363 2232
a8cd15ba
AG
2233 err = drbd_send_ack(peer_device, ack, peer_req);
2234 dec_unacked(peer_device->device);
b411b363 2235
99920dc5 2236 return err;
b411b363
PR
2237}
2238
d4dabbe2 2239static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 2240{
a8cd15ba 2241 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
2242}
2243
99920dc5 2244static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07 2245{
a8cd15ba
AG
2246 struct drbd_peer_request *peer_req =
2247 container_of(w, struct drbd_peer_request, w);
2248 struct drbd_connection *connection = peer_req->peer_device->connection;
7be8da07 2249
a8cd15ba 2250 return e_send_ack(w, connection->agreed_pro_version >= 100 ?
d4dabbe2 2251 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 2252}
b411b363 2253
3e394da1
AG
2254static bool seq_greater(u32 a, u32 b)
2255{
2256 /*
2257 * We assume 32-bit wrap-around here.
2258 * For 24-bit wrap-around, we would have to shift:
2259 * a <<= 8; b <<= 8;
2260 */
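	/* e.g. right after a wrap, seq_greater(1, 0xffffffff) is true:
	 * (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0 */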
2261 return (s32)a - (s32)b > 0;
2262}
b411b363 2263
3e394da1
AG
2264static u32 seq_max(u32 a, u32 b)
2265{
2266 return seq_greater(a, b) ? a : b;
b411b363
PR
2267}
2268
69a22773 2269static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
3e394da1 2270{
69a22773 2271 struct drbd_device *device = peer_device->device;
3c13b680 2272 unsigned int newest_peer_seq;
3e394da1 2273
69a22773 2274 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
b30ab791
AG
2275 spin_lock(&device->peer_seq_lock);
2276 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2277 device->peer_seq = newest_peer_seq;
2278 spin_unlock(&device->peer_seq_lock);
2279 /* wake up only if we actually changed device->peer_seq */
3c13b680 2280 if (peer_seq == newest_peer_seq)
b30ab791 2281 wake_up(&device->seq_wait);
7be8da07 2282 }
b411b363
PR
2283}
2284
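/* s1/s2 are start sectors, l1/l2 byte counts; true iff the two ranges intersect */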
d93f6302 2285static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 2286{
d93f6302
LE
2287 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
2288}
b6a370ba 2289
d93f6302 2290/* maybe change sync_ee into interval trees as well? */
b30ab791 2291static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
d93f6302
LE
2292{
2293 struct drbd_peer_request *rs_req;
7e5fec31 2294 bool rv = false;
b6a370ba 2295
0500813f 2296 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2297 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
d93f6302
LE
2298 if (overlaps(peer_req->i.sector, peer_req->i.size,
2299 rs_req->i.sector, rs_req->i.size)) {
7e5fec31 2300 rv = true;
b6a370ba
PR
2301 break;
2302 }
2303 }
0500813f 2304 spin_unlock_irq(&device->resource->req_lock);
b6a370ba
PR
2305
2306 return rv;
2307}
2308
b411b363
PR
2309/* Called from receive_Data.
2310 * Synchronize packets on sock with packets on msock.
2311 *
2312 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
2313 * packet traveling on msock, they are still processed in the order they have
2314 * been sent.
2315 *
2316 * Note: we don't care for Ack packets overtaking P_DATA packets.
2317 *
b30ab791 2318 * In case packet_seq is larger than device->peer_seq number, there are
b411b363 2319 * outstanding packets on the msock. We wait for them to arrive.
b30ab791 2320 * In case we are the logically next packet, we update device->peer_seq
b411b363
PR
2321 * ourselves. Correctly handles 32bit wrap around.
2322 *
2323 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
2324 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
2325 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 2326 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
2327 *
2328 * returns 0 if we may process the packet,
2329 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
69a22773 2330static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
b411b363 2331{
69a22773 2332 struct drbd_device *device = peer_device->device;
b411b363 2333 DEFINE_WAIT(wait);
b411b363 2334 long timeout;
b874d231 2335 int ret = 0, tp;
7be8da07 2336
69a22773 2337 if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
7be8da07
AG
2338 return 0;
2339
b30ab791 2340 spin_lock(&device->peer_seq_lock);
b411b363 2341 for (;;) {
b30ab791
AG
2342 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2343 device->peer_seq = seq_max(device->peer_seq, peer_seq);
b411b363 2344 break;
7be8da07 2345 }
b874d231 2346
b411b363
PR
2347 if (signal_pending(current)) {
2348 ret = -ERESTARTSYS;
2349 break;
2350 }
b874d231
PR
2351
2352 rcu_read_lock();
5dd2ca19 2353 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
b874d231
PR
2354 rcu_read_unlock();
2355
2356 if (!tp)
2357 break;
2358
2359 /* Only need to wait if two_primaries is enabled */
b30ab791
AG
2360 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2361 spin_unlock(&device->peer_seq_lock);
44ed167d 2362 rcu_read_lock();
69a22773 2363 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
44ed167d 2364 rcu_read_unlock();
71b1c1eb 2365 timeout = schedule_timeout(timeout);
b30ab791 2366 spin_lock(&device->peer_seq_lock);
7be8da07 2367 if (!timeout) {
b411b363 2368 ret = -ETIMEDOUT;
d0180171 2369 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
2370 break;
2371 }
2372 }
b30ab791
AG
2373 spin_unlock(&device->peer_seq_lock);
2374 finish_wait(&device->seq_wait, &wait);
b411b363
PR
2375 return ret;
2376}
2377
688593c5
LE
2378/* see also bio_flags_to_wire()
2379 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2380 * flags and back. We may replicate to other kernel versions. */
9945172a 2381static blk_opf_t wire_flags_to_bio_flags(u32 dpf)
76d2e7ec 2382{
688593c5
LE
2383 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2384 (dpf & DP_FUA ? REQ_FUA : 0) |
28a8f0d3 2385 (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
bb3cc85e
MC
2386}
2387
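/* DP_ZEROES takes precedence; a P_ZEROES packet may additionally carry
 * DP_DISCARD to signal that unmapping the range is acceptable (see receive_Data) */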
9945172a 2388static enum req_op wire_flags_to_bio_op(u32 dpf)
bb3cc85e 2389{
f31e583a 2390 if (dpf & DP_ZEROES)
45c21793 2391 return REQ_OP_WRITE_ZEROES;
f31e583a
LE
2392 if (dpf & DP_DISCARD)
2393 return REQ_OP_DISCARD;
bb3cc85e
MC
2394 else
2395 return REQ_OP_WRITE;
76d2e7ec
PR
2396}
2397
b30ab791 2398static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
7be8da07
AG
2399 unsigned int size)
2400{
2401 struct drbd_interval *i;
2402
2403 repeat:
b30ab791 2404 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2405 struct drbd_request *req;
2406 struct bio_and_error m;
2407
2408 if (!i->local)
2409 continue;
2410 req = container_of(i, struct drbd_request, i);
2411 if (!(req->rq_state & RQ_POSTPONED))
2412 continue;
2413 req->rq_state &= ~RQ_POSTPONED;
2414 __req_mod(req, NEG_ACKED, &m);
0500813f 2415 spin_unlock_irq(&device->resource->req_lock);
7be8da07 2416 if (m.bio)
b30ab791 2417 complete_master_bio(device, &m);
0500813f 2418 spin_lock_irq(&device->resource->req_lock);
7be8da07
AG
2419 goto repeat;
2420 }
2421}
2422
b30ab791 2423static int handle_write_conflicts(struct drbd_device *device,
7be8da07
AG
2424 struct drbd_peer_request *peer_req)
2425{
e33b32de 2426 struct drbd_connection *connection = peer_req->peer_device->connection;
bde89a9e 2427 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
7be8da07
AG
2428 sector_t sector = peer_req->i.sector;
2429 const unsigned int size = peer_req->i.size;
2430 struct drbd_interval *i;
2431 bool equal;
2432 int err;
2433
2434 /*
2435 * Inserting the peer request into the write_requests tree will prevent
2436 * new conflicting local requests from being added.
2437 */
b30ab791 2438 drbd_insert_interval(&device->write_requests, &peer_req->i);
7be8da07
AG
2439
2440 repeat:
b30ab791 2441 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2442 if (i == &peer_req->i)
2443 continue;
08d0dabf
LE
2444 if (i->completed)
2445 continue;
7be8da07
AG
2446
2447 if (!i->local) {
2448 /*
2449 * Our peer has sent a conflicting remote request; this
2450 * should not happen in a two-node setup. Wait for the
2451 * earlier peer request to complete.
2452 */
b30ab791 2453 err = drbd_wait_misc(device, i);
7be8da07
AG
2454 if (err)
2455 goto out;
2456 goto repeat;
2457 }
2458
2459 equal = i->sector == sector && i->size == size;
2460 if (resolve_conflicts) {
2461 /*
2462 * If the peer request is fully contained within the
d4dabbe2
LE
2463 * overlapping request, it can be considered overwritten
2464 * and thus superseded; otherwise, it will be retried
2465 * once all overlapping requests have completed.
7be8da07 2466 */
d4dabbe2 2467 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2468 (i->size >> 9) >= sector + (size >> 9);
2469
2470 if (!equal)
d0180171 2471 drbd_alert(device, "Concurrent writes detected: "
7be8da07
AG
2472 "local=%llus +%u, remote=%llus +%u, "
2473 "assuming %s came first\n",
2474 (unsigned long long)i->sector, i->size,
2475 (unsigned long long)sector, size,
d4dabbe2 2476 superseded ? "local" : "remote");
7be8da07 2477
a8cd15ba 2478 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07 2479 e_send_retry_write;
a8cd15ba 2480 list_add_tail(&peer_req->w.list, &device->done_ee);
668700b4 2481 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
7be8da07
AG
2482
2483 err = -ENOENT;
2484 goto out;
2485 } else {
2486 struct drbd_request *req =
2487 container_of(i, struct drbd_request, i);
2488
2489 if (!equal)
d0180171 2490 drbd_alert(device, "Concurrent writes detected: "
7be8da07
AG
2491 "local=%llus +%u, remote=%llus +%u\n",
2492 (unsigned long long)i->sector, i->size,
2493 (unsigned long long)sector, size);
2494
2495 if (req->rq_state & RQ_LOCAL_PENDING ||
2496 !(req->rq_state & RQ_POSTPONED)) {
2497 /*
2498 * Wait for the node with the discard flag to
d4dabbe2
LE
2499 * decide if this request has been superseded
2500 * or needs to be retried.
2501 * Requests that have been superseded will
7be8da07
AG
2502 * disappear from the write_requests tree.
2503 *
2504 * In addition, wait for the conflicting
2505 * request to finish locally before submitting
2506 * the conflicting peer request.
2507 */
b30ab791 2508 err = drbd_wait_misc(device, &req->i);
7be8da07 2509 if (err) {
e33b32de 2510 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
b30ab791 2511 fail_postponed_requests(device, sector, size);
7be8da07
AG
2512 goto out;
2513 }
2514 goto repeat;
2515 }
2516 /*
2517 * Remember to restart the conflicting requests after
2518 * the new peer request has completed.
2519 */
2520 peer_req->flags |= EE_RESTART_REQUESTS;
2521 }
2522 }
2523 err = 0;
2524
2525 out:
2526 if (err)
b30ab791 2527 drbd_remove_epoch_entry_interval(device, peer_req);
7be8da07
AG
2528 return err;
2529}
2530
b411b363 2531/* mirrored write */
bde89a9e 2532static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2533{
9f4fe9ad 2534 struct drbd_peer_device *peer_device;
b30ab791 2535 struct drbd_device *device;
21ae5d7f 2536 struct net_conf *nc;
b411b363 2537 sector_t sector;
db830c46 2538 struct drbd_peer_request *peer_req;
e658983a 2539 struct p_data *p = pi->data;
7be8da07 2540 u32 peer_seq = be32_to_cpu(p->seq_num);
9945172a
BVA
2541 enum req_op op;
2542 blk_opf_t op_flags;
b411b363 2543 u32 dp_flags;
302bdeae 2544 int err, tp;
b411b363 2545
9f4fe9ad
AG
2546 peer_device = conn_peer_device(connection, pi->vnr);
2547 if (!peer_device)
4a76b161 2548 return -EIO;
9f4fe9ad 2549 device = peer_device->device;
b411b363 2550
b30ab791 2551 if (!get_ldev(device)) {
82bc0194
AG
2552 int err2;
2553
69a22773
AG
2554 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2555 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
bde89a9e 2556 atomic_inc(&connection->current_epoch->epoch_size);
69a22773 2557 err2 = drbd_drain_block(peer_device, pi->size);
82bc0194
AG
2558 if (!err)
2559 err = err2;
2560 return err;
b411b363
PR
2561 }
2562
fcefa62e
AG
2563 /*
2564 * Corresponding put_ldev done either below (on various errors), or in
2565 * drbd_peer_request_endio, if we successfully submit the data at the
2566 * end of this function.
2567 */
b411b363
PR
2568
2569 sector = be64_to_cpu(p->sector);
a0fb3c47 2570 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
db830c46 2571 if (!peer_req) {
b30ab791 2572 put_ldev(device);
82bc0194 2573 return -EIO;
b411b363
PR
2574 }
2575
a8cd15ba 2576 peer_req->w.cb = e_end_block;
21ae5d7f
LE
2577 peer_req->submit_jif = jiffies;
2578 peer_req->flags |= EE_APPLICATION;
b411b363 2579
688593c5 2580 dp_flags = be32_to_cpu(p->dp_flags);
bb3cc85e
MC
2581 op = wire_flags_to_bio_op(dp_flags);
2582 op_flags = wire_flags_to_bio_flags(dp_flags);
a0fb3c47 2583 if (pi->cmd == P_TRIM) {
f31e583a
LE
2584 D_ASSERT(peer_device, peer_req->i.size > 0);
2585 D_ASSERT(peer_device, op == REQ_OP_DISCARD);
2586 D_ASSERT(peer_device, peer_req->pages == NULL);
2587 /* need to play safe: an older DRBD sender
2588 * may mean zero-out while sending P_TRIM. */
2589 if (0 == (connection->agreed_features & DRBD_FF_WZEROES))
2590 peer_req->flags |= EE_ZEROOUT;
2591 } else if (pi->cmd == P_ZEROES) {
a0fb3c47 2592 D_ASSERT(peer_device, peer_req->i.size > 0);
45c21793 2593 D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
a0fb3c47 2594 D_ASSERT(peer_device, peer_req->pages == NULL);
f31e583a
LE
2595 /* Do (not) pass down BLKDEV_ZERO_NOUNMAP? */
2596 if (dp_flags & DP_DISCARD)
2597 peer_req->flags |= EE_TRIM;
a0fb3c47 2598 } else if (peer_req->pages == NULL) {
0b0ba1ef
AG
2599 D_ASSERT(device, peer_req->i.size == 0);
2600 D_ASSERT(device, dp_flags & DP_FLUSH);
a73ff323 2601 }
688593c5
LE
2602
2603 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2604 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2605
bde89a9e
AG
2606 spin_lock(&connection->epoch_lock);
2607 peer_req->epoch = connection->current_epoch;
db830c46
AG
2608 atomic_inc(&peer_req->epoch->epoch_size);
2609 atomic_inc(&peer_req->epoch->active);
bde89a9e 2610 spin_unlock(&connection->epoch_lock);
b411b363 2611
302bdeae 2612 rcu_read_lock();
21ae5d7f
LE
2613 nc = rcu_dereference(peer_device->connection->net_conf);
2614 tp = nc->two_primaries;
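	/* peers with an agreed protocol version < 100 do not send
	 * DP_SEND_WRITE_ACK/DP_SEND_RECEIVE_ACK; derive them from the
	 * configured wire protocol (C: write ack, B: receive ack, A: neither) */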
2615 if (peer_device->connection->agreed_pro_version < 100) {
2616 switch (nc->wire_protocol) {
2617 case DRBD_PROT_C:
2618 dp_flags |= DP_SEND_WRITE_ACK;
2619 break;
2620 case DRBD_PROT_B:
2621 dp_flags |= DP_SEND_RECEIVE_ACK;
2622 break;
2623 }
2624 }
302bdeae 2625 rcu_read_unlock();
21ae5d7f
LE
2626
2627 if (dp_flags & DP_SEND_WRITE_ACK) {
2628 peer_req->flags |= EE_SEND_WRITE_ACK;
2629 inc_unacked(device);
2630 /* corresponding dec_unacked() in e_end_block()
 2631 * or in _drbd_clear_done_ee, respectively */
2632 }
2633
2634 if (dp_flags & DP_SEND_RECEIVE_ACK) {
2635 /* I really don't like it that the receiver thread
2636 * sends on the msock, but anyways */
5dd2ca19 2637 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
21ae5d7f
LE
2638 }
2639
302bdeae 2640 if (tp) {
21ae5d7f
LE
2641 /* two primaries implies protocol C */
2642 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
302bdeae 2643 peer_req->flags |= EE_IN_INTERVAL_TREE;
69a22773 2644 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
7be8da07 2645 if (err)
b411b363 2646 goto out_interrupted;
0500813f 2647 spin_lock_irq(&device->resource->req_lock);
b30ab791 2648 err = handle_write_conflicts(device, peer_req);
7be8da07 2649 if (err) {
0500813f 2650 spin_unlock_irq(&device->resource->req_lock);
7be8da07 2651 if (err == -ENOENT) {
b30ab791 2652 put_ldev(device);
82bc0194 2653 return 0;
b411b363 2654 }
7be8da07 2655 goto out_interrupted;
b411b363 2656 }
b874d231 2657 } else {
69a22773 2658 update_peer_seq(peer_device, peer_seq);
0500813f 2659 spin_lock_irq(&device->resource->req_lock);
b874d231 2660 }
a34592ff 2661 /* TRIM and ZEROES are processed synchronously,
9104d31a 2662 * we wait for all pending requests, respectively wait for
a0fb3c47
LE
2663 * active_ee to become empty in drbd_submit_peer_request();
2664 * better not add ourselves here. */
a34592ff 2665 if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
b9ed7080 2666 list_add_tail(&peer_req->w.list, &device->active_ee);
0500813f 2667 spin_unlock_irq(&device->resource->req_lock);
b411b363 2668
b30ab791
AG
2669 if (device->state.conn == C_SYNC_TARGET)
2670 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
b411b363 2671
b30ab791 2672 if (device->state.pdsk < D_INCONSISTENT) {
b411b363 2673 /* In case we have the only disk of the cluster, */
b30ab791 2674 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
db830c46 2675 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
4dd726f0 2676 drbd_al_begin_io(device, &peer_req->i);
21ae5d7f 2677 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
b411b363
PR
2678 }
2679
86563de8 2680 err = drbd_submit_peer_request(device, peer_req, op | op_flags,
bb3cc85e 2681 DRBD_FAULT_DT_WR);
82bc0194
AG
2682 if (!err)
2683 return 0;
b411b363 2684
10f6d992 2685 /* don't care for the reason here */
d0180171 2686 drbd_err(device, "submit failed, triggering re-connect\n");
0500813f 2687 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2688 list_del(&peer_req->w.list);
b30ab791 2689 drbd_remove_epoch_entry_interval(device, peer_req);
0500813f 2690 spin_unlock_irq(&device->resource->req_lock);
21ae5d7f
LE
2691 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2692 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
b30ab791 2693 drbd_al_complete_io(device, &peer_req->i);
21ae5d7f 2694 }
22cc37a9 2695
b411b363 2696out_interrupted:
7e5fec31 2697 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
b30ab791
AG
2698 put_ldev(device);
2699 drbd_free_peer_req(device, peer_req);
82bc0194 2700 return err;
b411b363
PR
2701}
2702
0f0601f4
LE
2703/* We may throttle resync, if the lower device seems to be busy,
2704 * and current sync rate is above c_min_rate.
2705 *
2706 * To decide whether or not the lower device is busy, we use a scheme similar
 2707 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 2708 * amount of activity (more than 64 sectors) that we cannot account for with
 2709 * our own resync activity, it obviously is "busy".
2710 *
2711 * The current sync rate used here uses only the most recent two step marks,
2712 * to have a short time average so we can react faster.
2713 */
ad3fee79
LE
2714bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2715 bool throttle_if_app_is_waiting)
0f0601f4 2716{
e3555d85 2717 struct lc_element *tmp;
ad3fee79 2718 bool throttle = drbd_rs_c_min_rate_throttle(device);
daeda1cc 2719
ad3fee79
LE
2720 if (!throttle || throttle_if_app_is_waiting)
2721 return throttle;
0f0601f4 2722
b30ab791
AG
2723 spin_lock_irq(&device->al_lock);
2724 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
e3555d85
PR
2725 if (tmp) {
2726 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
e8299874
LE
2727 if (test_bit(BME_PRIORITY, &bm_ext->flags))
2728 throttle = false;
ad3fee79
LE
2729 /* Do not slow down if app IO is already waiting for this extent,
2730 * and our progress is necessary for application IO to complete. */
e3555d85 2731 }
b30ab791 2732 spin_unlock_irq(&device->al_lock);
e3555d85 2733
e8299874
LE
2734 return throttle;
2735}
2736
2737bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2738{
8c40c7c4 2739 struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
e8299874
LE
2740 unsigned long db, dt, dbdt;
2741 unsigned int c_min_rate;
2742 int curr_events;
2743
2744 rcu_read_lock();
2745 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2746 rcu_read_unlock();
2747
2748 /* feature disabled? */
2749 if (c_min_rate == 0)
2750 return false;
2751
8446fe92 2752 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
b30ab791 2753 atomic_read(&device->rs_sect_ev);
ad3fee79
LE
2754
2755 if (atomic_read(&device->ap_actlog_cnt)
ff8bd88b 2756 || curr_events - device->rs_last_events > 64) {
0f0601f4
LE
2757 unsigned long rs_left;
2758 int i;
2759
b30ab791 2760 device->rs_last_events = curr_events;
0f0601f4
LE
2761
2762 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2763 * approx. */
b30ab791 2764 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2649f080 2765
b30ab791
AG
2766 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2767 rs_left = device->ov_left;
2649f080 2768 else
b30ab791 2769 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
0f0601f4 2770
b30ab791 2771 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
0f0601f4
LE
2772 if (!dt)
2773 dt++;
b30ab791 2774 db = device->rs_mark_left[i] - rs_left;
0f0601f4
LE
2775 dbdt = Bit2KB(db/dt);
2776
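		/* dbdt is the recent resync rate in KiB/s; only ask for throttling
		 * while we are actually syncing faster than the configured c_min_rate */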
daeda1cc 2777 if (dbdt > c_min_rate)
e8299874 2778 return true;
0f0601f4 2779 }
e8299874 2780 return false;
0f0601f4
LE
2781}
2782
bde89a9e 2783static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2784{
9f4fe9ad 2785 struct drbd_peer_device *peer_device;
b30ab791 2786 struct drbd_device *device;
b411b363 2787 sector_t sector;
4a76b161 2788 sector_t capacity;
db830c46 2789 struct drbd_peer_request *peer_req;
b411b363 2790 struct digest_info *di = NULL;
b18b37be 2791 int size, verb;
b411b363 2792 unsigned int fault_type;
e658983a 2793 struct p_block_req *p = pi->data;
4a76b161 2794
9f4fe9ad
AG
2795 peer_device = conn_peer_device(connection, pi->vnr);
2796 if (!peer_device)
4a76b161 2797 return -EIO;
9f4fe9ad 2798 device = peer_device->device;
155bd9d1 2799 capacity = get_capacity(device->vdisk);
b411b363
PR
2800
2801 sector = be64_to_cpu(p->sector);
2802 size = be32_to_cpu(p->blksize);
2803
c670a398 2804 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
d0180171 2805 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
b411b363 2806 (unsigned long long)sector, size);
82bc0194 2807 return -EINVAL;
b411b363
PR
2808 }
2809 if (sector + (size>>9) > capacity) {
d0180171 2810 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
b411b363 2811 (unsigned long long)sector, size);
82bc0194 2812 return -EINVAL;
b411b363
PR
2813 }
2814
b30ab791 2815 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
b18b37be 2816 verb = 1;
e2857216 2817 switch (pi->cmd) {
b18b37be 2818 case P_DATA_REQUEST:
69a22773 2819 drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
b18b37be 2820 break;
700ca8c0 2821 case P_RS_THIN_REQ:
b18b37be
PR
2822 case P_RS_DATA_REQUEST:
2823 case P_CSUM_RS_REQUEST:
2824 case P_OV_REQUEST:
69a22773 2825 drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
b18b37be
PR
2826 break;
2827 case P_OV_REPLY:
2828 verb = 0;
b30ab791 2829 dec_rs_pending(device);
69a22773 2830 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
b18b37be
PR
2831 break;
2832 default:
49ba9b1b 2833 BUG();
b18b37be
PR
2834 }
2835 if (verb && __ratelimit(&drbd_ratelimit_state))
d0180171 2836 drbd_err(device, "Can not satisfy peer's read request, "
b411b363 2837 "no local data.\n");
b18b37be 2838
a821cc4a 2839 /* drain possible payload */
69a22773 2840 return drbd_drain_block(peer_device, pi->size);
b411b363
PR
2841 }
2842
2843 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2844 * "criss-cross" setup, that might cause write-out on some other DRBD,
2845 * which in turn might block on the other node at this very place. */
a0fb3c47 2846 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
9104d31a 2847 size, GFP_NOIO);
db830c46 2848 if (!peer_req) {
b30ab791 2849 put_ldev(device);
82bc0194 2850 return -ENOMEM;
b411b363
PR
2851 }
2852
e2857216 2853 switch (pi->cmd) {
b411b363 2854 case P_DATA_REQUEST:
a8cd15ba 2855 peer_req->w.cb = w_e_end_data_req;
b411b363 2856 fault_type = DRBD_FAULT_DT_RD;
80a40e43 2857 /* application IO, don't drbd_rs_begin_io */
21ae5d7f 2858 peer_req->flags |= EE_APPLICATION;
80a40e43
LE
2859 goto submit;
2860
700ca8c0
PR
2861 case P_RS_THIN_REQ:
2862 /* If at some point in the future we have a smart way to
2863 find out if this data block is completely deallocated,
2864 then we would do something smarter here than reading
2865 the block... */
2866 peer_req->flags |= EE_RS_THIN_REQ;
df561f66 2867 fallthrough;
b411b363 2868 case P_RS_DATA_REQUEST:
a8cd15ba 2869 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2870 fault_type = DRBD_FAULT_RS_RD;
5f9915bb 2871 /* used in the sector offset progress display */
b30ab791 2872 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2873 break;
2874
2875 case P_OV_REPLY:
2876 case P_CSUM_RS_REQUEST:
2877 fault_type = DRBD_FAULT_RS_RD;
e2857216 2878 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2879 if (!di)
2880 goto out_free_e;
2881
e2857216 2882 di->digest_size = pi->size;
b411b363
PR
2883 di->digest = (((char *)di)+sizeof(struct digest_info));
2884
db830c46
AG
2885 peer_req->digest = di;
2886 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2887
9f4fe9ad 2888 if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
b411b363
PR
2889 goto out_free_e;
2890
e2857216 2891 if (pi->cmd == P_CSUM_RS_REQUEST) {
9f4fe9ad 2892 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
a8cd15ba 2893 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb 2894 /* used in the sector offset progress display */
b30ab791 2895 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
aaaba345
LE
2896 /* remember to report stats in drbd_resync_finished */
2897 device->use_csums = true;
e2857216 2898 } else if (pi->cmd == P_OV_REPLY) {
2649f080 2899 /* track progress, we may need to throttle */
b30ab791 2900 atomic_add(size >> 9, &device->rs_sect_in);
a8cd15ba 2901 peer_req->w.cb = w_e_end_ov_reply;
b30ab791 2902 dec_rs_pending(device);
0f0601f4
LE
2903 /* drbd_rs_begin_io done when we sent this request,
2904 * but accounting still needs to be done. */
2905 goto submit_for_resync;
b411b363
PR
2906 }
2907 break;
2908
2909 case P_OV_REQUEST:
b30ab791 2910 if (device->ov_start_sector == ~(sector_t)0 &&
9f4fe9ad 2911 peer_device->connection->agreed_pro_version >= 90) {
de228bba
LE
2912 unsigned long now = jiffies;
2913 int i;
b30ab791
AG
2914 device->ov_start_sector = sector;
2915 device->ov_position = sector;
2916 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2917 device->rs_total = device->ov_left;
de228bba 2918 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
b30ab791
AG
2919 device->rs_mark_left[i] = device->ov_left;
2920 device->rs_mark_time[i] = now;
de228bba 2921 }
d0180171 2922 drbd_info(device, "Online Verify start sector: %llu\n",
b411b363
PR
2923 (unsigned long long)sector);
2924 }
a8cd15ba 2925 peer_req->w.cb = w_e_end_ov_req;
b411b363 2926 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2927 break;
2928
b411b363 2929 default:
49ba9b1b 2930 BUG();
b411b363
PR
2931 }
2932
0f0601f4
LE
2933 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2934 * wrt the receiver, but it is not as straightforward as it may seem.
2935 * Various places in the resync start and stop logic assume resync
2936 * requests are processed in order, requeuing this on the worker thread
2937 * introduces a bunch of new code for synchronization between threads.
2938 *
2939 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2940 * "forever", throttling after drbd_rs_begin_io will lock that extent
2941 * for application writes for the same time. For now, just throttle
2942 * here, where the rest of the code expects the receiver to sleep for
2943 * a while, anyways.
2944 */
2945
2946 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2947 * this defers syncer requests for some time, before letting at least
 2948 * one request through. The resync controller on the receiving side
2949 * will adapt to the incoming rate accordingly.
2950 *
2951 * We cannot throttle here if remote is Primary/SyncTarget:
2952 * we would also throttle its application reads.
2953 * In that case, throttling is done on the SyncTarget only.
2954 */
c5a2c150
LE
2955
2956 /* Even though this may be a resync request, we do add to "read_ee";
2957 * "sync_ee" is only used for resync WRITEs.
2958 * Add to list early, so debugfs can find this request
2959 * even if we have to sleep below. */
2960 spin_lock_irq(&device->resource->req_lock);
2961 list_add_tail(&peer_req->w.list, &device->read_ee);
2962 spin_unlock_irq(&device->resource->req_lock);
2963
944410e9 2964 update_receiver_timing_details(connection, drbd_rs_should_slow_down);
ad3fee79
LE
2965 if (device->state.peer != R_PRIMARY
2966 && drbd_rs_should_slow_down(device, sector, false))
e3555d85 2967 schedule_timeout_uninterruptible(HZ/10);
944410e9 2968 update_receiver_timing_details(connection, drbd_rs_begin_io);
b30ab791 2969 if (drbd_rs_begin_io(device, sector))
80a40e43 2970 goto out_free_e;
b411b363 2971
0f0601f4 2972submit_for_resync:
b30ab791 2973 atomic_add(size >> 9, &device->rs_sect_ev);
0f0601f4 2974
80a40e43 2975submit:
944410e9 2976 update_receiver_timing_details(connection, drbd_submit_peer_request);
b30ab791 2977 inc_unacked(device);
86563de8 2978 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ,
bb3cc85e 2979 fault_type) == 0)
82bc0194 2980 return 0;
b411b363 2981
10f6d992 2982 /* don't care for the reason here */
d0180171 2983 drbd_err(device, "submit failed, triggering re-connect\n");
c5a2c150
LE
2984
2985out_free_e:
0500813f 2986 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2987 list_del(&peer_req->w.list);
0500813f 2988 spin_unlock_irq(&device->resource->req_lock);
22cc37a9
LE
2989 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2990
b30ab791
AG
2991 put_ldev(device);
2992 drbd_free_peer_req(device, peer_req);
82bc0194 2993 return -EIO;
b411b363
PR
2994}
2995
9b48ff07 2996/*
69a22773
AG
2997 * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
2998 */
2999static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3000{
69a22773 3001 struct drbd_device *device = peer_device->device;
b411b363
PR
3002 int self, peer, rv = -100;
3003 unsigned long ch_self, ch_peer;
44ed167d 3004 enum drbd_after_sb_p after_sb_0p;
b411b363 3005
b30ab791
AG
3006 self = device->ldev->md.uuid[UI_BITMAP] & 1;
3007 peer = device->p_uuid[UI_BITMAP] & 1;
b411b363 3008
b30ab791
AG
3009 ch_peer = device->p_uuid[UI_SIZE];
3010 ch_self = device->comm_bm_set;
b411b363 3011
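	/* convention used by the strategies below: rv = -1 discards our own
	 * modifications, rv = 1 discards the peer's, rv stays -100 if no
	 * automatic decision can be reached */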
44ed167d 3012 rcu_read_lock();
69a22773 3013 after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
44ed167d
PR
3014 rcu_read_unlock();
3015 switch (after_sb_0p) {
b411b363
PR
3016 case ASB_CONSENSUS:
3017 case ASB_DISCARD_SECONDARY:
3018 case ASB_CALL_HELPER:
44ed167d 3019 case ASB_VIOLENTLY:
d0180171 3020 drbd_err(device, "Configuration error.\n");
b411b363
PR
3021 break;
3022 case ASB_DISCONNECT:
3023 break;
3024 case ASB_DISCARD_YOUNGER_PRI:
3025 if (self == 0 && peer == 1) {
3026 rv = -1;
3027 break;
3028 }
3029 if (self == 1 && peer == 0) {
3030 rv = 1;
3031 break;
3032 }
df561f66 3033 fallthrough; /* to one of the other strategies */
b411b363
PR
3034 case ASB_DISCARD_OLDER_PRI:
3035 if (self == 0 && peer == 1) {
3036 rv = 1;
3037 break;
3038 }
3039 if (self == 1 && peer == 0) {
3040 rv = -1;
3041 break;
3042 }
3043 /* Else fall through to one of the other strategies... */
d0180171 3044 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
b411b363 3045 "Using discard-least-changes instead\n");
df561f66 3046 fallthrough;
b411b363
PR
3047 case ASB_DISCARD_ZERO_CHG:
3048 if (ch_peer == 0 && ch_self == 0) {
69a22773 3049 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
b411b363
PR
3050 ? -1 : 1;
3051 break;
3052 } else {
3053 if (ch_peer == 0) { rv = 1; break; }
3054 if (ch_self == 0) { rv = -1; break; }
3055 }
44ed167d 3056 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363 3057 break;
df561f66 3058 fallthrough;
b411b363
PR
3059 case ASB_DISCARD_LEAST_CHG:
3060 if (ch_self < ch_peer)
3061 rv = -1;
3062 else if (ch_self > ch_peer)
3063 rv = 1;
3064 else /* ( ch_self == ch_peer ) */
3065 /* Well, then use something else. */
69a22773 3066 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
b411b363
PR
3067 ? -1 : 1;
3068 break;
3069 case ASB_DISCARD_LOCAL:
3070 rv = -1;
3071 break;
3072 case ASB_DISCARD_REMOTE:
3073 rv = 1;
3074 }
3075
3076 return rv;
3077}
3078
9b48ff07 3079/*
69a22773
AG
3080 * drbd_asb_recover_1p - Recover after split-brain with one remaining primary
3081 */
3082static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3083{
69a22773 3084 struct drbd_device *device = peer_device->device;
6184ea21 3085 int hg, rv = -100;
44ed167d 3086 enum drbd_after_sb_p after_sb_1p;
b411b363 3087
44ed167d 3088 rcu_read_lock();
69a22773 3089 after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
44ed167d
PR
3090 rcu_read_unlock();
3091 switch (after_sb_1p) {
b411b363
PR
3092 case ASB_DISCARD_YOUNGER_PRI:
3093 case ASB_DISCARD_OLDER_PRI:
3094 case ASB_DISCARD_LEAST_CHG:
3095 case ASB_DISCARD_LOCAL:
3096 case ASB_DISCARD_REMOTE:
44ed167d 3097 case ASB_DISCARD_ZERO_CHG:
d0180171 3098 drbd_err(device, "Configuration error.\n");
b411b363
PR
3099 break;
3100 case ASB_DISCONNECT:
3101 break;
3102 case ASB_CONSENSUS:
69a22773 3103 hg = drbd_asb_recover_0p(peer_device);
b30ab791 3104 if (hg == -1 && device->state.role == R_SECONDARY)
b411b363 3105 rv = hg;
b30ab791 3106 if (hg == 1 && device->state.role == R_PRIMARY)
b411b363
PR
3107 rv = hg;
3108 break;
3109 case ASB_VIOLENTLY:
69a22773 3110 rv = drbd_asb_recover_0p(peer_device);
b411b363
PR
3111 break;
3112 case ASB_DISCARD_SECONDARY:
b30ab791 3113 return device->state.role == R_PRIMARY ? 1 : -1;
b411b363 3114 case ASB_CALL_HELPER:
69a22773 3115 hg = drbd_asb_recover_0p(peer_device);
b30ab791 3116 if (hg == -1 && device->state.role == R_PRIMARY) {
bb437946
AG
3117 enum drbd_state_rv rv2;
3118
b411b363
PR
3119 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3120 * we might be here in C_WF_REPORT_PARAMS which is transient.
3121 * we do not need to wait for the after state change work either. */
b30ab791 3122 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
bb437946 3123 if (rv2 != SS_SUCCESS) {
b30ab791 3124 drbd_khelper(device, "pri-lost-after-sb");
b411b363 3125 } else {
d0180171 3126 drbd_warn(device, "Successfully gave up primary role.\n");
b411b363
PR
3127 rv = hg;
3128 }
3129 } else
3130 rv = hg;
3131 }
3132
3133 return rv;
3134}
3135
9b48ff07 3136/*
69a22773
AG
3137 * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
3138 */
3139static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3140{
69a22773 3141 struct drbd_device *device = peer_device->device;
6184ea21 3142 int hg, rv = -100;
44ed167d 3143 enum drbd_after_sb_p after_sb_2p;
b411b363 3144
44ed167d 3145 rcu_read_lock();
69a22773 3146 after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
44ed167d
PR
3147 rcu_read_unlock();
3148 switch (after_sb_2p) {
b411b363
PR
3149 case ASB_DISCARD_YOUNGER_PRI:
3150 case ASB_DISCARD_OLDER_PRI:
3151 case ASB_DISCARD_LEAST_CHG:
3152 case ASB_DISCARD_LOCAL:
3153 case ASB_DISCARD_REMOTE:
3154 case ASB_CONSENSUS:
3155 case ASB_DISCARD_SECONDARY:
44ed167d 3156 case ASB_DISCARD_ZERO_CHG:
d0180171 3157 drbd_err(device, "Configuration error.\n");
b411b363
PR
3158 break;
3159 case ASB_VIOLENTLY:
69a22773 3160 rv = drbd_asb_recover_0p(peer_device);
b411b363
PR
3161 break;
3162 case ASB_DISCONNECT:
3163 break;
3164 case ASB_CALL_HELPER:
69a22773 3165 hg = drbd_asb_recover_0p(peer_device);
b411b363 3166 if (hg == -1) {
bb437946
AG
3167 enum drbd_state_rv rv2;
3168
b411b363
PR
3169 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3170 * we might be here in C_WF_REPORT_PARAMS which is transient.
3171 * we do not need to wait for the after state change work either. */
b30ab791 3172 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
bb437946 3173 if (rv2 != SS_SUCCESS) {
b30ab791 3174 drbd_khelper(device, "pri-lost-after-sb");
b411b363 3175 } else {
d0180171 3176 drbd_warn(device, "Successfully gave up primary role.\n");
b411b363
PR
3177 rv = hg;
3178 }
3179 } else
3180 rv = hg;
3181 }
3182
3183 return rv;
3184}
3185
b30ab791 3186static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
b411b363
PR
3187 u64 bits, u64 flags)
3188{
3189 if (!uuid) {
d0180171 3190 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
b411b363
PR
3191 return;
3192 }
d0180171 3193 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
b411b363
PR
3194 text,
3195 (unsigned long long)uuid[UI_CURRENT],
3196 (unsigned long long)uuid[UI_BITMAP],
3197 (unsigned long long)uuid[UI_HISTORY_START],
3198 (unsigned long long)uuid[UI_HISTORY_END],
3199 (unsigned long long)bits,
3200 (unsigned long long)flags);
3201}
3202
3203/*
3204 100 after split brain try auto recover
3205 2 C_SYNC_SOURCE set BitMap
3206 1 C_SYNC_SOURCE use BitMap
3207 0 no Sync
3208 -1 C_SYNC_TARGET use BitMap
3209 -2 C_SYNC_TARGET set BitMap
3210 -100 after split brain, disconnect
3211-1000 unrelated data
4a23f264
PR
3212-1091 requires proto 91
3213-1096 requires proto 96
b411b363 3214 */
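/*
 * Illustrative sketch only, not part of the original source: one way a
 * reader might decode the result of drbd_uuid_compare() according to the
 * table above.  The helper name hg_describe() is hypothetical.
 */
static inline const char *hg_describe(int hg)
{
	switch (hg) {
	case  100:  return "split brain, try automatic recovery";
	case    2:  return "become C_SYNC_SOURCE, set bitmap (full sync)";
	case    1:  return "become C_SYNC_SOURCE, use bitmap";
	case    0:  return "no sync necessary";
	case   -1:  return "become C_SYNC_TARGET, use bitmap";
	case   -2:  return "become C_SYNC_TARGET, set bitmap (full sync)";
	case -100:  return "split brain, disconnect";
	case -1000: return "unrelated data, disconnect";
	default:    return hg < -1000 ? "peer needs newer protocol/features"
				      : "unexpected value";
	}
}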
f2d3d75b
LE
3215
3216static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
b411b363 3217{
44a4d551
LE
3218 struct drbd_peer_device *const peer_device = first_peer_device(device);
3219 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
b411b363
PR
3220 u64 self, peer;
3221 int i, j;
3222
b30ab791
AG
3223 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3224 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3225
3226 *rule_nr = 10;
3227 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
3228 return 0;
3229
3230 *rule_nr = 20;
3231 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
3232 peer != UUID_JUST_CREATED)
3233 return -2;
3234
3235 *rule_nr = 30;
3236 if (self != UUID_JUST_CREATED &&
3237 (peer == UUID_JUST_CREATED || peer == (u64)0))
3238 return 2;
3239
3240 if (self == peer) {
3241 int rct, dc; /* roles at crash time */
3242
b30ab791 3243 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
b411b363 3244
44a4d551 3245 if (connection->agreed_pro_version < 91)
4a23f264 3246 return -1091;
b411b363 3247
b30ab791
AG
3248 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3249 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
d0180171 3250 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
b30ab791
AG
3251 drbd_uuid_move_history(device);
3252 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3253 device->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3254
b30ab791
AG
3255 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3256 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3257 *rule_nr = 34;
3258 } else {
d0180171 3259 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
b411b363
PR
3260 *rule_nr = 36;
3261 }
3262
3263 return 1;
3264 }
3265
b30ab791 3266 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
b411b363 3267
44a4d551 3268 if (connection->agreed_pro_version < 91)
4a23f264 3269 return -1091;
b411b363 3270
b30ab791
AG
3271 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3272 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
d0180171 3273 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
b411b363 3274
b30ab791
AG
3275 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3276 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3277 device->p_uuid[UI_BITMAP] = 0UL;
b411b363 3278
b30ab791 3279 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363
PR
3280 *rule_nr = 35;
3281 } else {
d0180171 3282 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
b411b363
PR
3283 *rule_nr = 37;
3284 }
3285
3286 return -1;
3287 }
3288
3289 /* Common power [off|failure] */
b30ab791
AG
3290 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3291 (device->p_uuid[UI_FLAGS] & 2);
b411b363
PR
3292 /* lowest bit is set when we were primary,
3293 * next bit (weight 2) is set when peer was primary */
3294 *rule_nr = 40;
3295
f2d3d75b
LE
3296 /* Neither has the "crashed primary" flag set,
 3297 * only a replication link hiccup. */
3298 if (rct == 0)
3299 return 0;
3300
3301 /* Current UUID equal and no bitmap uuid; does not necessarily
3302 * mean this was a "simultaneous hard crash", maybe IO was
3303 * frozen, so no UUID-bump happened.
3304 * This is a protocol change, overload DRBD_FF_WSAME as flag
3305 * for "new-enough" peer DRBD version. */
3306 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3307 *rule_nr = 41;
3308 if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3309 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
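			/* Encode the requirement in the return value: bit 16 set,
			 * required feature flags in bits 8..15, required protocol
			 * version in bits 0..7; decoded in drbd_sync_handshake(). */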
3310 return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3311 }
3312 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3313 /* At least one has the "crashed primary" bit set,
3314 * both are primary now, but neither has rotated its UUIDs?
3315 * "Can not happen." */
3316 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3317 return -100;
3318 }
3319 if (device->state.role == R_PRIMARY)
3320 return 1;
3321 return -1;
3322 }
3323
3324 /* Both are secondary.
3325 * Really looks like recovery from simultaneous hard crash.
3326 * Check which had been primary before, and arbitrate. */
b411b363 3327 switch (rct) {
f2d3d75b 3328 case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
b411b363
PR
3329 case 1: /* self_pri && !peer_pri */ return 1;
3330 case 2: /* !self_pri && peer_pri */ return -1;
3331 case 3: /* self_pri && peer_pri */
44a4d551 3332 dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
b411b363
PR
3333 return dc ? -1 : 1;
3334 }
3335 }
3336
3337 *rule_nr = 50;
b30ab791 3338 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3339 if (self == peer)
3340 return -1;
3341
3342 *rule_nr = 51;
b30ab791 3343 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3344 if (self == peer) {
44a4d551 3345 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3346 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3347 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3348 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 3349 /* The last P_SYNC_UUID did not get through. Undo the last start of
3350 resync as sync source modifications of the peer's UUIDs. */
3351
44a4d551 3352 if (connection->agreed_pro_version < 91)
4a23f264 3353 return -1091;
b411b363 3354
b30ab791
AG
3355 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3356 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
4a23f264 3357
d0180171 3358 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
b30ab791 3359 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
4a23f264 3360
b411b363
PR
3361 return -1;
3362 }
3363 }
3364
3365 *rule_nr = 60;
b30ab791 3366 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
b411b363 3367 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3368 peer = device->p_uuid[i] & ~((u64)1);
b411b363
PR
3369 if (self == peer)
3370 return -2;
3371 }
3372
3373 *rule_nr = 70;
b30ab791
AG
3374 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3375 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3376 if (self == peer)
3377 return 1;
3378
3379 *rule_nr = 71;
b30ab791 3380 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3381 if (self == peer) {
44a4d551 3382 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3383 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3384 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3385 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 3386 /* The last P_SYNC_UUID did not get through. Undo the last start of
3387 resync as sync source modifications of our UUIDs. */
3388
44a4d551 3389 if (connection->agreed_pro_version < 91)
4a23f264 3390 return -1091;
b411b363 3391
b30ab791
AG
3392 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3393 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 3394
d0180171 3395 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
b30ab791
AG
3396 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3397 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3398
3399 return 1;
3400 }
3401 }
3402
3403
3404 *rule_nr = 80;
b30ab791 3405 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363 3406 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3407 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363
PR
3408 if (self == peer)
3409 return 2;
3410 }
3411
3412 *rule_nr = 90;
b30ab791
AG
3413 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3414 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3415 if (self == peer && self != ((u64)0))
3416 return 100;
3417
3418 *rule_nr = 100;
3419 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3420 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363 3421 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
b30ab791 3422 peer = device->p_uuid[j] & ~((u64)1);
b411b363
PR
3423 if (self == peer)
3424 return -100;
3425 }
3426 }
3427
3428 return -1000;
3429}
3430
3431/* drbd_sync_handshake() returns the new conn state on success, or
3432 CONN_MASK (-1) on failure.
3433 */
69a22773
AG
3434static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3435 enum drbd_role peer_role,
b411b363
PR
3436 enum drbd_disk_state peer_disk) __must_hold(local)
3437{
69a22773 3438 struct drbd_device *device = peer_device->device;
b411b363
PR
3439 enum drbd_conns rv = C_MASK;
3440 enum drbd_disk_state mydisk;
44ed167d 3441 struct net_conf *nc;
d29e89e3 3442 int hg, rule_nr, rr_conflict, tentative, always_asbp;
b411b363 3443
b30ab791 3444 mydisk = device->state.disk;
b411b363 3445 if (mydisk == D_NEGOTIATING)
b30ab791 3446 mydisk = device->new_state_tmp.disk;
b411b363 3447
d0180171 3448 drbd_info(device, "drbd_sync_handshake:\n");
9f2247bb 3449
b30ab791
AG
3450 spin_lock_irq(&device->ldev->md.uuid_lock);
3451 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3452 drbd_uuid_dump(device, "peer", device->p_uuid,
3453 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363 3454
f2d3d75b 3455 hg = drbd_uuid_compare(device, peer_role, &rule_nr);
b30ab791 3456 spin_unlock_irq(&device->ldev->md.uuid_lock);
b411b363 3457
d0180171 3458 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
b411b363
PR
3459
3460 if (hg == -1000) {
d0180171 3461 drbd_alert(device, "Unrelated data, aborting!\n");
b411b363
PR
3462 return C_MASK;
3463 }
f2d3d75b
LE
3464 if (hg < -0x10000) {
3465 int proto, fflags;
3466 hg = -hg;
3467 proto = hg & 0xff;
3468 fflags = (hg >> 8) & 0xff;
3469 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3470 proto, fflags);
3471 return C_MASK;
3472 }
4a23f264 3473 if (hg < -1000) {
d0180171 3474 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
3475 return C_MASK;
3476 }
3477
3478 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3479 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
3480 int f = (hg == -100) || abs(hg) == 2;
3481 hg = mydisk > D_INCONSISTENT ? 1 : -1;
3482 if (f)
3483 hg = hg*2;
d0180171 3484 drbd_info(device, "Becoming sync %s due to disk states.\n",
b411b363
PR
3485 hg > 0 ? "source" : "target");
3486 }
3487
3a11a487 3488 if (abs(hg) == 100)
b30ab791 3489 drbd_khelper(device, "initial-split-brain");
3a11a487 3490
44ed167d 3491 rcu_read_lock();
69a22773 3492 nc = rcu_dereference(peer_device->connection->net_conf);
d29e89e3
RK
3493 always_asbp = nc->always_asbp;
3494 rr_conflict = nc->rr_conflict;
3495 tentative = nc->tentative;
3496 rcu_read_unlock();
44ed167d 3497
d29e89e3 3498 if (hg == 100 || (hg == -100 && always_asbp)) {
b30ab791 3499 int pcount = (device->state.role == R_PRIMARY)
b411b363
PR
3500 + (peer_role == R_PRIMARY);
3501 int forced = (hg == -100);
3502
3503 switch (pcount) {
3504 case 0:
69a22773 3505 hg = drbd_asb_recover_0p(peer_device);
b411b363
PR
3506 break;
3507 case 1:
69a22773 3508 hg = drbd_asb_recover_1p(peer_device);
b411b363
PR
3509 break;
3510 case 2:
69a22773 3511 hg = drbd_asb_recover_2p(peer_device);
b411b363
PR
3512 break;
3513 }
3514 if (abs(hg) < 100) {
d0180171 3515 drbd_warn(device, "Split-Brain detected, %d primaries, "
b411b363
PR
3516 "automatically solved. Sync from %s node\n",
3517 pcount, (hg < 0) ? "peer" : "this");
3518 if (forced) {
d0180171 3519 drbd_warn(device, "Doing a full sync, since"
b411b363
PR
 3520 " UUIDs were ambiguous.\n");
3521 hg = hg*2;
3522 }
3523 }
3524 }
3525
3526 if (hg == -100) {
b30ab791 3527 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
b411b363 3528 hg = -1;
b30ab791 3529 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
b411b363
PR
3530 hg = 1;
3531
3532 if (abs(hg) < 100)
d0180171 3533 drbd_warn(device, "Split-Brain detected, manually solved. "
b411b363
PR
3534 "Sync from %s node\n",
3535 (hg < 0) ? "peer" : "this");
3536 }
3537
3538 if (hg == -100) {
580b9767
LE
3539 /* FIXME this log message is not correct if we end up here
3540 * after an attempted attach on a diskless node.
3541 * We just refuse to attach -- well, we drop the "connection"
3542 * to that disk, in a way... */
d0180171 3543 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
b30ab791 3544 drbd_khelper(device, "split-brain");
b411b363
PR
3545 return C_MASK;
3546 }
3547
3548 if (hg > 0 && mydisk <= D_INCONSISTENT) {
d0180171 3549 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
b411b363
PR
3550 return C_MASK;
3551 }
3552
3553 if (hg < 0 && /* by intention we do not use mydisk here. */
b30ab791 3554 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
44ed167d 3555 switch (rr_conflict) {
b411b363 3556 case ASB_CALL_HELPER:
b30ab791 3557 drbd_khelper(device, "pri-lost");
df561f66 3558 fallthrough;
b411b363 3559 case ASB_DISCONNECT:
d0180171 3560 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
b411b363
PR
3561 return C_MASK;
3562 case ASB_VIOLENTLY:
d0180171 3563 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
b411b363
PR
 3564 " assumption\n");
3565 }
3566 }
3567
69a22773 3568 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
cf14c2e9 3569 if (hg == 0)
d0180171 3570 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
cf14c2e9 3571 else
d0180171 3572 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
cf14c2e9
PR
3573 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3574 abs(hg) >= 2 ? "full" : "bit-map based");
3575 return C_MASK;
3576 }
3577
b411b363 3578 if (abs(hg) >= 2) {
d0180171 3579 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
b30ab791 3580 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
20ceb2b2 3581 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3582 return C_MASK;
3583 }
3584
3585 if (hg > 0) { /* become sync source. */
3586 rv = C_WF_BITMAP_S;
3587 } else if (hg < 0) { /* become sync target */
3588 rv = C_WF_BITMAP_T;
3589 } else {
3590 rv = C_CONNECTED;
b30ab791 3591 if (drbd_bm_total_weight(device)) {
d0180171 3592 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
b30ab791 3593 drbd_bm_total_weight(device));
b411b363
PR
3594 }
3595 }
3596
3597 return rv;
3598}
3599
f179d76d 3600static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3601{
3602 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3603 if (peer == ASB_DISCARD_REMOTE)
3604 return ASB_DISCARD_LOCAL;
b411b363
PR
3605
3606 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3607 if (peer == ASB_DISCARD_LOCAL)
3608 return ASB_DISCARD_REMOTE;
b411b363
PR
3609
3610 /* everything else is valid if they are equal on both sides. */
f179d76d 3611 return peer;
b411b363
PR
3612}
3613
bde89a9e 3614static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3615{
e658983a 3616 struct p_protocol *p = pi->data;
036b17ea
PR
3617 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3618 int p_proto, p_discard_my_data, p_two_primaries, cf;
3619 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3620 char integrity_alg[SHARED_SECRET_MAX] = "";
3d0e6375 3621 struct crypto_shash *peer_integrity_tfm = NULL;
7aca6c75 3622 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3623
b411b363
PR
3624 p_proto = be32_to_cpu(p->protocol);
3625 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3626 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3627 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3628 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3629 cf = be32_to_cpu(p->conn_flags);
6139f60d 3630 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3631
bde89a9e 3632 if (connection->agreed_pro_version >= 87) {
86db0618 3633 int err;
cf14c2e9 3634
88104ca4 3635 if (pi->size > sizeof(integrity_alg))
86db0618 3636 return -EIO;
bde89a9e 3637 err = drbd_recv_all(connection, integrity_alg, pi->size);
86db0618
AG
3638 if (err)
3639 return err;
036b17ea 3640 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3641 }
3642
7d4c782c 3643 if (pi->cmd != P_PROTOCOL_UPDATE) {
bde89a9e 3644 clear_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3645
fbc12f45 3646 if (cf & CF_DRY_RUN)
bde89a9e 3647 set_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3648
fbc12f45 3649 rcu_read_lock();
bde89a9e 3650 nc = rcu_dereference(connection->net_conf);
b411b363 3651
fbc12f45 3652 if (p_proto != nc->wire_protocol) {
1ec861eb 3653 drbd_err(connection, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3654 goto disconnect_rcu_unlock;
3655 }
b411b363 3656
fbc12f45 3657 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
1ec861eb 3658 drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3659 goto disconnect_rcu_unlock;
3660 }
b411b363 3661
fbc12f45 3662 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
1ec861eb 3663 drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3664 goto disconnect_rcu_unlock;
3665 }
b411b363 3666
fbc12f45 3667 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
1ec861eb 3668 drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3669 goto disconnect_rcu_unlock;
3670 }
b411b363 3671
fbc12f45 3672 if (p_discard_my_data && nc->discard_my_data) {
1ec861eb 3673 drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3674 goto disconnect_rcu_unlock;
3675 }
b411b363 3676
fbc12f45 3677 if (p_two_primaries != nc->two_primaries) {
1ec861eb 3678 drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3679 goto disconnect_rcu_unlock;
3680 }
b411b363 3681
fbc12f45 3682 if (strcmp(integrity_alg, nc->integrity_alg)) {
1ec861eb 3683 drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3684 goto disconnect_rcu_unlock;
3685 }
b411b363 3686
fbc12f45 3687 rcu_read_unlock();
b411b363
PR
3688 }
3689
7d4c782c
AG
3690 if (integrity_alg[0]) {
3691 int hash_size;
3692
3693 /*
3694 * We can only change the peer data integrity algorithm
3695 * here. Changing our own data integrity algorithm
3696 * requires that we send a P_PROTOCOL_UPDATE packet at
3697 * the same time; otherwise, the peer has no way to
3698 * tell between which packets the algorithm should
3699 * change.
3700 */
b411b363 3701
3d234b33 3702 peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);
1b57e663
LE
3703 if (IS_ERR(peer_integrity_tfm)) {
3704 peer_integrity_tfm = NULL;
1ec861eb 3705 drbd_err(connection, "peer data-integrity-alg %s not supported\n",
7d4c782c
AG
3706 integrity_alg);
3707 goto disconnect;
3708 }
b411b363 3709
3d0e6375 3710 hash_size = crypto_shash_digestsize(peer_integrity_tfm);
7d4c782c
AG
3711 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3712 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3713 if (!(int_dig_in && int_dig_vv)) {
1ec861eb 3714 drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3715 goto disconnect;
3716 }
b411b363
PR
3717 }
3718
7d4c782c 3719 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
8404e191 3720 if (!new_net_conf)
7d4c782c 3721 goto disconnect;
7d4c782c 3722
bde89a9e 3723 mutex_lock(&connection->data.mutex);
0500813f 3724 mutex_lock(&connection->resource->conf_update);
bde89a9e 3725 old_net_conf = connection->net_conf;
7d4c782c
AG
3726 *new_net_conf = *old_net_conf;
3727
3728 new_net_conf->wire_protocol = p_proto;
3729 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3730 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3731 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3732 new_net_conf->two_primaries = p_two_primaries;
3733
bde89a9e 3734 rcu_assign_pointer(connection->net_conf, new_net_conf);
0500813f 3735 mutex_unlock(&connection->resource->conf_update);
bde89a9e 3736 mutex_unlock(&connection->data.mutex);
7d4c782c 3737
3d0e6375 3738 crypto_free_shash(connection->peer_integrity_tfm);
bde89a9e
AG
3739 kfree(connection->int_dig_in);
3740 kfree(connection->int_dig_vv);
3741 connection->peer_integrity_tfm = peer_integrity_tfm;
3742 connection->int_dig_in = int_dig_in;
3743 connection->int_dig_vv = int_dig_vv;
7d4c782c
AG
3744
3745 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
1ec861eb 3746 drbd_info(connection, "peer data-integrity-alg: %s\n",
7d4c782c
AG
3747 integrity_alg[0] ? integrity_alg : "(none)");
3748
90c6c291 3749 kvfree_rcu(old_net_conf);
82bc0194 3750 return 0;
b411b363 3751
44ed167d
PR
3752disconnect_rcu_unlock:
3753 rcu_read_unlock();
b411b363 3754disconnect:
3d0e6375 3755 crypto_free_shash(peer_integrity_tfm);
036b17ea
PR
3756 kfree(int_dig_in);
3757 kfree(int_dig_vv);
bde89a9e 3758 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3759 return -EIO;
b411b363
PR
3760}
3761
3762/* helper function
3763 * input: alg name, feature name
3764 * return: NULL (alg name was "")
3765 * ERR_PTR(error) if something goes wrong
3766 * or the crypto hash ptr, if it worked out ok. */
3d0e6375
KC
3767static struct crypto_shash *drbd_crypto_alloc_digest_safe(
3768 const struct drbd_device *device,
b411b363
PR
3769 const char *alg, const char *name)
3770{
3d0e6375 3771 struct crypto_shash *tfm;
b411b363
PR
3772
3773 if (!alg[0])
3774 return NULL;
3775
3d0e6375 3776 tfm = crypto_alloc_shash(alg, 0, 0);
b411b363 3777 if (IS_ERR(tfm)) {
d0180171 3778 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
b411b363
PR
3779 alg, name, PTR_ERR(tfm));
3780 return tfm;
3781 }
b411b363
PR
3782 return tfm;
3783}
3784
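/* Receive and throw away the remaining payload of a packet we do not want
 * to process, so the data stream stays aligned to packet boundaries. */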
bde89a9e 3785static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3786{
bde89a9e 3787 void *buffer = connection->data.rbuf;
4a76b161
AG
3788 int size = pi->size;
3789
3790 while (size) {
3791 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
bde89a9e 3792 s = drbd_recv(connection, buffer, s);
4a76b161
AG
3793 if (s <= 0) {
3794 if (s < 0)
3795 return s;
3796 break;
3797 }
3798 size -= s;
3799 }
3800 if (size)
3801 return -EIO;
3802 return 0;
3803}
3804
3805/*
3806 * config_unknown_volume - device configuration command for unknown volume
3807 *
3808 * When a device is added to an existing connection, the node on which the
3809 * device is added first will send configuration commands to its peer but the
3810 * peer will not know about the device yet. It will warn and ignore these
3811 * commands. Once the device is added on the second node, the second node will
3812 * send the same device configuration commands, but in the other direction.
3813 *
3814 * (We can also end up here if drbd is misconfigured.)
3815 */
bde89a9e 3816static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3817{
1ec861eb 3818 drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
2fcb8f30 3819 cmdname(pi->cmd), pi->vnr);
bde89a9e 3820 return ignore_remaining_packet(connection, pi);
4a76b161
AG
3821}
3822
bde89a9e 3823static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3824{
9f4fe9ad 3825 struct drbd_peer_device *peer_device;
b30ab791 3826 struct drbd_device *device;
e658983a 3827 struct p_rs_param_95 *p;
b411b363 3828 unsigned int header_size, data_size, exp_max_sz;
3d0e6375
KC
3829 struct crypto_shash *verify_tfm = NULL;
3830 struct crypto_shash *csums_tfm = NULL;
2ec91e0e 3831 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3832 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
bde89a9e 3833 const int apv = connection->agreed_pro_version;
813472ce 3834 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
6a365874 3835 unsigned int fifo_size = 0;
82bc0194 3836 int err;
b411b363 3837
9f4fe9ad
AG
3838 peer_device = conn_peer_device(connection, pi->vnr);
3839 if (!peer_device)
bde89a9e 3840 return config_unknown_volume(connection, pi);
9f4fe9ad 3841 device = peer_device->device;
b411b363
PR
3842
3843 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3844 : apv == 88 ? sizeof(struct p_rs_param)
3845 + SHARED_SECRET_MAX
8e26f9cc
PR
3846 : apv <= 94 ? sizeof(struct p_rs_param_89)
3847 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3848
e2857216 3849 if (pi->size > exp_max_sz) {
d0180171 3850 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3851 pi->size, exp_max_sz);
82bc0194 3852 return -EIO;
b411b363
PR
3853 }
3854
3855 if (apv <= 88) {
e658983a 3856 header_size = sizeof(struct p_rs_param);
e2857216 3857 data_size = pi->size - header_size;
8e26f9cc 3858 } else if (apv <= 94) {
e658983a 3859 header_size = sizeof(struct p_rs_param_89);
e2857216 3860 data_size = pi->size - header_size;
0b0ba1ef 3861 D_ASSERT(device, data_size == 0);
8e26f9cc 3862 } else {
e658983a 3863 header_size = sizeof(struct p_rs_param_95);
e2857216 3864 data_size = pi->size - header_size;
0b0ba1ef 3865 D_ASSERT(device, data_size == 0);
b411b363
PR
3866 }
3867
3868 /* initialize verify_alg and csums_alg */
e658983a 3869 p = pi->data;
52a0cab3
KC
3870 BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
3871 memset(&p->algs, 0, sizeof(p->algs));
b411b363 3872
9f4fe9ad 3873 err = drbd_recv_all(peer_device->connection, p, header_size);
82bc0194
AG
3874 if (err)
3875 return err;
b411b363 3876
0500813f 3877 mutex_lock(&connection->resource->conf_update);
9f4fe9ad 3878 old_net_conf = peer_device->connection->net_conf;
b30ab791 3879 if (get_ldev(device)) {
813472ce
PR
3880 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3881 if (!new_disk_conf) {
b30ab791 3882 put_ldev(device);
0500813f 3883 mutex_unlock(&connection->resource->conf_update);
d0180171 3884 drbd_err(device, "Allocation of new disk_conf failed\n");
813472ce
PR
3885 return -ENOMEM;
3886 }
daeda1cc 3887
b30ab791 3888 old_disk_conf = device->ldev->disk_conf;
813472ce 3889 *new_disk_conf = *old_disk_conf;
b411b363 3890
6394b935 3891 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3892 }
b411b363
PR
3893
3894 if (apv >= 88) {
3895 if (apv == 88) {
5de73827 3896 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
d0180171 3897 drbd_err(device, "verify-alg of wrong size, "
5de73827
PR
 3898 "peer wants %u, accepting only up to %u bytes\n",
3899 data_size, SHARED_SECRET_MAX);
813472ce 3900 goto reconnect;
b411b363
PR
3901 }
3902
9f4fe9ad 3903 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
813472ce
PR
3904 if (err)
3905 goto reconnect;
b411b363
PR
3906 /* we expect NUL terminated string */
3907 /* but just in case someone tries to be evil */
0b0ba1ef 3908 D_ASSERT(device, p->verify_alg[data_size-1] == 0);
b411b363
PR
3909 p->verify_alg[data_size-1] = 0;
3910
3911 } else /* apv >= 89 */ {
3912 /* we still expect NUL terminated strings */
3913 /* but just in case someone tries to be evil */
0b0ba1ef
AG
3914 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3915 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
b411b363
PR
3916 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3917 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3918 }
3919
2ec91e0e 3920 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b30ab791 3921 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3922 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3923 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3924 goto disconnect;
3925 }
b30ab791 3926 verify_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3927 p->verify_alg, "verify-alg");
3928 if (IS_ERR(verify_tfm)) {
3929 verify_tfm = NULL;
3930 goto disconnect;
3931 }
3932 }
3933
2ec91e0e 3934 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b30ab791 3935 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3936 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3937 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3938 goto disconnect;
3939 }
b30ab791 3940 csums_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3941 p->csums_alg, "csums-alg");
3942 if (IS_ERR(csums_tfm)) {
3943 csums_tfm = NULL;
3944 goto disconnect;
3945 }
3946 }
3947
813472ce 3948 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3949 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3950 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3951 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3952 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3953
daeda1cc 3954 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
b30ab791 3955 if (fifo_size != device->rs_plan_s->size) {
813472ce
PR
3956 new_plan = fifo_alloc(fifo_size);
3957 if (!new_plan) {
d0180171 3958 drbd_err(device, "kmalloc of fifo_buffer failed");
b30ab791 3959 put_ldev(device);
778f271d
PR
3960 goto disconnect;
3961 }
3962 }
8e26f9cc 3963 }
b411b363 3964
91fd4dad 3965 if (verify_tfm || csums_tfm) {
2ec91e0e 3966 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
8404e191 3967 if (!new_net_conf)
91fd4dad 3968 goto disconnect;
91fd4dad 3969
2ec91e0e 3970 *new_net_conf = *old_net_conf;
91fd4dad
PR
3971
3972 if (verify_tfm) {
2ec91e0e
PR
3973 strcpy(new_net_conf->verify_alg, p->verify_alg);
3974 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3d0e6375 3975 crypto_free_shash(peer_device->connection->verify_tfm);
9f4fe9ad 3976 peer_device->connection->verify_tfm = verify_tfm;
d0180171 3977 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
91fd4dad
PR
3978 }
3979 if (csums_tfm) {
2ec91e0e
PR
3980 strcpy(new_net_conf->csums_alg, p->csums_alg);
3981 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3d0e6375 3982 crypto_free_shash(peer_device->connection->csums_tfm);
9f4fe9ad 3983 peer_device->connection->csums_tfm = csums_tfm;
d0180171 3984 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
91fd4dad 3985 }
bde89a9e 3986 rcu_assign_pointer(connection->net_conf, new_net_conf);
778f271d 3987 }
b411b363
PR
3988 }
3989
813472ce 3990 if (new_disk_conf) {
b30ab791
AG
3991 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3992 put_ldev(device);
813472ce
PR
3993 }
3994
3995 if (new_plan) {
b30ab791
AG
3996 old_plan = device->rs_plan_s;
3997 rcu_assign_pointer(device->rs_plan_s, new_plan);
b411b363 3998 }
daeda1cc 3999
0500813f 4000 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
4001 synchronize_rcu();
4002 if (new_net_conf)
4003 kfree(old_net_conf);
4004 kfree(old_disk_conf);
813472ce 4005 kfree(old_plan);
daeda1cc 4006
82bc0194 4007 return 0;
b411b363 4008
813472ce
PR
4009reconnect:
4010 if (new_disk_conf) {
b30ab791 4011 put_ldev(device);
813472ce
PR
4012 kfree(new_disk_conf);
4013 }
0500813f 4014 mutex_unlock(&connection->resource->conf_update);
813472ce
PR
4015 return -EIO;
4016
b411b363 4017disconnect:
813472ce
PR
4018 kfree(new_plan);
4019 if (new_disk_conf) {
b30ab791 4020 put_ldev(device);
813472ce
PR
4021 kfree(new_disk_conf);
4022 }
0500813f 4023 mutex_unlock(&connection->resource->conf_update);
b411b363
PR
4024 /* just for completeness: actually not needed,
4025 * as this is not reached if csums_tfm was ok. */
3d0e6375 4026 crypto_free_shash(csums_tfm);
b411b363 4027 /* but free the verify_tfm again, if csums_tfm did not work out */
3d0e6375 4028 crypto_free_shash(verify_tfm);
9f4fe9ad 4029 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4030 return -EIO;
b411b363
PR
4031}
4032
b411b363 4033/* warn if the arguments differ by more than 12.5% */
b30ab791 4034static void warn_if_differ_considerably(struct drbd_device *device,
b411b363
PR
4035 const char *s, sector_t a, sector_t b)
4036{
4037 sector_t d;
4038 if (a == 0 || b == 0)
4039 return;
4040 d = (a > b) ? (a - b) : (b - a);
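	/* a>>3 == a/8: warn when the difference exceeds 12.5% of either value */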
4041 if (d > (a>>3) || d > (b>>3))
d0180171 4042 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
b411b363
PR
4043 (unsigned long long)a, (unsigned long long)b);
4044}
4045
bde89a9e 4046static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4047{
9f4fe9ad 4048 struct drbd_peer_device *peer_device;
b30ab791 4049 struct drbd_device *device;
e658983a 4050 struct p_sizes *p = pi->data;
9104d31a 4051 struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
e96c9633 4052 enum determine_dev_size dd = DS_UNCHANGED;
6a8d68b1 4053 sector_t p_size, p_usize, p_csize, my_usize;
94c43a13 4054 sector_t new_size, cur_size;
b411b363 4055 int ldsc = 0; /* local disk size changed */
e89b591c 4056 enum dds_flags ddsf;
b411b363 4057
9f4fe9ad
AG
4058 peer_device = conn_peer_device(connection, pi->vnr);
4059 if (!peer_device)
bde89a9e 4060 return config_unknown_volume(connection, pi);
9f4fe9ad 4061 device = peer_device->device;
155bd9d1 4062 cur_size = get_capacity(device->vdisk);
4a76b161 4063
b411b363
PR
4064 p_size = be64_to_cpu(p->d_size);
4065 p_usize = be64_to_cpu(p->u_size);
6a8d68b1 4066 p_csize = be64_to_cpu(p->c_size);
b411b363 4067
b411b363
PR
4068 /* just store the peer's disk size for now.
4069 * we still need to figure out whether we accept that. */
b30ab791 4070 device->p_size = p_size;
b411b363 4071
b30ab791 4072 if (get_ldev(device)) {
daeda1cc 4073 rcu_read_lock();
b30ab791 4074 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
daeda1cc
PR
4075 rcu_read_unlock();
4076
b30ab791
AG
4077 warn_if_differ_considerably(device, "lower level device sizes",
4078 p_size, drbd_get_max_capacity(device->ldev));
4079 warn_if_differ_considerably(device, "user requested size",
daeda1cc 4080 p_usize, my_usize);
b411b363
PR
4081
4082 /* if this is the first connect, or an otherwise expected
4083 * param exchange, choose the minimum */
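	/* min_not_zero(): e.g. 500 vs 1000 -> 500; 0 (no local limit) vs 1000 -> 1000 */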
b30ab791 4084 if (device->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 4085 p_usize = min_not_zero(my_usize, p_usize);
b411b363 4086
ad6e8979
LE
4087 /* Never shrink a device with usable data during connect,
4088 * or "attach" on the peer.
4089 * But allow online shrinking if we are connected. */
60bac040 4090 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
60bac040 4091 if (new_size < cur_size &&
b30ab791 4092 device->state.disk >= D_OUTDATED &&
ad6e8979 4093 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
60bac040
LE
4094 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4095 (unsigned long long)new_size, (unsigned long long)cur_size);
9f4fe9ad 4096 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
b30ab791 4097 put_ldev(device);
82bc0194 4098 return -EIO;
b411b363 4099 }
daeda1cc
PR
4100
4101 if (my_usize != p_usize) {
4102 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
4103
4104 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
4105 if (!new_disk_conf) {
b30ab791 4106 put_ldev(device);
daeda1cc
PR
4107 return -ENOMEM;
4108 }
4109
0500813f 4110 mutex_lock(&connection->resource->conf_update);
b30ab791 4111 old_disk_conf = device->ldev->disk_conf;
daeda1cc
PR
4112 *new_disk_conf = *old_disk_conf;
4113 new_disk_conf->disk_size = p_usize;
4114
b30ab791 4115 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
0500813f 4116 mutex_unlock(&connection->resource->conf_update);
90c6c291 4117 kvfree_rcu(old_disk_conf);
daeda1cc 4118
ad6e8979
LE
4119 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
4120 (unsigned long)p_usize, (unsigned long)my_usize);
b411b363 4121 }
daeda1cc 4122
b30ab791 4123 put_ldev(device);
b411b363 4124 }
b411b363 4125
20c68fde 4126 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
dd4f699d 4127 /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
20c68fde 4128 In case we cleared the QUEUE_FLAG_DISCARD from our queue in
dd4f699d 4129 drbd_reconsider_queue_parameters(), we can be sure that after
20c68fde
LE
4130 drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
4131
e89b591c 4132 ddsf = be16_to_cpu(p->dds_flags);
b30ab791 4133 if (get_ldev(device)) {
9104d31a 4134 drbd_reconsider_queue_parameters(device, device->ldev, o);
b30ab791
AG
4135 dd = drbd_determine_dev_size(device, ddsf, NULL);
4136 put_ldev(device);
e96c9633 4137 if (dd == DS_ERROR)
82bc0194 4138 return -EIO;
b30ab791 4139 drbd_md_sync(device);
b411b363 4140 } else {
6a8d68b1
LE
4141 /*
4142 * I am diskless, need to accept the peer's *current* size.
4143 * I must NOT accept the peers backing disk size,
4144 * it may have been larger than mine all along...
4145 *
4146 * At this point, the peer knows more about my disk, or at
4147 * least about what we last agreed upon, than myself.
4148 * So if his c_size is less than his d_size, the most likely
4149 * reason is that *my* d_size was smaller last time we checked.
4150 *
4151 * However, if he sends a zero current size,
4152 * take his (user-capped or) backing disk size anyways.
94c43a13
LE
4153 *
4154 * Unless of course he does not have a disk himself.
4155 * In which case we ignore this completely.
6a8d68b1 4156 */
94c43a13 4157 sector_t new_size = p_csize ?: p_usize ?: p_size;
9104d31a 4158 drbd_reconsider_queue_parameters(device, NULL, o);
94c43a13
LE
4159 if (new_size == 0) {
 4160 /* Ignore, the peer knows nothing. */
4161 } else if (new_size == cur_size) {
4162 /* nothing to do */
4163 } else if (cur_size != 0 && p_size == 0) {
4164 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
4165 (unsigned long long)new_size, (unsigned long long)cur_size);
4166 } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
4167 drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
4168 (unsigned long long)new_size, (unsigned long long)cur_size);
4169 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4170 return -EIO;
4171 } else {
4172 /* I believe the peer, if
4173 * - I don't have a current size myself
4174 * - we agree on the size anyways
4175 * - I do have a current size, am Secondary,
4176 * and he has the only disk
4177 * - I do have a current size, am Primary,
4178 * and he has the only disk,
4179 * which is larger than my current size
4180 */
4181 drbd_set_my_capacity(device, new_size);
4182 }
b411b363
PR
4183 }
4184
b30ab791
AG
4185 if (get_ldev(device)) {
4186 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4187 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
b411b363
PR
4188 ldsc = 1;
4189 }
4190
b30ab791 4191 put_ldev(device);
b411b363
PR
4192 }
4193
b30ab791 4194 if (device->state.conn > C_WF_REPORT_PARAMS) {
155bd9d1
CH
4195 if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
4196 ldsc) {
b411b363
PR
4197 /* we have different sizes, probably peer
4198 * needs to know my new size... */
69a22773 4199 drbd_send_sizes(peer_device, 0, ddsf);
b411b363 4200 }
b30ab791
AG
4201 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4202 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4203 if (device->state.pdsk >= D_INCONSISTENT &&
4204 device->state.disk >= D_INCONSISTENT) {
e89b591c 4205 if (ddsf & DDSF_NO_RESYNC)
d0180171 4206 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
e89b591c 4207 else
b30ab791 4208 resync_after_online_grow(device);
e89b591c 4209 } else
b30ab791 4210 set_bit(RESYNC_AFTER_NEG, &device->flags);
b411b363
PR
4211 }
4212 }
4213
82bc0194 4214 return 0;
b411b363
PR
4215}
4216
bde89a9e 4217static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4218{
9f4fe9ad 4219 struct drbd_peer_device *peer_device;
b30ab791 4220 struct drbd_device *device;
e658983a 4221 struct p_uuids *p = pi->data;
b411b363 4222 u64 *p_uuid;
62b0da3a 4223 int i, updated_uuids = 0;
b411b363 4224
9f4fe9ad
AG
4225 peer_device = conn_peer_device(connection, pi->vnr);
4226 if (!peer_device)
bde89a9e 4227 return config_unknown_volume(connection, pi);
9f4fe9ad 4228 device = peer_device->device;
4a76b161 4229
365cf663 4230 p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
8404e191 4231 if (!p_uuid)
063eacf8 4232 return false;
b411b363
PR
4233
4234 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
4235 p_uuid[i] = be64_to_cpu(p->uuid[i]);
4236
b30ab791
AG
4237 kfree(device->p_uuid);
4238 device->p_uuid = p_uuid;
b411b363 4239
b17b5960 4240 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
b30ab791
AG
4241 device->state.disk < D_INCONSISTENT &&
4242 device->state.role == R_PRIMARY &&
4243 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
d0180171 4244 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
b30ab791 4245 (unsigned long long)device->ed_uuid);
9f4fe9ad 4246 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4247 return -EIO;
b411b363
PR
4248 }
4249
b30ab791 4250 if (get_ldev(device)) {
b411b363 4251 int skip_initial_sync =
b30ab791 4252 device->state.conn == C_CONNECTED &&
9f4fe9ad 4253 peer_device->connection->agreed_pro_version >= 90 &&
b30ab791 4254 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
b411b363
PR
4255 (p_uuid[UI_FLAGS] & 8);
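		/* UI_FLAGS bit 8: the peer signals that the initial sync may be skipped. */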
4256 if (skip_initial_sync) {
d0180171 4257 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
b30ab791 4258 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
20ceb2b2
LE
4259 "clear_n_write from receive_uuids",
4260 BM_LOCKED_TEST_ALLOWED);
b30ab791
AG
4261 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4262 _drbd_uuid_set(device, UI_BITMAP, 0);
4263 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
b411b363 4264 CS_VERBOSE, NULL);
b30ab791 4265 drbd_md_sync(device);
62b0da3a 4266 updated_uuids = 1;
b411b363 4267 }
b30ab791
AG
4268 put_ldev(device);
4269 } else if (device->state.disk < D_INCONSISTENT &&
4270 device->state.role == R_PRIMARY) {
18a50fa2
PR
4271 /* I am a diskless primary, the peer just created a new current UUID
4272 for me. */
b30ab791 4273 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
b411b363
PR
4274 }
4275
4276 /* Before we test for the disk state, we should wait until an eventually
4277 ongoing cluster wide state change is finished. That is important if
4278 we are primary and are detaching from our disk. We need to see the
4279 new disk state... */
b30ab791
AG
4280 mutex_lock(device->state_mutex);
4281 mutex_unlock(device->state_mutex);
4282 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4283 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
62b0da3a
LE
4284
4285 if (updated_uuids)
b30ab791 4286 drbd_print_uuids(device, "receiver updated UUIDs to");
b411b363 4287
82bc0194 4288 return 0;
b411b363
PR
4289}
4290
4291/**
4292 * convert_state() - Converts the peer's view of the cluster state to our point of view
4293 * @ps: The state as seen by the peer.
4294 */
4295static union drbd_state convert_state(union drbd_state ps)
4296{
4297 union drbd_state ms;
4298
4299 static enum drbd_conns c_tab[] = {
369bea63 4300 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
4301 [C_CONNECTED] = C_CONNECTED,
4302
4303 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
4304 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
4305 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
4306 [C_VERIFY_S] = C_VERIFY_T,
4307 [C_MASK] = C_MASK,
4308 };
4309
4310 ms.i = ps.i;
4311
4312 ms.conn = c_tab[ps.conn];
4313 ms.peer = ps.role;
4314 ms.role = ps.peer;
4315 ms.pdsk = ps.disk;
4316 ms.disk = ps.pdsk;
4317 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
4318
4319 return ms;
4320}
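/* Example: a peer reporting { role=Primary, peer=Secondary, disk=UpToDate,
 * pdsk=Inconsistent } is seen locally as { role=Secondary, peer=Primary,
 * disk=Inconsistent, pdsk=UpToDate }; asymmetric connection states are
 * mirrored via c_tab (e.g. C_STARTING_SYNC_S <-> C_STARTING_SYNC_T). */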
4321
bde89a9e 4322static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4323{
9f4fe9ad 4324 struct drbd_peer_device *peer_device;
b30ab791 4325 struct drbd_device *device;
e658983a 4326 struct p_req_state *p = pi->data;
b411b363 4327 union drbd_state mask, val;
bf885f8a 4328 enum drbd_state_rv rv;
b411b363 4329
9f4fe9ad
AG
4330 peer_device = conn_peer_device(connection, pi->vnr);
4331 if (!peer_device)
4a76b161 4332 return -EIO;
9f4fe9ad 4333 device = peer_device->device;
4a76b161 4334
b411b363
PR
4335 mask.i = be32_to_cpu(p->mask);
4336 val.i = be32_to_cpu(p->val);
4337
9f4fe9ad 4338 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
b30ab791 4339 mutex_is_locked(device->state_mutex)) {
69a22773 4340 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
82bc0194 4341 return 0;
b411b363
PR
4342 }
4343
4344 mask = convert_state(mask);
4345 val = convert_state(val);
4346
b30ab791 4347 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
69a22773 4348 drbd_send_sr_reply(peer_device, rv);
b411b363 4349
b30ab791 4350 drbd_md_sync(device);
b411b363 4351
82bc0194 4352 return 0;
b411b363
PR
4353}
4354
bde89a9e 4355static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4356{
e658983a 4357 struct p_req_state *p = pi->data;
b411b363 4358 union drbd_state mask, val;
bf885f8a 4359 enum drbd_state_rv rv;
b411b363 4360
b411b363
PR
4361 mask.i = be32_to_cpu(p->mask);
4362 val.i = be32_to_cpu(p->val);
4363
bde89a9e
AG
4364 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
4365 mutex_is_locked(&connection->cstate_mutex)) {
4366 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
82bc0194 4367 return 0;
b411b363
PR
4368 }
4369
4370 mask = convert_state(mask);
4371 val = convert_state(val);
4372
bde89a9e
AG
4373 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
4374 conn_send_sr_reply(connection, rv);
b411b363 4375
82bc0194 4376 return 0;
b411b363
PR
4377}
4378
bde89a9e 4379static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4380{
9f4fe9ad 4381 struct drbd_peer_device *peer_device;
b30ab791 4382 struct drbd_device *device;
e658983a 4383 struct p_state *p = pi->data;
4ac4aada 4384 union drbd_state os, ns, peer_state;
b411b363 4385 enum drbd_disk_state real_peer_disk;
65d922c3 4386 enum chg_state_flags cs_flags;
b411b363
PR
4387 int rv;
4388
9f4fe9ad
AG
4389 peer_device = conn_peer_device(connection, pi->vnr);
4390 if (!peer_device)
bde89a9e 4391 return config_unknown_volume(connection, pi);
9f4fe9ad 4392 device = peer_device->device;
4a76b161 4393
b411b363
PR
4394 peer_state.i = be32_to_cpu(p->state);
4395
4396 real_peer_disk = peer_state.disk;
4397 if (peer_state.disk == D_NEGOTIATING) {
b30ab791 4398 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
d0180171 4399 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
b411b363
PR
4400 }
4401
0500813f 4402 spin_lock_irq(&device->resource->req_lock);
b411b363 4403 retry:
b30ab791 4404 os = ns = drbd_read_state(device);
0500813f 4405 spin_unlock_irq(&device->resource->req_lock);
b411b363 4406
668700b4 4407 /* If some other part of the code (ack_receiver thread, timeout)
545752d5
LE
4408 * already decided to close the connection again,
4409 * we must not "re-establish" it here. */
4410 if (os.conn <= C_TEAR_DOWN)
58ffa580 4411 return -ECONNRESET;
545752d5 4412
40424e4a
LE
4413 /* If this is the "end of sync" confirmation, usually the peer disk
4414 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
4415 * set) resync started in PausedSyncT, or if the timing of pause-/
4416 * unpause-sync events has been "just right", the peer disk may
4417 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
4418 */
4419 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
4420 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
4421 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
4422 /* If we are (becoming) SyncSource, but peer is still in sync
4423 * preparation, ignore its uptodate-ness to avoid flapping, it
4424 * will change to inconsistent once the peer reaches active
4425 * syncing states.
4426 * It may have changed syncer-paused flags, however, so we
4427 * cannot ignore this completely. */
4428 if (peer_state.conn > C_CONNECTED &&
4429 peer_state.conn < C_SYNC_SOURCE)
4430 real_peer_disk = D_INCONSISTENT;
4431
4432 /* if peer_state changes to connected at the same time,
4433 * it explicitly notifies us that it finished resync.
4434 * Maybe we should finish it up, too? */
4435 else if (os.conn >= C_SYNC_SOURCE &&
4436 peer_state.conn == C_CONNECTED) {
b30ab791
AG
4437 if (drbd_bm_total_weight(device) <= device->rs_failed)
4438 drbd_resync_finished(device);
82bc0194 4439 return 0;
e9ef7bb6
LE
4440 }
4441 }
4442
02b91b55
LE
4443 /* explicit verify finished notification, stop sector reached. */
4444 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
4445 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
b30ab791
AG
4446 ov_out_of_sync_print(device);
4447 drbd_resync_finished(device);
58ffa580 4448 return 0;
02b91b55
LE
4449 }
4450
e9ef7bb6
LE
4451 /* peer says his disk is inconsistent, while we think it is uptodate,
4452 * and this happens while the peer still thinks we have a sync going on,
4453 * but we think we are already done with the sync.
4454 * We ignore this to avoid flapping pdsk.
4455 * This should not happen, if the peer is a recent version of drbd. */
4456 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
4457 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
4458 real_peer_disk = D_UP_TO_DATE;
4459
4ac4aada
LE
4460 if (ns.conn == C_WF_REPORT_PARAMS)
4461 ns.conn = C_CONNECTED;
b411b363 4462
67531718
PR
4463 if (peer_state.conn == C_AHEAD)
4464 ns.conn = C_BEHIND;
4465
fe43ed97
LE
4466 /* TODO:
4467 * if (primary and diskless and peer uuid != effective uuid)
4468 * abort attach on peer;
4469 *
4470 * If this node does not have good data, was already connected, but
4471 * the peer did a late attach only now, trying to "negotiate" with me,
4472 * AND I am currently Primary, possibly frozen, with some specific
4473 * "effective" uuid, this should never be reached, really, because
4474 * we first send the uuids, then the current state.
4475 *
4476 * In this scenario, we already dropped the connection hard
 4477 * when we received the unsuitable uuids (receive_uuids()).
4478 *
4479 * Should we want to change this, that is: not drop the connection in
4480 * receive_uuids() already, then we would need to add a branch here
4481 * that aborts the attach of "unsuitable uuids" on the peer in case
4482 * this node is currently Diskless Primary.
4483 */
4484
b30ab791
AG
4485 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4486 get_ldev_if_state(device, D_NEGOTIATING)) {
b411b363
PR
4487 int cr; /* consider resync */
4488
4489 /* if we established a new connection */
4ac4aada 4490 cr = (os.conn < C_CONNECTED);
b411b363
PR
4491 /* if we had an established connection
4492 * and one of the nodes newly attaches a disk */
4ac4aada 4493 cr |= (os.conn == C_CONNECTED &&
b411b363 4494 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 4495 os.disk == D_NEGOTIATING));
b411b363 4496 /* if we have both been inconsistent, and the peer has been
a2823ea9 4497 * forced to be UpToDate with --force */
b30ab791 4498 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4499 /* if we had been plain connected, and the admin requested to
4500 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 4501 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
4502 (peer_state.conn >= C_STARTING_SYNC_S &&
4503 peer_state.conn <= C_WF_BITMAP_T));
4504
4505 if (cr)
69a22773 4506 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
b411b363 4507
b30ab791 4508 put_ldev(device);
4ac4aada
LE
4509 if (ns.conn == C_MASK) {
4510 ns.conn = C_CONNECTED;
b30ab791
AG
4511 if (device->state.disk == D_NEGOTIATING) {
4512 drbd_force_state(device, NS(disk, D_FAILED));
b411b363 4513 } else if (peer_state.disk == D_NEGOTIATING) {
d0180171 4514 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
b411b363 4515 peer_state.disk = D_DISKLESS;
580b9767 4516 real_peer_disk = D_DISKLESS;
b411b363 4517 } else {
9f4fe9ad 4518 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
82bc0194 4519 return -EIO;
0b0ba1ef 4520 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
9f4fe9ad 4521 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4522 return -EIO;
b411b363
PR
4523 }
4524 }
4525 }
4526
0500813f 4527 spin_lock_irq(&device->resource->req_lock);
b30ab791 4528 if (os.i != drbd_read_state(device).i)
b411b363 4529 goto retry;
b30ab791 4530 clear_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4531 ns.peer = peer_state.role;
4532 ns.pdsk = real_peer_disk;
4533 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 4534 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b30ab791 4535 ns.disk = device->new_state_tmp.disk;
4ac4aada 4536 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
b30ab791
AG
4537 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4538 test_bit(NEW_CUR_UUID, &device->flags)) {
8554df1c 4539 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 4540 for temporary network outages! */
0500813f 4541 spin_unlock_irq(&device->resource->req_lock);
d0180171 4542 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
9f4fe9ad 4543 tl_clear(peer_device->connection);
b30ab791
AG
4544 drbd_uuid_new_current(device);
4545 clear_bit(NEW_CUR_UUID, &device->flags);
9f4fe9ad 4546 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 4547 return -EIO;
481c6f50 4548 }
b30ab791
AG
4549 rv = _drbd_set_state(device, ns, cs_flags, NULL);
4550 ns = drbd_read_state(device);
0500813f 4551 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
4552
4553 if (rv < SS_SUCCESS) {
9f4fe9ad 4554 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4555 return -EIO;
b411b363
PR
4556 }
4557
4ac4aada
LE
4558 if (os.conn > C_WF_REPORT_PARAMS) {
4559 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
4560 peer_state.disk != D_NEGOTIATING ) {
4561 /* we want resync, peer has not yet decided to sync... */
4562 /* Nowadays only used when forcing a node into primary role and
4563 setting its disk to UpToDate with that */
69a22773
AG
4564 drbd_send_uuids(peer_device);
4565 drbd_send_current_state(peer_device);
b411b363
PR
4566 }
4567 }
4568
b30ab791 4569 clear_bit(DISCARD_MY_DATA, &device->flags);
b411b363 4570
b30ab791 4571 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
b411b363 4572
82bc0194 4573 return 0;
b411b363
PR
4574}
4575
bde89a9e 4576static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4577{
9f4fe9ad 4578 struct drbd_peer_device *peer_device;
b30ab791 4579 struct drbd_device *device;
e658983a 4580 struct p_rs_uuid *p = pi->data;
4a76b161 4581
9f4fe9ad
AG
4582 peer_device = conn_peer_device(connection, pi->vnr);
4583 if (!peer_device)
4a76b161 4584 return -EIO;
9f4fe9ad 4585 device = peer_device->device;
b411b363 4586
b30ab791
AG
4587 wait_event(device->misc_wait,
4588 device->state.conn == C_WF_SYNC_UUID ||
4589 device->state.conn == C_BEHIND ||
4590 device->state.conn < C_CONNECTED ||
4591 device->state.disk < D_NEGOTIATING);
b411b363 4592
0b0ba1ef 4593 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
b411b363 4594
b411b363
PR
4595 /* Here the _drbd_uuid_ functions are right, current should
4596 _not_ be rotated into the history */
b30ab791
AG
4597 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4598 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4599 _drbd_uuid_set(device, UI_BITMAP, 0UL);
b411b363 4600
b30ab791
AG
4601 drbd_print_uuids(device, "updated sync uuid");
4602 drbd_start_resync(device, C_SYNC_TARGET);
b411b363 4603
b30ab791 4604 put_ldev(device);
b411b363 4605 } else
d0180171 4606 drbd_err(device, "Ignoring SyncUUID packet!\n");
b411b363 4607
82bc0194 4608 return 0;
b411b363
PR
4609}
4610
9b48ff07 4611/*
2c46407d
AG
4612 * receive_bitmap_plain
4613 *
4614 * Return 0 when done, 1 when another iteration is needed, and a negative error
4615 * code upon failure.
4616 */
4617static int
69a22773 4618receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
e658983a 4619 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4620{
50d0b1ad 4621 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
69a22773 4622 drbd_header_size(peer_device->connection);
e658983a 4623 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4624 c->bm_words - c->word_offset);
e658983a 4625 unsigned int want = num_words * sizeof(*p);
2c46407d 4626 int err;
b411b363 4627
50d0b1ad 4628 if (want != size) {
69a22773 4629 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4630 return -EIO;
b411b363
PR
4631 }
4632 if (want == 0)
2c46407d 4633 return 0;
69a22773 4634 err = drbd_recv_all(peer_device->connection, p, want);
82bc0194 4635 if (err)
2c46407d 4636 return err;
b411b363 4637
69a22773 4638 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
b411b363
PR
4639
4640 c->word_offset += num_words;
4641 c->bit_offset = c->word_offset * BITS_PER_LONG;
4642 if (c->bit_offset > c->bm_bits)
4643 c->bit_offset = c->bm_bits;
4644
2c46407d 4645 return 1;
b411b363
PR
4646}
4647
a02d1240
AG
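/* Layout of the p_compressed_bm "encoding" byte, as read by the three
 * accessors below: bits 0-3 carry the bitmap encoding (enum drbd_bitmap_code),
 * bits 4-6 the number of padding bits at the end of the bit stream, and
 * bit 7 whether the first run-length describes set bits. */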
4648static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4649{
4650 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4651}
4652
4653static int dcbp_get_start(struct p_compressed_bm *p)
4654{
4655 return (p->encoding & 0x80) != 0;
4656}
4657
4658static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4659{
4660 return (p->encoding >> 4) & 0x7;
4661}
4662
9b48ff07 4663/*
2c46407d
AG
4664 * recv_bm_rle_bits
4665 *
4666 * Return 0 when done, 1 when another iteration is needed, and a negative error
4667 * code upon failure.
4668 */
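/* Decoder summary (as implemented below): the payload is a bit stream of
 * VLI encoded run lengths.  Runs strictly alternate between cleared and set
 * bits; the polarity of the first run comes from dcbp_get_start().  Runs of
 * cleared bits only advance the bit offset, runs of set bits are applied to
 * the local bitmap via _drbd_bm_set_bits(). */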
4669static int
69a22773 4670recv_bm_rle_bits(struct drbd_peer_device *peer_device,
b411b363 4671 struct p_compressed_bm *p,
c6d25cfe
PR
4672 struct bm_xfer_ctx *c,
4673 unsigned int len)
b411b363
PR
4674{
4675 struct bitstream bs;
4676 u64 look_ahead;
4677 u64 rl;
4678 u64 tmp;
4679 unsigned long s = c->bit_offset;
4680 unsigned long e;
a02d1240 4681 int toggle = dcbp_get_start(p);
b411b363
PR
4682 int have;
4683 int bits;
4684
a02d1240 4685 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4686
4687 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4688 if (bits < 0)
2c46407d 4689 return -EIO;
b411b363
PR
4690
4691 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4692 bits = vli_decode_bits(&rl, look_ahead);
4693 if (bits <= 0)
2c46407d 4694 return -EIO;
b411b363
PR
4695
4696 if (toggle) {
4697 e = s + rl -1;
4698 if (e >= c->bm_bits) {
69a22773 4699 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4700 return -EIO;
b411b363 4701 }
69a22773 4702 _drbd_bm_set_bits(peer_device->device, s, e);
b411b363
PR
4703 }
4704
4705 if (have < bits) {
69a22773 4706 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
b411b363
PR
4707 have, bits, look_ahead,
4708 (unsigned int)(bs.cur.b - p->code),
4709 (unsigned int)bs.buf_len);
2c46407d 4710 return -EIO;
b411b363 4711 }
d2da5b0c
LE
4712 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4713 if (likely(bits < 64))
4714 look_ahead >>= bits;
4715 else
4716 look_ahead = 0;
b411b363
PR
4717 have -= bits;
4718
4719 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4720 if (bits < 0)
2c46407d 4721 return -EIO;
b411b363
PR
4722 look_ahead |= tmp << have;
4723 have += bits;
4724 }
4725
4726 c->bit_offset = s;
4727 bm_xfer_ctx_bit_to_word_offset(c);
4728
2c46407d 4729 return (s != c->bm_bits);
b411b363
PR
4730}
4731
9b48ff07 4732/*
2c46407d
AG
4733 * decode_bitmap_c
4734 *
4735 * Return 0 when done, 1 when another iteration is needed, and a negative error
4736 * code upon failure.
4737 */
4738static int
69a22773 4739decode_bitmap_c(struct drbd_peer_device *peer_device,
b411b363 4740 struct p_compressed_bm *p,
c6d25cfe
PR
4741 struct bm_xfer_ctx *c,
4742 unsigned int len)
b411b363 4743{
a02d1240 4744 if (dcbp_get_code(p) == RLE_VLI_Bits)
69a22773 4745 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
b411b363
PR
4746
4747 /* other variants had been implemented for evaluation,
4748 * but have been dropped as this one turned out to be "best"
4749 * during all our tests. */
4750
69a22773
AG
4751 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4752 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4753 return -EIO;
b411b363
PR
4754}
4755
b30ab791 4756void INFO_bm_xfer_stats(struct drbd_device *device,
b411b363
PR
4757 const char *direction, struct bm_xfer_ctx *c)
4758{
4759 /* what would it take to transfer it "plaintext" */
a6b32bc3 4760 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
50d0b1ad
AG
4761 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4762 unsigned int plain =
4763 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4764 c->bm_words * sizeof(unsigned long);
4765 unsigned int total = c->bytes[0] + c->bytes[1];
4766 unsigned int r;
b411b363
PR
4767
4768 /* total cannot be zero, but just in case: */
4769 if (total == 0)
4770 return;
4771
4772 /* don't report if not compressed */
4773 if (total >= plain)
4774 return;
4775
4776 /* total < plain. check for overflow, still */
4777 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4778 : (1000 * total / plain);
4779
4780 if (r > 1000)
4781 r = 1000;
4782
4783 r = 1000 - r;
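	/* Worked example (made-up numbers): plain = 80000 bytes, total = 20000
	 * bytes gives r = 1000 * 20000 / 80000 = 250, so r = 1000 - 250 = 750,
	 * reported below as "compression: 75.0%". */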
d0180171 4784 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
b411b363
PR
4785 "total %u; compression: %u.%u%%\n",
4786 direction,
4787 c->bytes[1], c->packets[1],
4788 c->bytes[0], c->packets[0],
4789 total, r/10, r % 10);
4790}
4791
4792/* Since we are processing the bitfield from lower addresses to higher,
4793 it does not matter whether we process it in 32 bit chunks or 64 bit
4794 chunks, as long as it is little endian. (Understand it as a byte stream,
4795 beginning with the lowest byte...) If we used big endian,
4796 we would need to process it from the highest address to the lowest,
4797 in order to be agnostic to the 32 vs 64 bit issue.
4798
4799 Returns 0 on success, a negative error code otherwise. */
bde89a9e 4800static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4801{
9f4fe9ad 4802 struct drbd_peer_device *peer_device;
b30ab791 4803 struct drbd_device *device;
b411b363 4804 struct bm_xfer_ctx c;
2c46407d 4805 int err;
4a76b161 4806
9f4fe9ad
AG
4807 peer_device = conn_peer_device(connection, pi->vnr);
4808 if (!peer_device)
4a76b161 4809 return -EIO;
9f4fe9ad 4810 device = peer_device->device;
b411b363 4811
b30ab791 4812 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
20ceb2b2
LE
4813 /* you are supposed to send additional out-of-sync information
4814 * if you actually set bits during this phase */
b411b363 4815
b411b363 4816 c = (struct bm_xfer_ctx) {
b30ab791
AG
4817 .bm_bits = drbd_bm_bits(device),
4818 .bm_words = drbd_bm_words(device),
b411b363
PR
4819 };
4820
2c46407d 4821 for(;;) {
e658983a 4822 if (pi->cmd == P_BITMAP)
69a22773 4823 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
e658983a 4824 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4825 /* MAYBE: sanity check that we speak proto >= 90,
4826 * and the feature is enabled! */
e658983a 4827 struct p_compressed_bm *p = pi->data;
b411b363 4828
bde89a9e 4829 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
d0180171 4830 drbd_err(device, "ReportCBitmap packet too large\n");
82bc0194 4831 err = -EIO;
b411b363
PR
4832 goto out;
4833 }
e658983a 4834 if (pi->size <= sizeof(*p)) {
d0180171 4835 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4836 err = -EIO;
78fcbdae 4837 goto out;
b411b363 4838 }
9f4fe9ad 4839 err = drbd_recv_all(peer_device->connection, p, pi->size);
e658983a
AG
4840 if (err)
4841 goto out;
69a22773 4842 err = decode_bitmap_c(peer_device, p, &c, pi->size);
b411b363 4843 } else {
d0180171 4844 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
82bc0194 4845 err = -EIO;
b411b363
PR
4846 goto out;
4847 }
4848
e2857216 4849 c.packets[pi->cmd == P_BITMAP]++;
bde89a9e 4850 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
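		/* Index 1 counts plain P_BITMAP packets, index 0 compressed ones;
		 * INFO_bm_xfer_stats() reports them as "plain" and "RLE" respectively. */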
b411b363 4851
2c46407d
AG
4852 if (err <= 0) {
4853 if (err < 0)
4854 goto out;
b411b363 4855 break;
2c46407d 4856 }
9f4fe9ad 4857 err = drbd_recv_header(peer_device->connection, pi);
82bc0194 4858 if (err)
b411b363 4859 goto out;
2c46407d 4860 }
b411b363 4861
b30ab791 4862 INFO_bm_xfer_stats(device, "receive", &c);
b411b363 4863
b30ab791 4864 if (device->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4865 enum drbd_state_rv rv;
4866
b30ab791 4867 err = drbd_send_bitmap(device);
82bc0194 4868 if (err)
b411b363
PR
4869 goto out;
4870 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
b30ab791 4871 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
0b0ba1ef 4872 D_ASSERT(device, rv == SS_SUCCESS);
b30ab791 4873 } else if (device->state.conn != C_WF_BITMAP_S) {
b411b363
PR
4874 /* admin may have requested C_DISCONNECTING,
4875 * other threads may have noticed network errors */
d0180171 4876 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
b30ab791 4877 drbd_conn_str(device->state.conn));
b411b363 4878 }
82bc0194 4879 err = 0;
b411b363 4880
b411b363 4881 out:
b30ab791
AG
4882 drbd_bm_unlock(device);
4883 if (!err && device->state.conn == C_WF_BITMAP_S)
4884 drbd_start_resync(device, C_SYNC_SOURCE);
82bc0194 4885 return err;
b411b363
PR
4886}
4887
bde89a9e 4888static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4889{
1ec861eb 4890 drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4891 pi->cmd, pi->size);
b411b363 4892
bde89a9e 4893 return ignore_remaining_packet(connection, pi);
b411b363
PR
4894}
4895
bde89a9e 4896static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 4897{
e7f52dfb
LE
4898 /* Make sure we've acked all the TCP data associated
4899 * with the data requests being unplugged */
ddd061b8 4900 tcp_sock_set_quickack(connection->data.socket->sk, 2);
82bc0194 4901 return 0;
0ced55a3
PR
4902}
4903
bde89a9e 4904static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
73a01a18 4905{
9f4fe9ad 4906 struct drbd_peer_device *peer_device;
b30ab791 4907 struct drbd_device *device;
e658983a 4908 struct p_block_desc *p = pi->data;
4a76b161 4909
9f4fe9ad
AG
4910 peer_device = conn_peer_device(connection, pi->vnr);
4911 if (!peer_device)
4a76b161 4912 return -EIO;
9f4fe9ad 4913 device = peer_device->device;
73a01a18 4914
b30ab791 4915 switch (device->state.conn) {
f735e363
LE
4916 case C_WF_SYNC_UUID:
4917 case C_WF_BITMAP_T:
4918 case C_BEHIND:
4919 break;
4920 default:
d0180171 4921 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
b30ab791 4922 drbd_conn_str(device->state.conn));
f735e363
LE
4923 }
4924
b30ab791 4925 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
73a01a18 4926
82bc0194 4927 return 0;
73a01a18
PR
4928}
4929
700ca8c0
PR
4930static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
4931{
4932 struct drbd_peer_device *peer_device;
4933 struct p_block_desc *p = pi->data;
4934 struct drbd_device *device;
4935 sector_t sector;
4936 int size, err = 0;
4937
4938 peer_device = conn_peer_device(connection, pi->vnr);
4939 if (!peer_device)
4940 return -EIO;
4941 device = peer_device->device;
4942
4943 sector = be64_to_cpu(p->sector);
4944 size = be32_to_cpu(p->blksize);
4945
4946 dec_rs_pending(device);
4947
4948 if (get_ldev(device)) {
4949 struct drbd_peer_request *peer_req;
9945172a 4950 const enum req_op op = REQ_OP_WRITE_ZEROES;
700ca8c0
PR
4951
4952 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
9104d31a 4953 size, 0, GFP_NOIO);
700ca8c0
PR
4954 if (!peer_req) {
4955 put_ldev(device);
4956 return -ENOMEM;
4957 }
4958
4959 peer_req->w.cb = e_end_resync_block;
4960 peer_req->submit_jif = jiffies;
f31e583a 4961 peer_req->flags |= EE_TRIM;
700ca8c0
PR
4962
4963 spin_lock_irq(&device->resource->req_lock);
4964 list_add_tail(&peer_req->w.list, &device->sync_ee);
4965 spin_unlock_irq(&device->resource->req_lock);
4966
4967 atomic_add(pi->size >> 9, &device->rs_sect_ev);
86563de8
BVA
4968 err = drbd_submit_peer_request(device, peer_req, op,
4969 DRBD_FAULT_RS_WR);
700ca8c0
PR
4970
4971 if (err) {
4972 spin_lock_irq(&device->resource->req_lock);
4973 list_del(&peer_req->w.list);
4974 spin_unlock_irq(&device->resource->req_lock);
4975
4976 drbd_free_peer_req(device, peer_req);
4977 put_ldev(device);
4978 err = 0;
4979 goto fail;
4980 }
4981
4982 inc_unacked(device);
4983
4984 /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
4985 as well as drbd_rs_complete_io() */
4986 } else {
4987 fail:
4988 drbd_rs_complete_io(device, sector);
4989 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
4990 }
4991
4992 atomic_add(size >> 9, &device->rs_sect_in);
4993
4994 return err;
4995}
4996
02918be2
PR
4997struct data_cmd {
4998 int expect_payload;
9104d31a 4999 unsigned int pkt_size;
bde89a9e 5000 int (*fn)(struct drbd_connection *, struct packet_info *);
02918be2
PR
5001};
5002
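/* pkt_size is the fixed sub-header that drbdd() reads in before dispatching
 * to fn(); expect_payload marks commands that may carry additional payload
 * beyond that fixed part, which the handler then receives itself. */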
5003static struct data_cmd drbd_cmd_handler[] = {
5004 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
5005 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
5006 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
5007 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
5008 [P_BITMAP] = { 1, 0, receive_bitmap } ,
5009 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
5010 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
5011 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
5012 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
5013 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
5014 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
5015 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
5016 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
5017 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
5018 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
5019 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
5020 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
5021 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
5022 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
5023 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
700ca8c0 5024 [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
02918be2 5025 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 5026 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 5027 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 5028 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
a0fb3c47 5029 [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
f31e583a 5030 [P_ZEROES] = { 0, sizeof(struct p_trim), receive_Data },
700ca8c0 5031 [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
b411b363
PR
5032};
5033
bde89a9e 5034static void drbdd(struct drbd_connection *connection)
b411b363 5035{
77351055 5036 struct packet_info pi;
02918be2 5037 size_t shs; /* sub header size */
82bc0194 5038 int err;
b411b363 5039
bde89a9e 5040 while (get_t_state(&connection->receiver) == RUNNING) {
9104d31a 5041 struct data_cmd const *cmd;
b411b363 5042
bde89a9e 5043 drbd_thread_current_set_cpu(&connection->receiver);
c51a0ef3
LE
5044 update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
5045 if (drbd_recv_header_maybe_unplug(connection, &pi))
02918be2 5046 goto err_out;
b411b363 5047
deebe195 5048 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 5049 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
1ec861eb 5050 drbd_err(connection, "Unexpected data packet %s (0x%04x)",
2fcb8f30 5051 cmdname(pi.cmd), pi.cmd);
02918be2 5052 goto err_out;
0b33a916 5053 }
b411b363 5054
e658983a 5055 shs = cmd->pkt_size;
9104d31a
LE
5056 if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
5057 shs += sizeof(struct o_qlim);
e658983a 5058 if (pi.size > shs && !cmd->expect_payload) {
1ec861eb 5059 drbd_err(connection, "No payload expected %s l:%d\n",
2fcb8f30 5060 cmdname(pi.cmd), pi.size);
02918be2 5061 goto err_out;
b411b363 5062 }
9104d31a
LE
5063 if (pi.size < shs) {
5064 drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
5065 cmdname(pi.cmd), (int)shs, pi.size);
5066 goto err_out;
5067 }
b411b363 5068
c13f7e1a 5069 if (shs) {
944410e9 5070 update_receiver_timing_details(connection, drbd_recv_all_warn);
bde89a9e 5071 err = drbd_recv_all_warn(connection, pi.data, shs);
a5c31904 5072 if (err)
c13f7e1a 5073 goto err_out;
e2857216 5074 pi.size -= shs;
c13f7e1a
LE
5075 }
5076
944410e9 5077 update_receiver_timing_details(connection, cmd->fn);
bde89a9e 5078 err = cmd->fn(connection, &pi);
4a76b161 5079 if (err) {
1ec861eb 5080 drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
9f5bdc33 5081 cmdname(pi.cmd), err, pi.size);
02918be2 5082 goto err_out;
b411b363
PR
5083 }
5084 }
82bc0194 5085 return;
b411b363 5086
82bc0194 5087 err_out:
bde89a9e 5088 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
5089}
5090
bde89a9e 5091static void conn_disconnect(struct drbd_connection *connection)
b411b363 5092{
c06ece6b 5093 struct drbd_peer_device *peer_device;
bbeb641c 5094 enum drbd_conns oc;
376694a0 5095 int vnr;
b411b363 5096
bde89a9e 5097 if (connection->cstate == C_STANDALONE)
b411b363 5098 return;
b411b363 5099
545752d5
LE
5100 /* We are about to start the cleanup after connection loss.
5101 * Make sure drbd_make_request knows about that.
5102 * Usually we should be in some network failure state already,
5103 * but just in case we are not, we fix it up here.
5104 */
bde89a9e 5105 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 5106
668700b4 5107 /* ack_receiver does not clean up anything. it must not interfere, either */
1c03e520 5108 drbd_thread_stop(&connection->ack_receiver);
668700b4
PR
5109 if (connection->ack_sender) {
5110 destroy_workqueue(connection->ack_sender);
5111 connection->ack_sender = NULL;
5112 }
bde89a9e 5113 drbd_free_sock(connection);
360cc740 5114
c141ebda 5115 rcu_read_lock();
c06ece6b
AG
5116 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5117 struct drbd_device *device = peer_device->device;
b30ab791 5118 kref_get(&device->kref);
c141ebda 5119 rcu_read_unlock();
69a22773 5120 drbd_disconnected(peer_device);
c06ece6b 5121 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
5122 rcu_read_lock();
5123 }
5124 rcu_read_unlock();
5125
bde89a9e 5126 if (!list_empty(&connection->current_epoch->list))
1ec861eb 5127 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
12038a3a 5128 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
bde89a9e
AG
5129 atomic_set(&connection->current_epoch->epoch_size, 0);
5130 connection->send.seen_any_write_yet = false;
12038a3a 5131
1ec861eb 5132 drbd_info(connection, "Connection closed\n");
360cc740 5133
bde89a9e
AG
5134 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
5135 conn_try_outdate_peer_async(connection);
cb703454 5136
0500813f 5137 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 5138 oc = connection->cstate;
bbeb641c 5139 if (oc >= C_UNCONNECTED)
bde89a9e 5140 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 5141
0500813f 5142 spin_unlock_irq(&connection->resource->req_lock);
360cc740 5143
f3dfa40a 5144 if (oc == C_DISCONNECTING)
bde89a9e 5145 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
5146}
5147
69a22773 5148static int drbd_disconnected(struct drbd_peer_device *peer_device)
360cc740 5149{
69a22773 5150 struct drbd_device *device = peer_device->device;
360cc740 5151 unsigned int i;
b411b363 5152
85719573 5153 /* wait for current activity to cease. */
0500813f 5154 spin_lock_irq(&device->resource->req_lock);
b30ab791
AG
5155 _drbd_wait_ee_list_empty(device, &device->active_ee);
5156 _drbd_wait_ee_list_empty(device, &device->sync_ee);
5157 _drbd_wait_ee_list_empty(device, &device->read_ee);
0500813f 5158 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5159
5160 /* We do not have data structures that would allow us to
5161 * get the rs_pending_cnt down to 0 again.
5162 * * On C_SYNC_TARGET we do not have any data structures describing
5163 * the pending RSDataRequest's we have sent.
5164 * * On C_SYNC_SOURCE there is no data structure that tracks
5165 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
5166 * And no, it is not the sum of the reference counts in the
5167 * resync_LRU. The resync_LRU tracks the whole operation including
5168 * the disk-IO, while the rs_pending_cnt only tracks the blocks
5169 * on the fly. */
b30ab791
AG
5170 drbd_rs_cancel_all(device);
5171 device->rs_total = 0;
5172 device->rs_failed = 0;
5173 atomic_set(&device->rs_pending_cnt, 0);
5174 wake_up(&device->misc_wait);
b411b363 5175
b30ab791 5176 del_timer_sync(&device->resync_timer);
2bccef39 5177 resync_timer_fn(&device->resync_timer);
b411b363 5178
b411b363
PR
5179 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
5180 * w_make_resync_request etc. which may still be on the worker queue
5181 * to be "canceled" */
b5043c5e 5182 drbd_flush_workqueue(&peer_device->connection->sender_work);
b411b363 5183
b30ab791 5184 drbd_finish_peer_reqs(device);
b411b363 5185
d10b4ea3
PR
5186 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
5187 might have queued work again. The one before drbd_finish_peer_reqs() is
5188 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
b5043c5e 5189 drbd_flush_workqueue(&peer_device->connection->sender_work);
d10b4ea3 5190
08332d73
LE
5191 /* need to do it again, drbd_finish_peer_reqs() may have populated it
5192 * again via drbd_try_clear_on_disk_bm(). */
b30ab791 5193 drbd_rs_cancel_all(device);
b411b363 5194
b30ab791
AG
5195 kfree(device->p_uuid);
5196 device->p_uuid = NULL;
b411b363 5197
b30ab791 5198 if (!drbd_suspended(device))
69a22773 5199 tl_clear(peer_device->connection);
b411b363 5200
b30ab791 5201 drbd_md_sync(device);
b411b363 5202
be115b69
LE
5203 if (get_ldev(device)) {
5204 drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5205 "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
5206 put_ldev(device);
5207 }
20ceb2b2 5208
b411b363
PR
5209 /* tcp_close and release of sendpage pages can be deferred. I don't
5210 * want to use SO_LINGER, because apparently it can be deferred for
5211 * more than 20 seconds (longest time I checked).
5212 *
5213 * Actually we don't care exactly when the network stack does its
5214 * put_page(), but release our reference on these pages right here.
5215 */
b30ab791 5216 i = drbd_free_peer_reqs(device, &device->net_ee);
b411b363 5217 if (i)
d0180171 5218 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
b30ab791 5219 i = atomic_read(&device->pp_in_use_by_net);
435f0740 5220 if (i)
d0180171 5221 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
b30ab791 5222 i = atomic_read(&device->pp_in_use);
b411b363 5223 if (i)
d0180171 5224 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
b411b363 5225
0b0ba1ef
AG
5226 D_ASSERT(device, list_empty(&device->read_ee));
5227 D_ASSERT(device, list_empty(&device->active_ee));
5228 D_ASSERT(device, list_empty(&device->sync_ee));
5229 D_ASSERT(device, list_empty(&device->done_ee));
b411b363 5230
360cc740 5231 return 0;
b411b363
PR
5232}
5233
5234/*
5235 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
5236 * we can agree on is stored in agreed_pro_version.
5237 *
5238 * feature flags and the reserved array should be enough room for future
5239 * enhancements of the handshake protocol, and possible plugins...
5240 *
5241 * for now, they are expected to be zero, but ignored.
5242 */
bde89a9e 5243static int drbd_send_features(struct drbd_connection *connection)
b411b363 5244{
9f5bdc33
AG
5245 struct drbd_socket *sock;
5246 struct p_connection_features *p;
b411b363 5247
bde89a9e
AG
5248 sock = &connection->data;
5249 p = conn_prepare_command(connection, sock);
9f5bdc33 5250 if (!p)
e8d17b01 5251 return -EIO;
b411b363
PR
5252 memset(p, 0, sizeof(*p));
5253 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
5254 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
20c68fde 5255 p->feature_flags = cpu_to_be32(PRO_FEATURES);
bde89a9e 5256 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
5257}
5258
5259/*
5260 * return values:
5261 * 1 yes, we have a valid connection
5262 * 0 oops, did not work out, please try again
5263 * -1 peer talks different language,
5264 * no point in trying again, please go standalone.
5265 */
bde89a9e 5266static int drbd_do_features(struct drbd_connection *connection)
b411b363 5267{
bde89a9e 5268 /* ASSERT current == connection->receiver ... */
e658983a
AG
5269 struct p_connection_features *p;
5270 const int expect = sizeof(struct p_connection_features);
77351055 5271 struct packet_info pi;
a5c31904 5272 int err;
b411b363 5273
bde89a9e 5274 err = drbd_send_features(connection);
e8d17b01 5275 if (err)
b411b363
PR
5276 return 0;
5277
bde89a9e 5278 err = drbd_recv_header(connection, &pi);
69bc7bc3 5279 if (err)
b411b363
PR
5280 return 0;
5281
6038178e 5282 if (pi.cmd != P_CONNECTION_FEATURES) {
1ec861eb 5283 drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 5284 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5285 return -1;
5286 }
5287
77351055 5288 if (pi.size != expect) {
1ec861eb 5289 drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 5290 expect, pi.size);
b411b363
PR
5291 return -1;
5292 }
5293
e658983a 5294 p = pi.data;
bde89a9e 5295 err = drbd_recv_all_warn(connection, p, expect);
a5c31904 5296 if (err)
b411b363 5297 return 0;
b411b363 5298
b411b363
PR
5299 p->protocol_min = be32_to_cpu(p->protocol_min);
5300 p->protocol_max = be32_to_cpu(p->protocol_max);
5301 if (p->protocol_max == 0)
5302 p->protocol_max = p->protocol_min;
5303
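	/* Our range [PRO_VERSION_MIN, PRO_VERSION_MAX] and the peer's range
	 * [protocol_min, protocol_max] must overlap; we then agree on the
	 * highest version both sides support, and below on the intersection
	 * of the advertised feature flags. */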
5304 if (PRO_VERSION_MAX < p->protocol_min ||
5305 PRO_VERSION_MIN > p->protocol_max)
5306 goto incompat;
5307
bde89a9e 5308 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
20c68fde 5309 connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
b411b363 5310
1ec861eb 5311 drbd_info(connection, "Handshake successful: "
bde89a9e 5312 "Agreed network protocol version %d\n", connection->agreed_pro_version);
b411b363 5313
f31e583a 5314 drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s%s.\n",
9104d31a
LE
5315 connection->agreed_features,
5316 connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
5317 connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
f31e583a
LE
5318 connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : "",
5319 connection->agreed_features & DRBD_FF_WZEROES ? " WRITE_ZEROES" :
9104d31a 5320 connection->agreed_features ? "" : " none");
92d94ae6 5321
b411b363
PR
5322 return 1;
5323
5324 incompat:
1ec861eb 5325 drbd_err(connection, "incompatible DRBD dialects: "
b411b363
PR
5326 "I support %d-%d, peer supports %d-%d\n",
5327 PRO_VERSION_MIN, PRO_VERSION_MAX,
5328 p->protocol_min, p->protocol_max);
5329 return -1;
5330}
5331
5332#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
bde89a9e 5333static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5334{
1ec861eb
AG
5335 drbd_err(connection, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
5336 drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 5337 return -1;
b411b363
PR
5338}
5339#else
5340#define CHALLENGE_LEN 64
b10d96cb
JT
5341
5342/* Return value:
5343 1 - auth succeeded,
5344 0 - failed, try again (network error),
5345 -1 - auth failed, don't try again.
5346*/
5347
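/* Symmetric challenge-response over the shared secret: each side sends a
 * random challenge (P_AUTH_CHALLENGE), answers the peer's challenge with an
 * HMAC keyed by the shared secret (P_AUTH_RESPONSE), and verifies the peer's
 * response against the HMAC of its own challenge.  A peer presenting our own
 * challenge back at us is rejected. */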
bde89a9e 5348static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5349{
9f5bdc33 5350 struct drbd_socket *sock;
b411b363 5351 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
b411b363
PR
5352 char *response = NULL;
5353 char *right_response = NULL;
5354 char *peers_ch = NULL;
44ed167d
PR
5355 unsigned int key_len;
5356 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363 5357 unsigned int resp_size;
77ce56e2 5358 struct shash_desc *desc;
77351055 5359 struct packet_info pi;
44ed167d 5360 struct net_conf *nc;
69bc7bc3 5361 int err, rv;
b411b363 5362
9f5bdc33 5363 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 5364
44ed167d 5365 rcu_read_lock();
bde89a9e 5366 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
5367 key_len = strlen(nc->shared_secret);
5368 memcpy(secret, nc->shared_secret, key_len);
5369 rcu_read_unlock();
5370
77ce56e2
AB
5371 desc = kmalloc(sizeof(struct shash_desc) +
5372 crypto_shash_descsize(connection->cram_hmac_tfm),
5373 GFP_KERNEL);
5374 if (!desc) {
5375 rv = -1;
5376 goto fail;
5377 }
9534d671 5378 desc->tfm = connection->cram_hmac_tfm;
b411b363 5379
9534d671 5380 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 5381 if (rv) {
9534d671 5382 drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
b10d96cb 5383 rv = -1;
b411b363
PR
5384 goto fail;
5385 }
5386
5387 get_random_bytes(my_challenge, CHALLENGE_LEN);
5388
bde89a9e
AG
5389 sock = &connection->data;
5390 if (!conn_prepare_command(connection, sock)) {
9f5bdc33
AG
5391 rv = 0;
5392 goto fail;
5393 }
bde89a9e 5394 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 5395 my_challenge, CHALLENGE_LEN);
b411b363
PR
5396 if (!rv)
5397 goto fail;
5398
bde89a9e 5399 err = drbd_recv_header(connection, &pi);
69bc7bc3
AG
5400 if (err) {
5401 rv = 0;
b411b363 5402 goto fail;
69bc7bc3 5403 }
b411b363 5404
77351055 5405 if (pi.cmd != P_AUTH_CHALLENGE) {
1ec861eb 5406 drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 5407 cmdname(pi.cmd), pi.cmd);
9049ccd4 5408 rv = -1;
b411b363
PR
5409 goto fail;
5410 }
5411
77351055 5412 if (pi.size > CHALLENGE_LEN * 2) {
1ec861eb 5413 drbd_err(connection, "expected AuthChallenge payload too big.\n");
b10d96cb 5414 rv = -1;
b411b363
PR
5415 goto fail;
5416 }
5417
67cca286
PR
5418 if (pi.size < CHALLENGE_LEN) {
5419 drbd_err(connection, "AuthChallenge payload too small.\n");
5420 rv = -1;
5421 goto fail;
5422 }
5423
77351055 5424 peers_ch = kmalloc(pi.size, GFP_NOIO);
8404e191 5425 if (!peers_ch) {
b10d96cb 5426 rv = -1;
b411b363
PR
5427 goto fail;
5428 }
5429
bde89a9e 5430 err = drbd_recv_all_warn(connection, peers_ch, pi.size);
a5c31904 5431 if (err) {
b411b363
PR
5432 rv = 0;
5433 goto fail;
5434 }
5435
67cca286
PR
5436 if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
5437 drbd_err(connection, "Peer presented the same challenge!\n");
5438 rv = -1;
5439 goto fail;
5440 }
5441
9534d671 5442 resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
b411b363 5443 response = kmalloc(resp_size, GFP_NOIO);
8404e191 5444 if (!response) {
b10d96cb 5445 rv = -1;
b411b363
PR
5446 goto fail;
5447 }
5448
9534d671 5449 rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
b411b363 5450 if (rv) {
1ec861eb 5451 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5452 rv = -1;
b411b363
PR
5453 goto fail;
5454 }
5455
bde89a9e 5456 if (!conn_prepare_command(connection, sock)) {
9f5bdc33 5457 rv = 0;
b411b363 5458 goto fail;
9f5bdc33 5459 }
bde89a9e 5460 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 5461 response, resp_size);
b411b363
PR
5462 if (!rv)
5463 goto fail;
5464
bde89a9e 5465 err = drbd_recv_header(connection, &pi);
69bc7bc3 5466 if (err) {
b411b363
PR
5467 rv = 0;
5468 goto fail;
5469 }
5470
77351055 5471 if (pi.cmd != P_AUTH_RESPONSE) {
1ec861eb 5472 drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 5473 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5474 rv = 0;
5475 goto fail;
5476 }
5477
77351055 5478 if (pi.size != resp_size) {
1ec861eb 5479 drbd_err(connection, "expected AuthResponse payload of wrong size\n");
b411b363
PR
5480 rv = 0;
5481 goto fail;
5482 }
b411b363 5483
bde89a9e 5484 err = drbd_recv_all_warn(connection, response , resp_size);
a5c31904 5485 if (err) {
b411b363
PR
5486 rv = 0;
5487 goto fail;
5488 }
5489
5490 right_response = kmalloc(resp_size, GFP_NOIO);
8404e191 5491 if (!right_response) {
b10d96cb 5492 rv = -1;
b411b363
PR
5493 goto fail;
5494 }
5495
9534d671
HX
5496 rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
5497 right_response);
b411b363 5498 if (rv) {
1ec861eb 5499 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5500 rv = -1;
b411b363
PR
5501 goto fail;
5502 }
5503
5504 rv = !memcmp(response, right_response, resp_size);
5505
5506 if (rv)
1ec861eb 5507 drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
44ed167d 5508 resp_size);
b10d96cb
JT
5509 else
5510 rv = -1;
b411b363
PR
5511
5512 fail:
5513 kfree(peers_ch);
5514 kfree(response);
5515 kfree(right_response);
77ce56e2
AB
5516 if (desc) {
5517 shash_desc_zero(desc);
5518 kfree(desc);
5519 }
b411b363
PR
5520
5521 return rv;
5522}
5523#endif
5524
8fe60551 5525int drbd_receiver(struct drbd_thread *thi)
b411b363 5526{
bde89a9e 5527 struct drbd_connection *connection = thi->connection;
b411b363
PR
5528 int h;
5529
1ec861eb 5530 drbd_info(connection, "receiver (re)started\n");
b411b363
PR
5531
5532 do {
bde89a9e 5533 h = conn_connect(connection);
b411b363 5534 if (h == 0) {
bde89a9e 5535 conn_disconnect(connection);
20ee6390 5536 schedule_timeout_interruptible(HZ);
b411b363
PR
5537 }
5538 if (h == -1) {
1ec861eb 5539 drbd_warn(connection, "Discarding network configuration.\n");
bde89a9e 5540 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
5541 }
5542 } while (h == 0);
5543
c51a0ef3
LE
5544 if (h > 0) {
5545 blk_start_plug(&connection->receiver_plug);
bde89a9e 5546 drbdd(connection);
c51a0ef3
LE
5547 blk_finish_plug(&connection->receiver_plug);
5548 }
b411b363 5549
bde89a9e 5550 conn_disconnect(connection);
b411b363 5551
1ec861eb 5552 drbd_info(connection, "receiver terminated\n");
b411b363
PR
5553 return 0;
5554}
5555
5556/* ********* acknowledge sender ******** */
5557
bde89a9e 5558static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5559{
e658983a 5560 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
5561 int retcode = be32_to_cpu(p->retcode);
5562
5563 if (retcode >= SS_SUCCESS) {
bde89a9e 5564 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
e4f78ede 5565 } else {
bde89a9e 5566 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
1ec861eb 5567 drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
e4f78ede
PR
5568 drbd_set_st_err_str(retcode), retcode);
5569 }
bde89a9e 5570 wake_up(&connection->ping_wait);
e4f78ede 5571
2735a594 5572 return 0;
e4f78ede 5573}
b411b363 5574
bde89a9e 5575static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5576{
9f4fe9ad 5577 struct drbd_peer_device *peer_device;
b30ab791 5578 struct drbd_device *device;
e658983a 5579 struct p_req_state_reply *p = pi->data;
b411b363
PR
5580 int retcode = be32_to_cpu(p->retcode);
5581
9f4fe9ad
AG
5582 peer_device = conn_peer_device(connection, pi->vnr);
5583 if (!peer_device)
2735a594 5584 return -EIO;
9f4fe9ad 5585 device = peer_device->device;
1952e916 5586
bde89a9e 5587 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
0b0ba1ef 5588 D_ASSERT(device, connection->agreed_pro_version < 100);
bde89a9e 5589 return got_conn_RqSReply(connection, pi);
4d0fc3fd
PR
5590 }
5591
b411b363 5592 if (retcode >= SS_SUCCESS) {
b30ab791 5593 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
b411b363 5594 } else {
b30ab791 5595 set_bit(CL_ST_CHG_FAIL, &device->flags);
d0180171 5596 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 5597 drbd_set_st_err_str(retcode), retcode);
b411b363 5598 }
b30ab791 5599 wake_up(&device->state_wait);
b411b363 5600
2735a594 5601 return 0;
b411b363
PR
5602}
5603
bde89a9e 5604static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5605{
bde89a9e 5606 return drbd_send_ping_ack(connection);
b411b363
PR
5607
5608}
5609
bde89a9e 5610static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363
PR
5611{
5612 /* restore idle timeout */
bde89a9e
AG
5613 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
5614 if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
5615 wake_up(&connection->ping_wait);
b411b363 5616
2735a594 5617 return 0;
b411b363
PR
5618}
5619
bde89a9e 5620static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5621{
9f4fe9ad 5622 struct drbd_peer_device *peer_device;
b30ab791 5623 struct drbd_device *device;
e658983a 5624 struct p_block_ack *p = pi->data;
b411b363
PR
5625 sector_t sector = be64_to_cpu(p->sector);
5626 int blksize = be32_to_cpu(p->blksize);
5627
9f4fe9ad
AG
5628 peer_device = conn_peer_device(connection, pi->vnr);
5629 if (!peer_device)
2735a594 5630 return -EIO;
9f4fe9ad 5631 device = peer_device->device;
1952e916 5632
9f4fe9ad 5633 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
b411b363 5634
69a22773 5635 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5636
b30ab791
AG
5637 if (get_ldev(device)) {
5638 drbd_rs_complete_io(device, sector);
5639 drbd_set_in_sync(device, sector, blksize);
1d53f09e 5640 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
b30ab791
AG
5641 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5642 put_ldev(device);
1d53f09e 5643 }
b30ab791
AG
5644 dec_rs_pending(device);
5645 atomic_add(blksize >> 9, &device->rs_sect_in);
b411b363 5646
2735a594 5647 return 0;
b411b363
PR
5648}
5649
bc9c5c41 5650static int
b30ab791 5651validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
bc9c5c41
AG
5652 struct rb_root *root, const char *func,
5653 enum drbd_req_event what, bool missing_ok)
b411b363
PR
5654{
5655 struct drbd_request *req;
5656 struct bio_and_error m;
5657
0500813f 5658 spin_lock_irq(&device->resource->req_lock);
b30ab791 5659 req = find_request(device, root, id, sector, missing_ok, func);
b411b363 5660 if (unlikely(!req)) {
0500813f 5661 spin_unlock_irq(&device->resource->req_lock);
85997675 5662 return -EIO;
b411b363
PR
5663 }
5664 __req_mod(req, what, &m);
0500813f 5665 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5666
5667 if (m.bio)
b30ab791 5668 complete_master_bio(device, &m);
85997675 5669 return 0;
b411b363
PR
5670}
5671
bde89a9e 5672static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5673{
9f4fe9ad 5674 struct drbd_peer_device *peer_device;
b30ab791 5675 struct drbd_device *device;
e658983a 5676 struct p_block_ack *p = pi->data;
b411b363
PR
5677 sector_t sector = be64_to_cpu(p->sector);
5678 int blksize = be32_to_cpu(p->blksize);
5679 enum drbd_req_event what;
5680
9f4fe9ad
AG
5681 peer_device = conn_peer_device(connection, pi->vnr);
5682 if (!peer_device)
2735a594 5683 return -EIO;
9f4fe9ad 5684 device = peer_device->device;
1952e916 5685
69a22773 5686 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5687
579b57ed 5688 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5689 drbd_set_in_sync(device, sector, blksize);
5690 dec_rs_pending(device);
2735a594 5691 return 0;
b411b363 5692 }
e05e1e59 5693 switch (pi->cmd) {
b411b363 5694 case P_RS_WRITE_ACK:
8554df1c 5695 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
5696 break;
5697 case P_WRITE_ACK:
8554df1c 5698 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5699 break;
5700 case P_RECV_ACK:
8554df1c 5701 what = RECV_ACKED_BY_PEER;
b411b363 5702 break;
d4dabbe2
LE
5703 case P_SUPERSEDED:
5704 what = CONFLICT_RESOLVED;
b411b363 5705 break;
7be8da07 5706 case P_RETRY_WRITE:
7be8da07 5707 what = POSTPONE_WRITE;
b411b363
PR
5708 break;
5709 default:
2735a594 5710 BUG();
b411b363
PR
5711 }
5712
b30ab791
AG
5713 return validate_req_change_req_state(device, p->block_id, sector,
5714 &device->write_requests, __func__,
2735a594 5715 what, false);
b411b363
PR
5716}
5717
bde89a9e 5718static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5719{
9f4fe9ad 5720 struct drbd_peer_device *peer_device;
b30ab791 5721 struct drbd_device *device;
e658983a 5722 struct p_block_ack *p = pi->data;
b411b363 5723 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5724 int size = be32_to_cpu(p->blksize);
85997675 5725 int err;
b411b363 5726
9f4fe9ad
AG
5727 peer_device = conn_peer_device(connection, pi->vnr);
5728 if (!peer_device)
2735a594 5729 return -EIO;
9f4fe9ad 5730 device = peer_device->device;
b411b363 5731
69a22773 5732 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5733
579b57ed 5734 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5735 dec_rs_pending(device);
5736 drbd_rs_failed_io(device, sector, size);
2735a594 5737 return 0;
b411b363 5738 }
2deb8336 5739
b30ab791
AG
5740 err = validate_req_change_req_state(device, p->block_id, sector,
5741 &device->write_requests, __func__,
303d1448 5742 NEG_ACKED, true);
85997675 5743 if (err) {
c3afd8f5
AG
5744 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5745 The master bio might already be completed, therefore the
5746 request is no longer in the collision hash. */
5747 /* In Protocol B we might already have got a P_RECV_ACK
5748 but then get a P_NEG_ACK afterwards. */
b30ab791 5749 drbd_set_out_of_sync(device, sector, size);
2deb8336 5750 }
2735a594 5751 return 0;
b411b363
PR
5752}
5753
bde89a9e 5754static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5755{
9f4fe9ad 5756 struct drbd_peer_device *peer_device;
b30ab791 5757 struct drbd_device *device;
e658983a 5758 struct p_block_ack *p = pi->data;
b411b363
PR
5759 sector_t sector = be64_to_cpu(p->sector);
5760
9f4fe9ad
AG
5761 peer_device = conn_peer_device(connection, pi->vnr);
5762 if (!peer_device)
2735a594 5763 return -EIO;
9f4fe9ad 5764 device = peer_device->device;
1952e916 5765
69a22773 5766 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
7be8da07 5767
d0180171 5768 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5769 (unsigned long long)sector, be32_to_cpu(p->blksize));
5770
b30ab791
AG
5771 return validate_req_change_req_state(device, p->block_id, sector,
5772 &device->read_requests, __func__,
2735a594 5773 NEG_ACKED, false);
b411b363
PR
5774}
5775
bde89a9e 5776static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5777{
9f4fe9ad 5778 struct drbd_peer_device *peer_device;
b30ab791 5779 struct drbd_device *device;
b411b363
PR
5780 sector_t sector;
5781 int size;
e658983a 5782 struct p_block_ack *p = pi->data;
1952e916 5783
9f4fe9ad
AG
5784 peer_device = conn_peer_device(connection, pi->vnr);
5785 if (!peer_device)
2735a594 5786 return -EIO;
9f4fe9ad 5787 device = peer_device->device;
b411b363
PR
5788
5789 sector = be64_to_cpu(p->sector);
5790 size = be32_to_cpu(p->blksize);
b411b363 5791
69a22773 5792 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5793
b30ab791 5794 dec_rs_pending(device);
b411b363 5795
b30ab791
AG
5796 if (get_ldev_if_state(device, D_FAILED)) {
5797 drbd_rs_complete_io(device, sector);
e05e1e59 5798 switch (pi->cmd) {
d612d309 5799 case P_NEG_RS_DREPLY:
b30ab791 5800 drbd_rs_failed_io(device, sector, size);
6327c911 5801 break;
d612d309
PR
5802 case P_RS_CANCEL:
5803 break;
5804 default:
2735a594 5805 BUG();
d612d309 5806 }
b30ab791 5807 put_ldev(device);
b411b363
PR
5808 }
5809
2735a594 5810 return 0;
b411b363
PR
5811}
5812
bde89a9e 5813static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5814{
e658983a 5815 struct p_barrier_ack *p = pi->data;
c06ece6b 5816 struct drbd_peer_device *peer_device;
9ed57dcb 5817 int vnr;
1952e916 5818
bde89a9e 5819 tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
b411b363 5820
9ed57dcb 5821 rcu_read_lock();
c06ece6b
AG
5822 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5823 struct drbd_device *device = peer_device->device;
5824
b30ab791
AG
5825 if (device->state.conn == C_AHEAD &&
5826 atomic_read(&device->ap_in_flight) == 0 &&
5827 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5828 device->start_resync_timer.expires = jiffies + HZ;
5829 add_timer(&device->start_resync_timer);
9ed57dcb 5830 }
c4752ef1 5831 }
9ed57dcb 5832 rcu_read_unlock();
c4752ef1 5833
2735a594 5834 return 0;
b411b363
PR
5835}
5836
bde89a9e 5837static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5838{
9f4fe9ad 5839 struct drbd_peer_device *peer_device;
b30ab791 5840 struct drbd_device *device;
e658983a 5841 struct p_block_ack *p = pi->data;
84b8c06b 5842 struct drbd_device_work *dw;
b411b363
PR
5843 sector_t sector;
5844 int size;
5845
9f4fe9ad
AG
5846 peer_device = conn_peer_device(connection, pi->vnr);
5847 if (!peer_device)
2735a594 5848 return -EIO;
9f4fe9ad 5849 device = peer_device->device;
1952e916 5850
b411b363
PR
5851 sector = be64_to_cpu(p->sector);
5852 size = be32_to_cpu(p->blksize);
5853
69a22773 5854 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363
PR
5855
5856 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
b30ab791 5857 drbd_ov_out_of_sync_found(device, sector, size);
b411b363 5858 else
b30ab791 5859 ov_out_of_sync_print(device);
b411b363 5860
b30ab791 5861 if (!get_ldev(device))
2735a594 5862 return 0;
1d53f09e 5863
b30ab791
AG
5864 drbd_rs_complete_io(device, sector);
5865 dec_rs_pending(device);
b411b363 5866
b30ab791 5867 --device->ov_left;
ea5442af
LE
5868
5869 /* let's advance progress step marks only for every other megabyte */
b30ab791
AG
5870 if ((device->ov_left & 0x200) == 0x200)
5871 drbd_advance_rs_marks(device, device->ov_left);
ea5442af 5872
b30ab791 5873 if (device->ov_left == 0) {
84b8c06b
AG
5874 dw = kmalloc(sizeof(*dw), GFP_NOIO);
5875 if (dw) {
5876 dw->w.cb = w_ov_finished;
5877 dw->device = device;
5878 drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
b411b363 5879 } else {
84b8c06b 5880 drbd_err(device, "kmalloc(dw) failed.");
b30ab791
AG
5881 ov_out_of_sync_print(device);
5882 drbd_resync_finished(device);
b411b363
PR
5883 }
5884 }
b30ab791 5885 put_ldev(device);
2735a594 5886 return 0;
b411b363
PR
5887}
5888
bde89a9e 5889static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 5890{
2735a594 5891 return 0;
b411b363
PR
5892}
5893
668700b4
PR
5894struct meta_sock_cmd {
5895 size_t pkt_size;
5896 int (*fn)(struct drbd_connection *connection, struct packet_info *);
5897};
5898
5899static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
0ced55a3 5900{
668700b4
PR
5901 long t;
5902 struct net_conf *nc;
32862ec7 5903
668700b4
PR
5904 rcu_read_lock();
5905 nc = rcu_dereference(connection->net_conf);
5906 t = ping_timeout ? nc->ping_timeo : nc->ping_int;
5907 rcu_read_unlock();
c141ebda 5908
668700b4
PR
5909 t *= HZ;
5910 if (ping_timeout)
5911 t /= 10;
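	/* ping_int is configured in seconds, ping_timeo in tenths of a second,
	 * hence the extra division by 10 for the ping timeout case. */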
082a3439 5912
668700b4
PR
5913 connection->meta.socket->sk->sk_rcvtimeo = t;
5914}
32862ec7 5915
668700b4
PR
5916static void set_ping_timeout(struct drbd_connection *connection)
5917{
5918 set_rcvtimeo(connection, 1);
0ced55a3
PR
5919}
5920
668700b4
PR
5921static void set_idle_timeout(struct drbd_connection *connection)
5922{
5923 set_rcvtimeo(connection, 0);
5924}
b411b363 5925
668700b4 5926static struct meta_sock_cmd ack_receiver_tbl[] = {
e658983a
AG
5927 [P_PING] = { 0, got_Ping },
5928 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5929 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5930 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5931 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5932 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5933 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5934 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5935 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5936 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5937 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5938 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5939 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5940 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5941 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5942 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5943 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5944};
b411b363 5945
1c03e520 5946int drbd_ack_receiver(struct drbd_thread *thi)
b411b363 5947{
bde89a9e 5948 struct drbd_connection *connection = thi->connection;
668700b4 5949 struct meta_sock_cmd *cmd = NULL;
77351055 5950 struct packet_info pi;
668700b4 5951 unsigned long pre_recv_jif;
257d0af6 5952 int rv;
bde89a9e 5953 void *buf = connection->meta.rbuf;
b411b363 5954 int received = 0;
bde89a9e 5955 unsigned int header_size = drbd_header_size(connection);
52b061a4 5956 int expect = header_size;
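	/* "expect" starts at the header size; once a header is decoded it grows
	 * by the fixed packet size of that command (see the dispatch below). */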
44ed167d 5957 bool ping_timeout_active = false;
b411b363 5958
8b700983 5959 sched_set_fifo_low(current);
b411b363 5960
e77a0a5c 5961 while (get_t_state(thi) == RUNNING) {
80822284 5962 drbd_thread_current_set_cpu(thi);
b411b363 5963
668700b4 5964 conn_reclaim_net_peer_reqs(connection);
44ed167d 5965
bde89a9e
AG
5966 if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5967 if (drbd_send_ping(connection)) {
1ec861eb 5968 drbd_err(connection, "drbd_send_ping has failed\n");
b411b363 5969 goto reconnect;
841ce241 5970 }
668700b4 5971 set_ping_timeout(connection);
44ed167d 5972 ping_timeout_active = true;
b411b363
PR
5973 }
5974
668700b4 5975 pre_recv_jif = jiffies;
bde89a9e 5976 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
b411b363
PR
5977
5978 /* Note:
5979 * -EINTR (on meta) we got a signal
5980 * -EAGAIN (on meta) rcvtimeo expired
5981 * -ECONNRESET other side closed the connection
5982 * -ERESTARTSYS (on data) we got a signal
5983 * rv < 0 other than above: unexpected error!
5984 * rv == expected: full header or command
5985 * rv < expected: "woken" by signal during receive
5986 * rv == 0 : "connection shut down by peer"
5987 */
5988 if (likely(rv > 0)) {
5989 received += rv;
5990 buf += rv;
5991 } else if (rv == 0) {
bde89a9e 5992 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
b66623e3
PR
5993 long t;
5994 rcu_read_lock();
bde89a9e 5995 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
b66623e3
PR
5996 rcu_read_unlock();
5997
bde89a9e
AG
5998 t = wait_event_timeout(connection->ping_wait,
5999 connection->cstate < C_WF_REPORT_PARAMS,
b66623e3 6000 t);
599377ac
PR
6001 if (t)
6002 break;
6003 }
1ec861eb 6004 drbd_err(connection, "meta connection shut down by peer.\n");
b411b363
PR
6005 goto reconnect;
6006 } else if (rv == -EAGAIN) {
cb6518cb
LE
6007 /* If the data socket received something meanwhile,
6008 * that is good enough: peer is still alive. */
668700b4 6009 if (time_after(connection->last_received, pre_recv_jif))
cb6518cb 6010 continue;
f36af18c 6011 if (ping_timeout_active) {
1ec861eb 6012 drbd_err(connection, "PingAck did not arrive in time.\n");
b411b363
PR
6013 goto reconnect;
6014 }
bde89a9e 6015 set_bit(SEND_PING, &connection->flags);
b411b363
PR
6016 continue;
6017 } else if (rv == -EINTR) {
668700b4
PR
6018 /* maybe drbd_thread_stop(): the while condition will notice.
6019 * maybe woken for send_ping: we'll send a ping above,
6020 * and change the rcvtimeo */
6021 flush_signals(current);
b411b363
PR
6022 continue;
6023 } else {
1ec861eb 6024 drbd_err(connection, "sock_recvmsg returned %d\n", rv);
b411b363
PR
6025 goto reconnect;
6026 }
6027
6028 if (received == expect && cmd == NULL) {
bde89a9e 6029 if (decode_header(connection, connection->meta.rbuf, &pi))
b411b363 6030 goto reconnect;
668700b4
PR
6031 cmd = &ack_receiver_tbl[pi.cmd];
6032 if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
1ec861eb 6033 drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
2fcb8f30 6034 cmdname(pi.cmd), pi.cmd);
b411b363
PR
6035 goto disconnect;
6036 }
e658983a 6037 expect = header_size + cmd->pkt_size;
52b061a4 6038 if (pi.size != expect - header_size) {
1ec861eb 6039 drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 6040 pi.cmd, pi.size);
b411b363 6041 goto reconnect;
257d0af6 6042 }
b411b363
PR
6043 }
6044 if (received == expect) {
2735a594 6045 bool err;
a4fbda8e 6046
bde89a9e 6047 err = cmd->fn(connection, &pi);
2735a594 6048 if (err) {
d75f773c 6049 drbd_err(connection, "%ps failed\n", cmd->fn);
b411b363 6050 goto reconnect;
1952e916 6051 }
b411b363 6052
bde89a9e 6053 connection->last_received = jiffies;
f36af18c 6054
668700b4
PR
6055 if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
6056 set_idle_timeout(connection);
44ed167d
PR
6057 ping_timeout_active = false;
6058 }
f36af18c 6059
bde89a9e 6060 buf = connection->meta.rbuf;
b411b363 6061 received = 0;
52b061a4 6062 expect = header_size;
b411b363
PR
6063 cmd = NULL;
6064 }
6065 }
6066
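 /* Only reachable via the goto labels below: the if (0) guards keep the
  * normal exit path from falling through into the error handling. */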
6067 if (0) {
6068reconnect:
bde89a9e
AG
6069 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6070 conn_md_sync(connection);
b411b363
PR
6071 }
6072 if (0) {
6073disconnect:
bde89a9e 6074 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 6075 }
b411b363 6076
668700b4 6077 drbd_info(connection, "ack_receiver terminated\n");
b411b363
PR
6078
6079 return 0;
6080}
668700b4
PR
6081
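/*
 * Work item (queued from the secondary write completion path) that
 * flushes the ACKs for completed peer write requests.  If tcp_cork is
 * configured, the meta socket is corked around drbd_finish_peer_reqs()
 * so the individual ACK packets leave the host as one burst.
 */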
6082void drbd_send_acks_wf(struct work_struct *ws)
6083{
6084 struct drbd_peer_device *peer_device =
6085 container_of(ws, struct drbd_peer_device, send_acks_work);
6086 struct drbd_connection *connection = peer_device->connection;
6087 struct drbd_device *device = peer_device->device;
6088 struct net_conf *nc;
6089 int tcp_cork, err;
6090
6091 rcu_read_lock();
6092 nc = rcu_dereference(connection->net_conf);
6093 tcp_cork = nc->tcp_cork;
6094 rcu_read_unlock();
6095
6096 if (tcp_cork)
db10538a 6097 tcp_sock_set_cork(connection->meta.socket->sk, true);
668700b4
PR
6098
6099 err = drbd_finish_peer_reqs(device);
6100 kref_put(&device->kref, drbd_destroy_device);
 6101 /* The matching kref_get() is in drbd_endio_write_sec_final().  It keeps the
 6102 struct work_struct send_acks_work, embedded in the peer_device object, alive. */
6103
6104 if (err) {
6105 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6106 return;
6107 }
6108
6109 if (tcp_cork)
db10538a 6110 tcp_sock_set_cork(connection->meta.socket->sk, false);
668700b4
PR
6111
6112 return;
6113}