// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */


#include <linux/module.h>

#include <linux/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME|DRBD_FF_WZEROES)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

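/*
 * Illustrative sketch (not part of the original file): a typical round trip
 * through the page chain helpers above.  A chunk of pages is detached from
 * the global pool under drbd_pp_lock, used privately, and later spliced back.
 * This mirrors what __drbd_alloc_pages() and drbd_free_pages() below actually
 * do; everything except the helpers themselves is assumed for the example.
 *
 *	struct page *chain, *tail;
 *	int n = 8;
 *
 *	spin_lock(&drbd_pp_lock);
 *	chain = page_chain_del(&drbd_pp_pool, n);  // NULL if fewer than n linked
 *	if (chain)
 *		drbd_pp_vacant -= n;
 *	spin_unlock(&drbd_pp_lock);
 *	if (!chain)
 *		return;		// nothing taken, pool left untouched
 *
 *	// ... use the chain; page_chain_next() walks it via page->private ...
 *
 *	tail = page_chain_tail(chain, &n);	// fine outside the lock
 *	spin_lock(&drbd_pp_lock);
 *	page_chain_add(&drbd_pp_pool, chain, tail);
 *	drbd_pp_vacant += n;
 *	spin_unlock(&drbd_pp_lock);
 */
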
static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}
191
b30ab791 192static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
a990be46 193 struct list_head *to_be_freed)
b411b363 194{
a8cd15ba 195 struct drbd_peer_request *peer_req, *tmp;
196
 197 /* The EEs are always appended to the end of the list. Since
 198 they are sent in order over the wire, they have to finish
 199 in order. As soon as we see the first one that has not finished,
 200 we can stop examining the list... */
201
a8cd15ba 202 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
045417f7 203 if (drbd_peer_req_has_active_page(peer_req))
b411b363 204 break;
a8cd15ba 205 list_move(&peer_req->w.list, to_be_freed);
b411b363
PR
206 }
207}
208
668700b4 209static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
b411b363
PR
210{
211 LIST_HEAD(reclaimed);
db830c46 212 struct drbd_peer_request *peer_req, *t;
b411b363 213
0500813f 214 spin_lock_irq(&device->resource->req_lock);
b30ab791 215 reclaim_finished_net_peer_reqs(device, &reclaimed);
0500813f 216 spin_unlock_irq(&device->resource->req_lock);
a8cd15ba 217 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
b30ab791 218 drbd_free_net_peer_req(device, peer_req);
b411b363
PR
219}
220
668700b4
PR
221static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
222{
223 struct drbd_peer_device *peer_device;
224 int vnr;
225
226 rcu_read_lock();
227 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
228 struct drbd_device *device = peer_device->device;
229 if (!atomic_read(&device->pp_in_use_by_net))
230 continue;
231
232 kref_get(&device->kref);
233 rcu_read_unlock();
234 drbd_reclaim_net_peer_reqs(device);
235 kref_put(&device->kref, drbd_destroy_device);
236 rcu_read_lock();
237 }
238 rcu_read_unlock();
239}
240
b411b363 241/**
c37c8ecf 242 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
b30ab791 243 * @device: DRBD device.
45bb912b
LE
244 * @number: number of pages requested
245 * @retry: whether to retry, if not enough pages are available right now
246 *
247 * Tries to allocate number pages, first from our own page pool, then from
0e49d7b0 248 * the kernel.
45bb912b 249 * Possibly retry until DRBD frees sufficient pages somewhere else.
b411b363 250 *
0e49d7b0
LE
251 * If this allocation would exceed the max_buffers setting, we throttle
252 * allocation (schedule_timeout) to give the system some room to breathe.
253 *
254 * We do not use max-buffers as hard limit, because it could lead to
255 * congestion and further to a distributed deadlock during online-verify or
256 * (checksum based) resync, if the max-buffers, socket buffer sizes and
257 * resync-rate settings are mis-configured.
258 *
45bb912b 259 * Returns a page chain linked via page->private.
b411b363 260 */
69a22773 261struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
c37c8ecf 262 bool retry)
b411b363 263{
69a22773 264 struct drbd_device *device = peer_device->device;
b411b363 265 struct page *page = NULL;
44ed167d 266 struct net_conf *nc;
b411b363 267 DEFINE_WAIT(wait);
0e49d7b0 268 unsigned int mxb;
b411b363 269
44ed167d 270 rcu_read_lock();
69a22773 271 nc = rcu_dereference(peer_device->connection->net_conf);
44ed167d
PR
272 mxb = nc ? nc->max_buffers : 1000000;
273 rcu_read_unlock();
274
b30ab791
AG
275 if (atomic_read(&device->pp_in_use) < mxb)
276 page = __drbd_alloc_pages(device, number);
b411b363 277
 278 /* Try to keep the fast path fast, but occasionally we need
 279 * to reclaim the pages we lent to the network stack. */
280 if (page && atomic_read(&device->pp_in_use_by_net) > 512)
281 drbd_reclaim_net_peer_reqs(device);
282
45bb912b 283 while (page == NULL) {
b411b363
PR
284 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
285
668700b4 286 drbd_reclaim_net_peer_reqs(device);
b411b363 287
b30ab791
AG
288 if (atomic_read(&device->pp_in_use) < mxb) {
289 page = __drbd_alloc_pages(device, number);
b411b363
PR
290 if (page)
291 break;
292 }
293
294 if (!retry)
295 break;
296
297 if (signal_pending(current)) {
d0180171 298 drbd_warn(device, "drbd_alloc_pages interrupted!\n");
b411b363
PR
299 break;
300 }
301
0e49d7b0
LE
302 if (schedule_timeout(HZ/10) == 0)
303 mxb = UINT_MAX;
b411b363
PR
304 }
305 finish_wait(&drbd_pp_wait, &wait);
306
45bb912b 307 if (page)
b30ab791 308 atomic_add(number, &device->pp_in_use);
b411b363
PR
309 return page;
310}
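
/*
 * Illustrative sketch (not part of the original file): how a receive-path
 * caller pairs the allocation above with the pp_in_use accounting.  In the
 * real code this happens via drbd_alloc_peer_req()/drbd_free_peer_req();
 * the variable names here are assumptions for the example only.
 *
 *	unsigned int nr_pages = (payload_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = drbd_alloc_pages(peer_device, nr_pages, true);
 *	if (!page)
 *		return NULL;	// with retry == true, only NULL if signalled
 *	// device->pp_in_use was increased by nr_pages; drbd_free_pages()
 *	// below decreases it again and repools or frees the chain.
 */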
311
 312/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 313 * Is also used from inside another spin_lock_irq(&resource->req_lock);
45bb912b
LE
314 * Either links the page chain back to the global pool,
315 * or returns all pages to the system. */
b30ab791 316static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
b411b363 317{
b30ab791 318 atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
b411b363 319 int i;
435f0740 320
a73ff323
LE
321 if (page == NULL)
322 return;
323
183ece30 324 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
45bb912b
LE
325 i = page_chain_free(page);
326 else {
327 struct page *tmp;
328 tmp = page_chain_tail(page, &i);
329 spin_lock(&drbd_pp_lock);
330 page_chain_add(&drbd_pp_pool, page, tmp);
331 drbd_pp_vacant += i;
332 spin_unlock(&drbd_pp_lock);
b411b363 333 }
435f0740 334 i = atomic_sub_return(i, a);
45bb912b 335 if (i < 0)
d0180171 336 drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
435f0740 337 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
b411b363
PR
338 wake_up(&drbd_pp_wait);
339}
340
341/*
342You need to hold the req_lock:
343 _drbd_wait_ee_list_empty()
344
345You must not have the req_lock:
3967deb1 346 drbd_free_peer_req()
0db55363 347 drbd_alloc_peer_req()
7721f567 348 drbd_free_peer_reqs()
b411b363 349 drbd_ee_fix_bhs()
a990be46 350 drbd_finish_peer_reqs()
b411b363
PR
351 drbd_clear_done_ee()
352 drbd_wait_ee_list_empty()
353*/
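
/*
 * Illustrative sketch (not part of the original file) of the locking rules
 * listed above, e.g. draining an ee list: the underscore-prefixed variant
 * runs with req_lock already held, the others take it themselves.
 *
 *	spin_lock_irq(&device->resource->req_lock);
 *	_drbd_wait_ee_list_empty(device, &device->active_ee);	// req_lock held
 *	spin_unlock_irq(&device->resource->req_lock);
 *
 *	drbd_free_peer_reqs(device, &device->net_ee);	// must NOT hold req_lock
 */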
354
9104d31a
LE
355/* normal: payload_size == request size (bi_size)
356 * w_same: payload_size == logical_block_size
357 * trim: payload_size == 0 */
f6ffca9f 358struct drbd_peer_request *
69a22773 359drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
9104d31a 360 unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
b411b363 361{
69a22773 362 struct drbd_device *device = peer_device->device;
db830c46 363 struct drbd_peer_request *peer_req;
a73ff323 364 struct page *page = NULL;
9104d31a 365 unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT;
b411b363 366
b30ab791 367 if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
b411b363
PR
368 return NULL;
369
0892fac8 370 peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
db830c46 371 if (!peer_req) {
b411b363 372 if (!(gfp_mask & __GFP_NOWARN))
d0180171 373 drbd_err(device, "%s: allocation failed\n", __func__);
b411b363
PR
374 return NULL;
375 }
376
9104d31a 377 if (nr_pages) {
d0164adc
MG
378 page = drbd_alloc_pages(peer_device, nr_pages,
379 gfpflags_allow_blocking(gfp_mask));
a73ff323
LE
380 if (!page)
381 goto fail;
382 }
b411b363 383
c5a2c150
LE
384 memset(peer_req, 0, sizeof(*peer_req));
385 INIT_LIST_HEAD(&peer_req->w.list);
db830c46 386 drbd_clear_interval(&peer_req->i);
9104d31a 387 peer_req->i.size = request_size;
db830c46 388 peer_req->i.sector = sector;
c5a2c150 389 peer_req->submit_jif = jiffies;
a8cd15ba 390 peer_req->peer_device = peer_device;
db830c46 391 peer_req->pages = page;
9a8e7753
AG
392 /*
393 * The block_id is opaque to the receiver. It is not endianness
394 * converted, and sent back to the sender unchanged.
395 */
db830c46 396 peer_req->block_id = id;
b411b363 397
db830c46 398 return peer_req;
b411b363 399
45bb912b 400 fail:
0892fac8 401 mempool_free(peer_req, &drbd_ee_mempool);
b411b363
PR
402 return NULL;
403}
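
/*
 * Illustrative sketch (not part of the original file): allocating and
 * releasing a peer request around a received write.  The id, sector and size
 * are assumptions standing in for values taken from the wire.
 *
 *	struct drbd_peer_request *peer_req;
 *
 *	peer_req = drbd_alloc_peer_req(peer_device, id, sector,
 *				       size, size, GFP_NOIO);
 *	if (!peer_req)
 *		return -ENOMEM;
 *	// ... receive the payload into peer_req->pages, submit it, etc. ...
 *	drbd_free_peer_req(device, peer_req);	// see __drbd_free_peer_req() below
 */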
404
b30ab791 405void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
f6ffca9f 406 int is_net)
b411b363 407{
21ae5d7f 408 might_sleep();
db830c46
AG
409 if (peer_req->flags & EE_HAS_DIGEST)
410 kfree(peer_req->digest);
b30ab791 411 drbd_free_pages(device, peer_req->pages, is_net);
0b0ba1ef
AG
412 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
413 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
21ae5d7f
LE
414 if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
415 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
416 drbd_al_complete_io(device, &peer_req->i);
417 }
0892fac8 418 mempool_free(peer_req, &drbd_ee_mempool);
b411b363
PR
419}
420
b30ab791 421int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
b411b363
PR
422{
423 LIST_HEAD(work_list);
db830c46 424 struct drbd_peer_request *peer_req, *t;
b411b363 425 int count = 0;
b30ab791 426 int is_net = list == &device->net_ee;
b411b363 427
0500813f 428 spin_lock_irq(&device->resource->req_lock);
b411b363 429 list_splice_init(list, &work_list);
0500813f 430 spin_unlock_irq(&device->resource->req_lock);
b411b363 431
a8cd15ba 432 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
b30ab791 433 __drbd_free_peer_req(device, peer_req, is_net);
b411b363
PR
434 count++;
435 }
436 return count;
437}
438
b411b363 439/*
a990be46 440 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
b411b363 441 */
b30ab791 442static int drbd_finish_peer_reqs(struct drbd_device *device)
b411b363
PR
443{
444 LIST_HEAD(work_list);
445 LIST_HEAD(reclaimed);
db830c46 446 struct drbd_peer_request *peer_req, *t;
e2b3032b 447 int err = 0;
b411b363 448
0500813f 449 spin_lock_irq(&device->resource->req_lock);
b30ab791
AG
450 reclaim_finished_net_peer_reqs(device, &reclaimed);
451 list_splice_init(&device->done_ee, &work_list);
0500813f 452 spin_unlock_irq(&device->resource->req_lock);
b411b363 453
a8cd15ba 454 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
b30ab791 455 drbd_free_net_peer_req(device, peer_req);
b411b363
PR
456
457 /* possible callbacks here:
d4dabbe2 458 * e_end_block, and e_end_resync_block, e_send_superseded.
b411b363
PR
459 * all ignore the last argument.
460 */
a8cd15ba 461 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
e2b3032b
AG
462 int err2;
463
b411b363 464 /* list_del not necessary, next/prev members not touched */
a8cd15ba 465 err2 = peer_req->w.cb(&peer_req->w, !!err);
e2b3032b
AG
466 if (!err)
467 err = err2;
b30ab791 468 drbd_free_peer_req(device, peer_req);
b411b363 469 }
b30ab791 470 wake_up(&device->ee_wait);
b411b363 471
e2b3032b 472 return err;
b411b363
PR
473}
474
b30ab791 475static void _drbd_wait_ee_list_empty(struct drbd_device *device,
d4da1537 476 struct list_head *head)
b411b363
PR
477{
478 DEFINE_WAIT(wait);
479
480 /* avoids spin_lock/unlock
481 * and calling prepare_to_wait in the fast path */
482 while (!list_empty(head)) {
b30ab791 483 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
0500813f 484 spin_unlock_irq(&device->resource->req_lock);
7eaceacc 485 io_schedule();
b30ab791 486 finish_wait(&device->ee_wait, &wait);
0500813f 487 spin_lock_irq(&device->resource->req_lock);
b411b363
PR
488 }
489}
490
b30ab791 491static void drbd_wait_ee_list_empty(struct drbd_device *device,
d4da1537 492 struct list_head *head)
b411b363 493{
0500813f 494 spin_lock_irq(&device->resource->req_lock);
b30ab791 495 _drbd_wait_ee_list_empty(device, head);
0500813f 496 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
497}
498
dbd9eea0 499static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
b411b363 500{
b411b363
PR
501 struct kvec iov = {
502 .iov_base = buf,
503 .iov_len = size,
504 };
505 struct msghdr msg = {
b411b363
PR
506 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
507 };
aa563d7b 508 iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
f7765c36 509 return sock_recvmsg(sock, &msg, msg.msg_flags);
b411b363
PR
510}
511
bde89a9e 512static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
b411b363 513{
b411b363
PR
514 int rv;
515
bde89a9e 516 rv = drbd_recv_short(connection->data.socket, buf, size, 0);
b411b363 517
dbd0820c
PR
518 if (rv < 0) {
519 if (rv == -ECONNRESET)
1ec861eb 520 drbd_info(connection, "sock was reset by peer\n");
dbd0820c 521 else if (rv != -ERESTARTSYS)
1ec861eb 522 drbd_err(connection, "sock_recvmsg returned %d\n", rv);
dbd0820c 523 } else if (rv == 0) {
bde89a9e 524 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
b66623e3
PR
525 long t;
526 rcu_read_lock();
bde89a9e 527 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
b66623e3
PR
528 rcu_read_unlock();
529
bde89a9e 530 t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
b66623e3 531
599377ac
PR
532 if (t)
533 goto out;
534 }
1ec861eb 535 drbd_info(connection, "sock was shut down by peer\n");
599377ac
PR
536 }
537
b411b363 538 if (rv != size)
bde89a9e 539 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
b411b363 540
599377ac 541out:
b411b363
PR
542 return rv;
543}
544
bde89a9e 545static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
c6967746
AG
546{
547 int err;
548
bde89a9e 549 err = drbd_recv(connection, buf, size);
c6967746
AG
550 if (err != size) {
551 if (err >= 0)
552 err = -EIO;
553 } else
554 err = 0;
555 return err;
556}
557
bde89a9e 558static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
a5c31904
AG
559{
560 int err;
561
bde89a9e 562 err = drbd_recv_all(connection, buf, size);
a5c31904 563 if (err && !signal_pending(current))
1ec861eb 564 drbd_warn(connection, "short read (expected size %d)\n", (int)size);
a5c31904
AG
565 return err;
566}
567
5dbf1673
LE
568/* quoting tcp(7):
569 * On individual connections, the socket buffer size must be set prior to the
570 * listen(2) or connect(2) calls in order to have it take effect.
571 * This is our wrapper to do so.
572 */
573static void drbd_setbufsize(struct socket *sock, unsigned int snd,
574 unsigned int rcv)
575{
576 /* open coded SO_SNDBUF, SO_RCVBUF */
577 if (snd) {
578 sock->sk->sk_sndbuf = snd;
579 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
580 }
581 if (rcv) {
582 sock->sk->sk_rcvbuf = rcv;
583 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
584 }
585}
586
bde89a9e 587static struct socket *drbd_try_connect(struct drbd_connection *connection)
b411b363
PR
588{
589 const char *what;
590 struct socket *sock;
591 struct sockaddr_in6 src_in6;
44ed167d
PR
592 struct sockaddr_in6 peer_in6;
593 struct net_conf *nc;
594 int err, peer_addr_len, my_addr_len;
69ef82de 595 int sndbuf_size, rcvbuf_size, connect_int;
b411b363
PR
596 int disconnect_on_error = 1;
597
44ed167d 598 rcu_read_lock();
bde89a9e 599 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
600 if (!nc) {
601 rcu_read_unlock();
b411b363 602 return NULL;
44ed167d 603 }
44ed167d
PR
604 sndbuf_size = nc->sndbuf_size;
605 rcvbuf_size = nc->rcvbuf_size;
69ef82de 606 connect_int = nc->connect_int;
089c075d 607 rcu_read_unlock();
44ed167d 608
bde89a9e
AG
609 my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
610 memcpy(&src_in6, &connection->my_addr, my_addr_len);
44ed167d 611
bde89a9e 612 if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
44ed167d
PR
613 src_in6.sin6_port = 0;
614 else
615 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
616
bde89a9e
AG
617 peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
618 memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
b411b363
PR
619
620 what = "sock_create_kern";
eeb1bd5c 621 err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
44ed167d 622 SOCK_STREAM, IPPROTO_TCP, &sock);
b411b363
PR
623 if (err < 0) {
624 sock = NULL;
625 goto out;
626 }
627
628 sock->sk->sk_rcvtimeo =
69ef82de 629 sock->sk->sk_sndtimeo = connect_int * HZ;
44ed167d 630 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
b411b363
PR
631
632 /* explicitly bind to the configured IP as source IP
633 * for the outgoing connections.
634 * This is needed for multihomed hosts and to be
635 * able to use lo: interfaces for drbd.
636 * Make sure to use 0 as port number, so linux selects
637 * a free one dynamically.
638 */
b411b363 639 what = "bind before connect";
44ed167d 640 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
b411b363
PR
641 if (err < 0)
642 goto out;
643
644 /* connect may fail, peer not yet available.
645 * stay C_WF_CONNECTION, don't go Disconnecting! */
646 disconnect_on_error = 0;
647 what = "connect";
44ed167d 648 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
b411b363
PR
649
650out:
651 if (err < 0) {
652 if (sock) {
653 sock_release(sock);
654 sock = NULL;
655 }
656 switch (-err) {
657 /* timeout, busy, signal pending */
658 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
659 case EINTR: case ERESTARTSYS:
660 /* peer not (yet) available, network problem */
661 case ECONNREFUSED: case ENETUNREACH:
662 case EHOSTDOWN: case EHOSTUNREACH:
663 disconnect_on_error = 0;
664 break;
665 default:
1ec861eb 666 drbd_err(connection, "%s failed, err = %d\n", what, err);
b411b363
PR
667 }
668 if (disconnect_on_error)
bde89a9e 669 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 670 }
44ed167d 671
b411b363
PR
672 return sock;
673}
674
7a426fd8 675struct accept_wait_data {
bde89a9e 676 struct drbd_connection *connection;
7a426fd8
PR
677 struct socket *s_listen;
678 struct completion door_bell;
679 void (*original_sk_state_change)(struct sock *sk);
680
681};
682
715306f6 683static void drbd_incoming_connection(struct sock *sk)
7a426fd8
PR
684{
685 struct accept_wait_data *ad = sk->sk_user_data;
715306f6 686 void (*state_change)(struct sock *sk);
7a426fd8 687
715306f6
AG
688 state_change = ad->original_sk_state_change;
689 if (sk->sk_state == TCP_ESTABLISHED)
690 complete(&ad->door_bell);
691 state_change(sk);
7a426fd8
PR
692}
693
bde89a9e 694static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
b411b363 695{
1f3e509b 696 int err, sndbuf_size, rcvbuf_size, my_addr_len;
44ed167d 697 struct sockaddr_in6 my_addr;
1f3e509b 698 struct socket *s_listen;
44ed167d 699 struct net_conf *nc;
b411b363
PR
700 const char *what;
701
44ed167d 702 rcu_read_lock();
bde89a9e 703 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
704 if (!nc) {
705 rcu_read_unlock();
7a426fd8 706 return -EIO;
44ed167d 707 }
44ed167d
PR
708 sndbuf_size = nc->sndbuf_size;
709 rcvbuf_size = nc->rcvbuf_size;
44ed167d 710 rcu_read_unlock();
b411b363 711
bde89a9e
AG
712 my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
713 memcpy(&my_addr, &connection->my_addr, my_addr_len);
b411b363
PR
714
715 what = "sock_create_kern";
eeb1bd5c 716 err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
1f3e509b 717 SOCK_STREAM, IPPROTO_TCP, &s_listen);
b411b363
PR
718 if (err) {
719 s_listen = NULL;
720 goto out;
721 }
722
98683650 723 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
44ed167d 724 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
b411b363
PR
725
726 what = "bind before listen";
44ed167d 727 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
b411b363
PR
728 if (err < 0)
729 goto out;
730
7a426fd8
PR
731 ad->s_listen = s_listen;
732 write_lock_bh(&s_listen->sk->sk_callback_lock);
733 ad->original_sk_state_change = s_listen->sk->sk_state_change;
715306f6 734 s_listen->sk->sk_state_change = drbd_incoming_connection;
7a426fd8
PR
735 s_listen->sk->sk_user_data = ad;
736 write_unlock_bh(&s_listen->sk->sk_callback_lock);
b411b363 737
2820fd39
PR
738 what = "listen";
739 err = s_listen->ops->listen(s_listen, 5);
740 if (err < 0)
741 goto out;
742
7a426fd8 743 return 0;
b411b363
PR
744out:
745 if (s_listen)
746 sock_release(s_listen);
747 if (err < 0) {
748 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1ec861eb 749 drbd_err(connection, "%s failed, err = %d\n", what, err);
bde89a9e 750 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
751 }
752 }
b411b363 753
7a426fd8 754 return -EIO;
b411b363
PR
755}
756
715306f6 757static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
b411b363 758{
715306f6
AG
759 write_lock_bh(&sk->sk_callback_lock);
760 sk->sk_state_change = ad->original_sk_state_change;
761 sk->sk_user_data = NULL;
762 write_unlock_bh(&sk->sk_callback_lock);
b411b363
PR
763}
764
bde89a9e 765static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
b411b363 766{
1f3e509b
PR
767 int timeo, connect_int, err = 0;
768 struct socket *s_estab = NULL;
1f3e509b
PR
769 struct net_conf *nc;
770
771 rcu_read_lock();
bde89a9e 772 nc = rcu_dereference(connection->net_conf);
1f3e509b
PR
773 if (!nc) {
774 rcu_read_unlock();
775 return NULL;
776 }
777 connect_int = nc->connect_int;
778 rcu_read_unlock();
779
780 timeo = connect_int * HZ;
38b682b2
AM
781 /* 28.5% random jitter */
782 timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
1f3e509b 783
7a426fd8
PR
784 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
785 if (err <= 0)
786 return NULL;
b411b363 787
7a426fd8 788 err = kernel_accept(ad->s_listen, &s_estab, 0);
b411b363
PR
789 if (err < 0) {
790 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1ec861eb 791 drbd_err(connection, "accept failed, err = %d\n", err);
bde89a9e 792 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
793 }
794 }
b411b363 795
715306f6
AG
796 if (s_estab)
797 unregister_state_change(s_estab->sk, ad);
b411b363 798
b411b363
PR
799 return s_estab;
800}
b411b363 801
bde89a9e 802static int decode_header(struct drbd_connection *, void *, struct packet_info *);
b411b363 803
bde89a9e 804static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
9f5bdc33
AG
805 enum drbd_packet cmd)
806{
bde89a9e 807 if (!conn_prepare_command(connection, sock))
9f5bdc33 808 return -EIO;
bde89a9e 809 return conn_send_command(connection, sock, cmd, 0, NULL, 0);
b411b363
PR
810}
811
bde89a9e 812static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
b411b363 813{
bde89a9e 814 unsigned int header_size = drbd_header_size(connection);
9f5bdc33 815 struct packet_info pi;
4920e37a 816 struct net_conf *nc;
9f5bdc33 817 int err;
b411b363 818
4920e37a
PR
819 rcu_read_lock();
820 nc = rcu_dereference(connection->net_conf);
821 if (!nc) {
822 rcu_read_unlock();
823 return -EIO;
824 }
825 sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
826 rcu_read_unlock();
827
bde89a9e 828 err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
9f5bdc33
AG
829 if (err != header_size) {
830 if (err >= 0)
831 err = -EIO;
832 return err;
833 }
bde89a9e 834 err = decode_header(connection, connection->data.rbuf, &pi);
9f5bdc33
AG
835 if (err)
836 return err;
837 return pi.cmd;
b411b363
PR
838}
839
840/**
841 * drbd_socket_okay() - Free the socket if its connection is not okay
b411b363
PR
842 * @sock: pointer to the pointer to the socket.
843 */
5d0b17f1 844static bool drbd_socket_okay(struct socket **sock)
b411b363
PR
845{
846 int rr;
847 char tb[4];
848
849 if (!*sock)
81e84650 850 return false;
b411b363 851
dbd9eea0 852 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
b411b363
PR
853
854 if (rr > 0 || rr == -EAGAIN) {
81e84650 855 return true;
b411b363
PR
856 } else {
857 sock_release(*sock);
858 *sock = NULL;
81e84650 859 return false;
b411b363
PR
860 }
861}
5d0b17f1
PR
862
863static bool connection_established(struct drbd_connection *connection,
864 struct socket **sock1,
865 struct socket **sock2)
866{
867 struct net_conf *nc;
868 int timeout;
869 bool ok;
870
871 if (!*sock1 || !*sock2)
872 return false;
873
874 rcu_read_lock();
875 nc = rcu_dereference(connection->net_conf);
876 timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
877 rcu_read_unlock();
878 schedule_timeout_interruptible(timeout);
879
880 ok = drbd_socket_okay(sock1);
881 ok = drbd_socket_okay(sock2) && ok;
882
883 return ok;
884}
885
2325eb66
PR
886/* Gets called if a connection is established, or if a new minor gets created
887 in a connection */
69a22773 888int drbd_connected(struct drbd_peer_device *peer_device)
907599e0 889{
69a22773 890 struct drbd_device *device = peer_device->device;
0829f5ed 891 int err;
907599e0 892
b30ab791
AG
893 atomic_set(&device->packet_seq, 0);
894 device->peer_seq = 0;
907599e0 895
69a22773
AG
896 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
897 &peer_device->connection->cstate_mutex :
b30ab791 898 &device->own_state_mutex;
8410da8f 899
69a22773 900 err = drbd_send_sync_param(peer_device);
0829f5ed 901 if (!err)
69a22773 902 err = drbd_send_sizes(peer_device, 0, 0);
0829f5ed 903 if (!err)
69a22773 904 err = drbd_send_uuids(peer_device);
0829f5ed 905 if (!err)
69a22773 906 err = drbd_send_current_state(peer_device);
b30ab791
AG
907 clear_bit(USE_DEGR_WFC_T, &device->flags);
908 clear_bit(RESIZE_PENDING, &device->flags);
909 atomic_set(&device->ap_in_flight, 0);
910 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
0829f5ed 911 return err;
907599e0 912}
b411b363
PR
913
914/*
915 * return values:
916 * 1 yes, we have a valid connection
917 * 0 oops, did not work out, please try again
918 * -1 peer talks different language,
919 * no point in trying again, please go standalone.
920 * -2 We do not have a network config...
921 */
bde89a9e 922static int conn_connect(struct drbd_connection *connection)
b411b363 923{
7da35862 924 struct drbd_socket sock, msock;
c06ece6b 925 struct drbd_peer_device *peer_device;
44ed167d 926 struct net_conf *nc;
5d0b17f1
PR
927 int vnr, timeout, h;
928 bool discard_my_data, ok;
197296ff 929 enum drbd_state_rv rv;
7a426fd8 930 struct accept_wait_data ad = {
bde89a9e 931 .connection = connection,
7a426fd8
PR
932 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
933 };
b411b363 934
bde89a9e
AG
935 clear_bit(DISCONNECT_SENT, &connection->flags);
936 if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
937 return -2;
938
7da35862 939 mutex_init(&sock.mutex);
bde89a9e
AG
940 sock.sbuf = connection->data.sbuf;
941 sock.rbuf = connection->data.rbuf;
7da35862
PR
942 sock.socket = NULL;
943 mutex_init(&msock.mutex);
bde89a9e
AG
944 msock.sbuf = connection->meta.sbuf;
945 msock.rbuf = connection->meta.rbuf;
7da35862
PR
946 msock.socket = NULL;
947
0916e0e3 948 /* Assume that the peer only understands protocol 80 until we know better. */
bde89a9e 949 connection->agreed_pro_version = 80;
b411b363 950
bde89a9e 951 if (prepare_listen_socket(connection, &ad))
7a426fd8 952 return 0;
b411b363
PR
953
954 do {
2bf89621 955 struct socket *s;
b411b363 956
bde89a9e 957 s = drbd_try_connect(connection);
b411b363 958 if (s) {
7da35862
PR
959 if (!sock.socket) {
960 sock.socket = s;
bde89a9e 961 send_first_packet(connection, &sock, P_INITIAL_DATA);
7da35862 962 } else if (!msock.socket) {
bde89a9e 963 clear_bit(RESOLVE_CONFLICTS, &connection->flags);
7da35862 964 msock.socket = s;
bde89a9e 965 send_first_packet(connection, &msock, P_INITIAL_META);
b411b363 966 } else {
1ec861eb 967 drbd_err(connection, "Logic error in conn_connect()\n");
b411b363
PR
968 goto out_release_sockets;
969 }
970 }
971
5d0b17f1
PR
972 if (connection_established(connection, &sock.socket, &msock.socket))
973 break;
b411b363
PR
974
975retry:
bde89a9e 976 s = drbd_wait_for_connect(connection, &ad);
b411b363 977 if (s) {
bde89a9e 978 int fp = receive_first_packet(connection, s);
7da35862
PR
979 drbd_socket_okay(&sock.socket);
980 drbd_socket_okay(&msock.socket);
92f14951 981 switch (fp) {
e5d6f33a 982 case P_INITIAL_DATA:
7da35862 983 if (sock.socket) {
1ec861eb 984 drbd_warn(connection, "initial packet S crossed\n");
7da35862 985 sock_release(sock.socket);
80c6eed4
PR
986 sock.socket = s;
987 goto randomize;
b411b363 988 }
7da35862 989 sock.socket = s;
b411b363 990 break;
e5d6f33a 991 case P_INITIAL_META:
bde89a9e 992 set_bit(RESOLVE_CONFLICTS, &connection->flags);
7da35862 993 if (msock.socket) {
1ec861eb 994 drbd_warn(connection, "initial packet M crossed\n");
7da35862 995 sock_release(msock.socket);
80c6eed4
PR
996 msock.socket = s;
997 goto randomize;
b411b363 998 }
7da35862 999 msock.socket = s;
b411b363
PR
1000 break;
1001 default:
1ec861eb 1002 drbd_warn(connection, "Error receiving initial packet\n");
b411b363 1003 sock_release(s);
80c6eed4 1004randomize:
38b682b2 1005 if (prandom_u32() & 1)
b411b363
PR
1006 goto retry;
1007 }
1008 }
1009
bde89a9e 1010 if (connection->cstate <= C_DISCONNECTING)
b411b363
PR
1011 goto out_release_sockets;
1012 if (signal_pending(current)) {
1013 flush_signals(current);
1014 smp_rmb();
bde89a9e 1015 if (get_t_state(&connection->receiver) == EXITING)
b411b363
PR
1016 goto out_release_sockets;
1017 }
1018
5d0b17f1 1019 ok = connection_established(connection, &sock.socket, &msock.socket);
b666dbf8 1020 } while (!ok);
b411b363 1021
7a426fd8
PR
1022 if (ad.s_listen)
1023 sock_release(ad.s_listen);
b411b363 1024
98683650
PR
1025 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
1026 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 1027
7da35862
PR
1028 sock.socket->sk->sk_allocation = GFP_NOIO;
1029 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 1030
7da35862
PR
1031 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1032 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 1033
b411b363 1034 /* NOT YET ...
bde89a9e 1035 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
7da35862 1036 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 1037 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 1038 * which we set to 4x the configured ping_timeout. */
44ed167d 1039 rcu_read_lock();
bde89a9e 1040 nc = rcu_dereference(connection->net_conf);
44ed167d 1041
7da35862
PR
1042 sock.socket->sk->sk_sndtimeo =
1043 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 1044
7da35862 1045 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 1046 timeout = nc->timeout * HZ / 10;
08b165ba 1047 discard_my_data = nc->discard_my_data;
44ed167d 1048 rcu_read_unlock();
b411b363 1049
7da35862 1050 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1051
1052 /* we don't want delays.
25985edc 1053 * we use TCP_CORK where appropriate, though */
12abc5ee
CH
1054 tcp_sock_set_nodelay(sock.socket->sk);
1055 tcp_sock_set_nodelay(msock.socket->sk);
b411b363 1056
bde89a9e
AG
1057 connection->data.socket = sock.socket;
1058 connection->meta.socket = msock.socket;
1059 connection->last_received = jiffies;
b411b363 1060
bde89a9e 1061 h = drbd_do_features(connection);
b411b363
PR
1062 if (h <= 0)
1063 return h;
1064
bde89a9e 1065 if (connection->cram_hmac_tfm) {
b30ab791 1066 /* drbd_request_state(device, NS(conn, WFAuth)); */
bde89a9e 1067 switch (drbd_do_auth(connection)) {
b10d96cb 1068 case -1:
1ec861eb 1069 drbd_err(connection, "Authentication of peer failed\n");
b411b363 1070 return -1;
b10d96cb 1071 case 0:
1ec861eb 1072 drbd_err(connection, "Authentication of peer failed, trying again.\n");
b10d96cb 1073 return 0;
b411b363
PR
1074 }
1075 }
1076
bde89a9e
AG
1077 connection->data.socket->sk->sk_sndtimeo = timeout;
1078 connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1079
bde89a9e 1080 if (drbd_send_protocol(connection) == -EOPNOTSUPP)
7e2455c1 1081 return -1;
b411b363 1082
31007745
PR
1083 /* Prevent a race between resync-handshake and
1084 * being promoted to Primary.
1085 *
1086 * Grab and release the state mutex, so we know that any current
1087 * drbd_set_role() is finished, and any incoming drbd_set_role
1088 * will see the STATE_SENT flag, and wait for it to be cleared.
1089 */
1090 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1091 mutex_lock(peer_device->device->state_mutex);
1092
cde81d99
LE
1093 /* avoid a race with conn_request_state( C_DISCONNECTING ) */
1094 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 1095 set_bit(STATE_SENT, &connection->flags);
cde81d99 1096 spin_unlock_irq(&connection->resource->req_lock);
a1096a6e 1097
31007745
PR
1098 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1099 mutex_unlock(peer_device->device->state_mutex);
1100
c141ebda 1101 rcu_read_lock();
c06ece6b
AG
1102 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1103 struct drbd_device *device = peer_device->device;
b30ab791 1104 kref_get(&device->kref);
26ea8f92
AG
1105 rcu_read_unlock();
1106
08b165ba 1107 if (discard_my_data)
b30ab791 1108 set_bit(DISCARD_MY_DATA, &device->flags);
08b165ba 1109 else
b30ab791 1110 clear_bit(DISCARD_MY_DATA, &device->flags);
08b165ba 1111
69a22773 1112 drbd_connected(peer_device);
05a10ec7 1113 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
1114 rcu_read_lock();
1115 }
1116 rcu_read_unlock();
1117
bde89a9e
AG
1118 rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1119 if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
1120 clear_bit(STATE_SENT, &connection->flags);
1e86ac48 1121 return 0;
a1096a6e 1122 }
1e86ac48 1123
1c03e520 1124 drbd_thread_start(&connection->ack_receiver);
39e91a60
LE
1125 /* opencoded create_singlethread_workqueue(),
1126 * to be able to use format string arguments */
1127 connection->ack_sender =
1128 alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
668700b4
PR
1129 if (!connection->ack_sender) {
1130 drbd_err(connection, "Failed to create workqueue ack_sender\n");
1131 return 0;
1132 }
b411b363 1133
0500813f 1134 mutex_lock(&connection->resource->conf_update);
08b165ba
PR
1135 /* The discard_my_data flag is a single-shot modifier to the next
1136 * connection attempt, the handshake of which is now well underway.
1137 * No need for rcu style copying of the whole struct
1138 * just to clear a single value. */
bde89a9e 1139 connection->net_conf->discard_my_data = 0;
0500813f 1140 mutex_unlock(&connection->resource->conf_update);
08b165ba 1141
d3fcb490 1142 return h;
b411b363
PR
1143
1144out_release_sockets:
7a426fd8
PR
1145 if (ad.s_listen)
1146 sock_release(ad.s_listen);
7da35862
PR
1147 if (sock.socket)
1148 sock_release(sock.socket);
1149 if (msock.socket)
1150 sock_release(msock.socket);
b411b363
PR
1151 return -1;
1152}
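
/*
 * Illustrative sketch (not part of the original file): how the return
 * convention documented above conn_connect() (1 / 0 / -1 / -2) might be
 * consumed.  The real caller is the receiver thread; this hypothetical loop
 * only illustrates the contract.
 *
 *	int h;
 *
 *	do {
 *		h = conn_connect(connection);	// 0: did not work out, retry
 *	} while (h == 0);
 *	if (h > 0) {
 *		// valid connection: enter the packet receive loop
 *	} else {
 *		// -1: incompatible peer, -2: no net config; stand down
 *	}
 */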
1153
bde89a9e 1154static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
b411b363 1155{
bde89a9e 1156 unsigned int header_size = drbd_header_size(connection);
e658983a 1157
0c8e36d9
AG
1158 if (header_size == sizeof(struct p_header100) &&
1159 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1160 struct p_header100 *h = header;
1161 if (h->pad != 0) {
1ec861eb 1162 drbd_err(connection, "Header padding is not zero\n");
0c8e36d9
AG
1163 return -EINVAL;
1164 }
1165 pi->vnr = be16_to_cpu(h->volume);
1166 pi->cmd = be16_to_cpu(h->command);
1167 pi->size = be32_to_cpu(h->length);
1168 } else if (header_size == sizeof(struct p_header95) &&
1169 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
e658983a 1170 struct p_header95 *h = header;
e658983a 1171 pi->cmd = be16_to_cpu(h->command);
b55d84ba
AG
1172 pi->size = be32_to_cpu(h->length);
1173 pi->vnr = 0;
e658983a
AG
1174 } else if (header_size == sizeof(struct p_header80) &&
1175 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1176 struct p_header80 *h = header;
1177 pi->cmd = be16_to_cpu(h->command);
1178 pi->size = be16_to_cpu(h->length);
77351055 1179 pi->vnr = 0;
02918be2 1180 } else {
1ec861eb 1181 drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
e658983a 1182 be32_to_cpu(*(__be32 *)header),
bde89a9e 1183 connection->agreed_pro_version);
8172f3e9 1184 return -EINVAL;
b411b363 1185 }
e658983a 1186 pi->data = header + header_size;
8172f3e9 1187 return 0;
257d0af6 1188}
b411b363 1189
c51a0ef3
LE
1190static void drbd_unplug_all_devices(struct drbd_connection *connection)
1191{
1192 if (current->plug == &connection->receiver_plug) {
1193 blk_finish_plug(&connection->receiver_plug);
1194 blk_start_plug(&connection->receiver_plug);
1195 } /* else: maybe just schedule() ?? */
1196}
1197
bde89a9e 1198static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
257d0af6 1199{
bde89a9e 1200 void *buffer = connection->data.rbuf;
69bc7bc3 1201 int err;
257d0af6 1202
bde89a9e 1203 err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
a5c31904 1204 if (err)
69bc7bc3 1205 return err;
257d0af6 1206
bde89a9e
AG
1207 err = decode_header(connection, buffer, pi);
1208 connection->last_received = jiffies;
b411b363 1209
69bc7bc3 1210 return err;
b411b363
PR
1211}
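
/*
 * Illustrative sketch (not part of the original file): what a packet handler
 * sees after drbd_recv_header() above.  decode_header() fills struct
 * packet_info the same way for all three on-wire header formats (h80, h95,
 * h100); the switch below is only an assumed consumer.
 *
 *	struct packet_info pi;
 *
 *	if (drbd_recv_header(connection, &pi))
 *		return;		// receive or decode error
 *	// pi.cmd: command, pi.size: payload length, pi.vnr: volume number,
 *	// pi.data: points just past the header inside connection->data.rbuf
 *	switch (pi.cmd) {
 *	case P_DATA:
 *		// read pi.size more bytes of payload for volume pi.vnr ...
 *		break;
 *	}
 */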
1212
c51a0ef3
LE
1213static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
1214{
1215 void *buffer = connection->data.rbuf;
1216 unsigned int size = drbd_header_size(connection);
1217 int err;
1218
1219 err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
1220 if (err != size) {
1221 /* If we have nothing in the receive buffer now, to reduce
1222 * application latency, try to drain the backend queues as
1223 * quickly as possible, and let remote TCP know what we have
1224 * received so far. */
1225 if (err == -EAGAIN) {
ddd061b8 1226 tcp_sock_set_quickack(connection->data.socket->sk, 2);
c51a0ef3
LE
1227 drbd_unplug_all_devices(connection);
1228 }
1229 if (err > 0) {
1230 buffer += err;
1231 size -= err;
1232 }
1233 err = drbd_recv_all_warn(connection, buffer, size);
1234 if (err)
1235 return err;
1236 }
1237
1238 err = decode_header(connection, connection->data.rbuf, pi);
1239 connection->last_received = jiffies;
1240
1241 return err;
1242}
f9ff0da5
LE
1243/* This is blkdev_issue_flush, but asynchronous.
1244 * We want to submit to all component volumes in parallel,
1245 * then wait for all completions.
1246 */
1247struct issue_flush_context {
1248 atomic_t pending;
1249 int error;
1250 struct completion done;
1251};
1252struct one_flush_context {
1253 struct drbd_device *device;
1254 struct issue_flush_context *ctx;
1255};
1256
1ffa7bfa 1257static void one_flush_endio(struct bio *bio)
b411b363 1258{
f9ff0da5
LE
1259 struct one_flush_context *octx = bio->bi_private;
1260 struct drbd_device *device = octx->device;
1261 struct issue_flush_context *ctx = octx->ctx;
1262
4e4cbee9
CH
1263 if (bio->bi_status) {
1264 ctx->error = blk_status_to_errno(bio->bi_status);
1265 drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
f9ff0da5
LE
1266 }
1267 kfree(octx);
1268 bio_put(bio);
1269
1270 clear_bit(FLUSH_PENDING, &device->flags);
1271 put_ldev(device);
1272 kref_put(&device->kref, drbd_destroy_device);
1273
1274 if (atomic_dec_and_test(&ctx->pending))
1275 complete(&ctx->done);
1276}
1277
1278static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
1279{
1280 struct bio *bio = bio_alloc(GFP_NOIO, 0);
1281 struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
1282 if (!bio || !octx) {
1283 drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
1284 /* FIXME: what else can I do now? disconnecting or detaching
1285 * really does not help to improve the state of the world, either.
1286 */
1287 kfree(octx);
1288 if (bio)
1289 bio_put(bio);
1290
1291 ctx->error = -ENOMEM;
1292 put_ldev(device);
1293 kref_put(&device->kref, drbd_destroy_device);
1294 return;
1295 }
4b0007c0 1296
f9ff0da5
LE
1297 octx->device = device;
1298 octx->ctx = ctx;
74d46992 1299 bio_set_dev(bio, device->ldev->backing_bdev);
f9ff0da5
LE
1300 bio->bi_private = octx;
1301 bio->bi_end_io = one_flush_endio;
70fd7614 1302 bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
f9ff0da5
LE
1303
1304 device->flush_jif = jiffies;
1305 set_bit(FLUSH_PENDING, &device->flags);
1306 atomic_inc(&ctx->pending);
1307 submit_bio(bio);
1308}
1309
1310static void drbd_flush(struct drbd_connection *connection)
1311{
f6ba8636 1312 if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
f9ff0da5
LE
1313 struct drbd_peer_device *peer_device;
1314 struct issue_flush_context ctx;
1315 int vnr;
1316
1317 atomic_set(&ctx.pending, 1);
1318 ctx.error = 0;
1319 init_completion(&ctx.done);
1320
615e087f 1321 rcu_read_lock();
c06ece6b
AG
1322 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1323 struct drbd_device *device = peer_device->device;
1324
b30ab791 1325 if (!get_ldev(device))
615e087f 1326 continue;
b30ab791 1327 kref_get(&device->kref);
615e087f
LE
1328 rcu_read_unlock();
1329
f9ff0da5 1330 submit_one_flush(device, &ctx);
b411b363 1331
615e087f 1332 rcu_read_lock();
b411b363 1333 }
615e087f 1334 rcu_read_unlock();
f9ff0da5
LE
1335
1336 /* Do we want to add a timeout,
1337 * if disk-timeout is set? */
1338 if (!atomic_dec_and_test(&ctx.pending))
1339 wait_for_completion(&ctx.done);
1340
1341 if (ctx.error) {
1342 /* would rather check on EOPNOTSUPP, but that is not reliable.
1343 * don't try again for ANY return value != 0
1344 * if (rv == -EOPNOTSUPP) */
1345 /* Any error is already reported by bio_endio callback. */
1346 drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
1347 }
b411b363 1348 }
b411b363
PR
1349}
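
/*
 * Illustrative sketch (not part of the original file) of the completion
 * counting used by drbd_flush()/submit_one_flush() above: ctx.pending starts
 * at 1 so the submitter holds its own reference while it is still iterating;
 * every submitted flush adds one, every one_flush_endio() drops one, and
 * whoever drops the count to zero completes ctx.done.
 *
 *	atomic_set(&ctx.pending, 1);		// submitter's own reference
 *	init_completion(&ctx.done);
 *	// per volume (done inside submit_one_flush()):
 *	atomic_inc(&ctx.pending);
 *	submit_bio(bio);
 *	// endio: if (atomic_dec_and_test(&ctx->pending)) complete(&ctx->done);
 *	// submitter drops its own reference and waits if anything is left:
 *	if (!atomic_dec_and_test(&ctx.pending))
 *		wait_for_completion(&ctx.done);
 */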
1350
1351/**
 1352 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 1353 * @connection: DRBD connection.
 1354 * @epoch: Epoch object.
1355 * @ev: Epoch event.
1356 */
bde89a9e 1357static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
b411b363
PR
1358 struct drbd_epoch *epoch,
1359 enum epoch_event ev)
1360{
2451fc3b 1361 int epoch_size;
b411b363 1362 struct drbd_epoch *next_epoch;
b411b363
PR
1363 enum finish_epoch rv = FE_STILL_LIVE;
1364
bde89a9e 1365 spin_lock(&connection->epoch_lock);
b411b363
PR
1366 do {
1367 next_epoch = NULL;
b411b363
PR
1368
1369 epoch_size = atomic_read(&epoch->epoch_size);
1370
1371 switch (ev & ~EV_CLEANUP) {
1372 case EV_PUT:
1373 atomic_dec(&epoch->active);
1374 break;
1375 case EV_GOT_BARRIER_NR:
1376 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
b411b363
PR
1377 break;
1378 case EV_BECAME_LAST:
1379 /* nothing to do*/
1380 break;
1381 }
1382
b411b363
PR
1383 if (epoch_size != 0 &&
1384 atomic_read(&epoch->active) == 0 &&
80f9fd55 1385 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
b411b363 1386 if (!(ev & EV_CLEANUP)) {
bde89a9e
AG
1387 spin_unlock(&connection->epoch_lock);
1388 drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1389 spin_lock(&connection->epoch_lock);
b411b363 1390 }
9ed57dcb
LE
1391#if 0
1392 /* FIXME: dec unacked on connection, once we have
1393 * something to count pending connection packets in. */
80f9fd55 1394 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
bde89a9e 1395 dec_unacked(epoch->connection);
9ed57dcb 1396#endif
b411b363 1397
bde89a9e 1398 if (connection->current_epoch != epoch) {
b411b363
PR
1399 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1400 list_del(&epoch->list);
1401 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
bde89a9e 1402 connection->epochs--;
b411b363
PR
1403 kfree(epoch);
1404
1405 if (rv == FE_STILL_LIVE)
1406 rv = FE_DESTROYED;
1407 } else {
1408 epoch->flags = 0;
1409 atomic_set(&epoch->epoch_size, 0);
698f9315 1410 /* atomic_set(&epoch->active, 0); is already zero */
b411b363
PR
1411 if (rv == FE_STILL_LIVE)
1412 rv = FE_RECYCLED;
1413 }
1414 }
1415
1416 if (!next_epoch)
1417 break;
1418
1419 epoch = next_epoch;
1420 } while (1);
1421
bde89a9e 1422 spin_unlock(&connection->epoch_lock);
b411b363 1423
b411b363
PR
1424 return rv;
1425}
1426
8fe39aac
PR
1427static enum write_ordering_e
1428max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
1429{
1430 struct disk_conf *dc;
1431
1432 dc = rcu_dereference(bdev->disk_conf);
1433
f6ba8636
AG
1434 if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
1435 wo = WO_DRAIN_IO;
1436 if (wo == WO_DRAIN_IO && !dc->disk_drain)
1437 wo = WO_NONE;
8fe39aac
PR
1438
1439 return wo;
1440}
1441
b411b363
PR
1442/**
 1443 * drbd_bump_write_ordering() - Fall back to another write ordering method
 1444 * @resource: DRBD resource.
 1445 * @wo: Write ordering method to try.
1446 */
8fe39aac
PR
1447void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1448 enum write_ordering_e wo)
b411b363 1449{
e9526580 1450 struct drbd_device *device;
b411b363 1451 enum write_ordering_e pwo;
4b0007c0 1452 int vnr;
b411b363 1453 static char *write_ordering_str[] = {
f6ba8636
AG
1454 [WO_NONE] = "none",
1455 [WO_DRAIN_IO] = "drain",
1456 [WO_BDEV_FLUSH] = "flush",
b411b363
PR
1457 };
1458
e9526580 1459 pwo = resource->write_ordering;
f6ba8636 1460 if (wo != WO_BDEV_FLUSH)
70df7092 1461 wo = min(pwo, wo);
daeda1cc 1462 rcu_read_lock();
e9526580 1463 idr_for_each_entry(&resource->devices, device, vnr) {
8fe39aac
PR
1464 if (get_ldev(device)) {
1465 wo = max_allowed_wo(device->ldev, wo);
1466 if (device->ldev == bdev)
1467 bdev = NULL;
1468 put_ldev(device);
1469 }
4b0007c0 1470 }
8fe39aac
PR
1471
1472 if (bdev)
1473 wo = max_allowed_wo(bdev, wo);
1474
70df7092
LE
1475 rcu_read_unlock();
1476
e9526580 1477 resource->write_ordering = wo;
f6ba8636 1478 if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
e9526580 1479 drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
b411b363
PR
1480}
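
/*
 * Illustrative example (not part of the original file): the fallback chain
 * implemented above only ever degrades,
 *
 *	WO_BDEV_FLUSH  ->  WO_DRAIN_IO  ->  WO_NONE
 *
 * and every attached backing device caps it via max_allowed_wo().  E.g. a
 * single disk configured with disk-flushes=no but disk-drain=yes limits the
 * whole resource to WO_DRAIN_IO, even if every other disk could flush.
 */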
1481
f31e583a
LE
1482/*
1483 * Mapping "discard" to ZEROOUT with UNMAP does not work for us:
1484 * Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it
1485 * will directly go to fallback mode, submitting normal writes, and
1486 * never even try to UNMAP.
1487 *
1488 * And dm-thin does not do this (yet), mostly because in general it has
1489 * to assume that "skip_block_zeroing" is set. See also:
1490 * https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html
1491 * https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html
1492 *
1493 * We *may* ignore the discard-zeroes-data setting, if so configured.
1494 *
1495 * Assumption is that this "discard_zeroes_data=0" is only because the backend
1496 * may ignore partial unaligned discards.
1497 *
1498 * LVM/DM thin as of at least
1499 * LVM version: 2.02.115(2)-RHEL7 (2015-01-28)
1500 * Library version: 1.02.93-RHEL7 (2015-01-28)
1501 * Driver version: 4.29.0
1502 * still behaves this way.
1503 *
1504 * For unaligned (wrt. alignment and granularity) or too small discards,
1505 * we zero-out the initial (and/or) trailing unaligned partial chunks,
1506 * but discard all the aligned full chunks.
1507 *
1508 * At least for LVM/DM thin, with skip_block_zeroing=false,
1509 * the result is effectively "discard_zeroes_data=1".
1510 */
1511/* flags: EE_TRIM|EE_ZEROOUT */
1512int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
dd4f699d
LE
1513{
1514 struct block_device *bdev = device->ldev->backing_bdev;
f31e583a
LE
1515 struct request_queue *q = bdev_get_queue(bdev);
1516 sector_t tmp, nr;
1517 unsigned int max_discard_sectors, granularity;
1518 int alignment;
1519 int err = 0;
dd4f699d 1520
f31e583a
LE
1521 if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM))
1522 goto zero_out;
1523
1524 /* Zero-sector (unknown) and one-sector granularities are the same. */
1525 granularity = max(q->limits.discard_granularity >> 9, 1U);
1526 alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
1527
1528 max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
1529 max_discard_sectors -= max_discard_sectors % granularity;
1530 if (unlikely(!max_discard_sectors))
1531 goto zero_out;
1532
1533 if (nr_sectors < granularity)
1534 goto zero_out;
1535
1536 tmp = start;
1537 if (sector_div(tmp, granularity) != alignment) {
1538 if (nr_sectors < 2*granularity)
1539 goto zero_out;
1540 /* start + gran - (start + gran - align) % gran */
1541 tmp = start + granularity - alignment;
1542 tmp = start + granularity - sector_div(tmp, granularity);
1543
1544 nr = tmp - start;
1545 /* don't flag BLKDEV_ZERO_NOUNMAP, we don't know how many
1546 * layers are below us, some may have smaller granularity */
1547 err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
1548 nr_sectors -= nr;
1549 start = tmp;
1550 }
1551 while (nr_sectors >= max_discard_sectors) {
1552 err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0);
1553 nr_sectors -= max_discard_sectors;
1554 start += max_discard_sectors;
1555 }
1556 if (nr_sectors) {
1557 /* max_discard_sectors is unsigned int (and a multiple of
1558 * granularity, we made sure of that above already);
1559 * nr is < max_discard_sectors;
1560 * I don't need sector_div here, even though nr is sector_t */
1561 nr = nr_sectors;
1562 nr -= (unsigned int)nr % granularity;
1563 if (nr) {
1564 err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
1565 nr_sectors -= nr;
1566 start += nr;
1567 }
1568 }
1569 zero_out:
1570 if (nr_sectors) {
1571 err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
1572 (flags & EE_TRIM) ? 0 : BLKDEV_ZERO_NOUNMAP);
1573 }
1574 return err != 0;
1575}
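
/*
 * Illustrative worked example (not part of the original file) for the
 * alignment math above.  Assume granularity = 2048 sectors, alignment = 0,
 * and a request with start = 1000, nr_sectors = 10000:
 *
 *	head:	tmp = 1000 + 2048 - ((1000 + 2048) % 2048) = 2048
 *		-> zero out sectors 1000..2047   (1048 sectors, unaligned head)
 *	middle:	8952 sectors remain; 8952 - (8952 % 2048) = 8192
 *		-> discard sectors 2048..10239   (aligned full chunks)
 *	tail:	760 sectors remain
 *		-> zero out sectors 10240..10999 (unaligned tail)
 *
 * Every sector is covered exactly once: 1048 + 8192 + 760 = 10000.
 */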
0dbed96a 1576
f31e583a
LE
1577static bool can_do_reliable_discards(struct drbd_device *device)
1578{
1579 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
1580 struct disk_conf *dc;
1581 bool can_do;
0dbed96a 1582
f31e583a
LE
1583 if (!blk_queue_discard(q))
1584 return false;
1585
1586 rcu_read_lock();
1587 dc = rcu_dereference(device->ldev->disk_conf);
1588 can_do = dc->discard_zeroes_if_aligned;
1589 rcu_read_unlock();
1590 return can_do;
1591}
1592
1593static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1594{
1595 /* If the backend cannot discard, or does not guarantee
1596 * read-back zeroes in discarded ranges, we fall back to
1597 * zero-out. Unless configuration specifically requested
1598 * otherwise. */
1599 if (!can_do_reliable_discards(device))
1600 peer_req->flags |= EE_ZEROOUT;
1601
1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1603 peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
1604 peer_req->flags |= EE_WAS_ERROR;
dd4f699d
LE
1605 drbd_endio_write_sec_final(peer_req);
1606}
1607
9104d31a
LE
1608static void drbd_issue_peer_wsame(struct drbd_device *device,
1609 struct drbd_peer_request *peer_req)
1610{
1611 struct block_device *bdev = device->ldev->backing_bdev;
1612 sector_t s = peer_req->i.sector;
1613 sector_t nr = peer_req->i.size >> 9;
1614 if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
1615 peer_req->flags |= EE_WAS_ERROR;
1616 drbd_endio_write_sec_final(peer_req);
1617}
1618
1619
45bb912b 1620/**
fbe29dec 1621 * drbd_submit_peer_request()
b30ab791 1622 * @device: DRBD device.
db830c46 1623 * @peer_req: peer request
1eff9d32 1624 * @rw: flag field, see bio->bi_opf
10f6d992
LE
1625 *
1626 * May spread the pages to multiple bios,
1627 * depending on bio_add_page restrictions.
1628 *
1629 * Returns 0 if all bios have been submitted,
1630 * -ENOMEM if we could not allocate enough bios,
1631 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1632 * single page to an empty bio (which should never happen and likely indicates
1633 * that the lower level IO stack is in some way broken). This has been observed
1634 * on certain Xen deployments.
45bb912b
LE
1635 */
1636/* TODO allocate from our own bio_set. */
b30ab791 1637int drbd_submit_peer_request(struct drbd_device *device,
fbe29dec 1638 struct drbd_peer_request *peer_req,
bb3cc85e
MC
1639 const unsigned op, const unsigned op_flags,
1640 const int fault_type)
45bb912b
LE
1641{
1642 struct bio *bios = NULL;
1643 struct bio *bio;
db830c46
AG
1644 struct page *page = peer_req->pages;
1645 sector_t sector = peer_req->i.sector;
11f8b2b6 1646 unsigned data_size = peer_req->i.size;
45bb912b 1647 unsigned n_bios = 0;
11f8b2b6 1648 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
10f6d992 1649 int err = -ENOMEM;
45bb912b 1650
dd4f699d
LE
 1651	/* TRIM/DISCARD, ZEROOUT and WRITE_SAME: for now, always use the
 1652	 * blkdev_issue_*() helper functions.
 1653	 * They are synchronous, but they do the right thing wrt. bio splitting.
1654 * Correctness first, performance later. Next step is to code an
1655 * asynchronous variant of the same.
1656 */
f31e583a 1657 if (peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) {
a0fb3c47
LE
1658 /* wait for all pending IO completions, before we start
1659 * zeroing things out. */
5dd2ca19 1660 conn_wait_active_ee_empty(peer_req->peer_device->connection);
45d2933c
LE
1661 /* add it to the active list now,
1662 * so we can find it to present it in debugfs */
21ae5d7f
LE
1663 peer_req->submit_jif = jiffies;
1664 peer_req->flags |= EE_SUBMITTED;
700ca8c0
PR
1665
1666 /* If this was a resync request from receive_rs_deallocated(),
1667 * it is already on the sync_ee list */
1668 if (list_empty(&peer_req->w.list)) {
1669 spin_lock_irq(&device->resource->req_lock);
1670 list_add_tail(&peer_req->w.list, &device->active_ee);
1671 spin_unlock_irq(&device->resource->req_lock);
1672 }
1673
f31e583a
LE
1674 if (peer_req->flags & (EE_TRIM|EE_ZEROOUT))
1675 drbd_issue_peer_discard_or_zero_out(device, peer_req);
9104d31a
LE
1676 else /* EE_WRITE_SAME */
1677 drbd_issue_peer_wsame(device, peer_req);
a0fb3c47
LE
1678 return 0;
1679 }
1680
45bb912b
LE
1681 /* In most cases, we will only need one bio. But in case the lower
1682 * level restrictions happen to be different at this offset on this
1683 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1684 * request in more than one bio.
1685 *
1686 * Plain bio_alloc is good enough here, this is no DRBD internally
1687 * generated bio, but a bio allocated on behalf of the peer.
1688 */
45bb912b
LE
1689next_bio:
1690 bio = bio_alloc(GFP_NOIO, nr_pages);
1691 if (!bio) {
a0fb3c47 1692 drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
45bb912b
LE
1693 goto fail;
1694 }
db830c46 1695 /* > peer_req->i.sector, unless this is the first bio */
4f024f37 1696 bio->bi_iter.bi_sector = sector;
74d46992 1697 bio_set_dev(bio, device->ldev->backing_bdev);
bb3cc85e 1698 bio_set_op_attrs(bio, op, op_flags);
db830c46 1699 bio->bi_private = peer_req;
fcefa62e 1700 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1701
1702 bio->bi_next = bios;
1703 bios = bio;
1704 ++n_bios;
1705
1706 page_chain_for_each(page) {
11f8b2b6 1707 unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
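		/* if bio_add_page() refuses the page, this bio is full (or hit a
		 * lower-level limit): chain up another bio and retry the same page */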
06efffda 1708 if (!bio_add_page(bio, page, len, 0))
45bb912b 1709 goto next_bio;
11f8b2b6 1710 data_size -= len;
45bb912b
LE
1711 sector += len >> 9;
1712 --nr_pages;
1713 }
11f8b2b6 1714 D_ASSERT(device, data_size == 0);
a0fb3c47 1715 D_ASSERT(device, page == NULL);
45bb912b 1716
db830c46 1717 atomic_set(&peer_req->pending_bios, n_bios);
21ae5d7f
LE
1718 /* for debugfs: update timestamp, mark as submitted */
1719 peer_req->submit_jif = jiffies;
1720 peer_req->flags |= EE_SUBMITTED;
45bb912b
LE
1721 do {
1722 bio = bios;
1723 bios = bios->bi_next;
1724 bio->bi_next = NULL;
1725
ed00aabd 1726 drbd_submit_bio_noacct(device, fault_type, bio);
45bb912b 1727 } while (bios);
45bb912b
LE
1728 return 0;
1729
1730fail:
1731 while (bios) {
1732 bio = bios;
1733 bios = bios->bi_next;
1734 bio_put(bio);
1735 }
10f6d992 1736 return err;
45bb912b
LE
1737}
1738
b30ab791 1739static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
db830c46 1740 struct drbd_peer_request *peer_req)
53840641 1741{
db830c46 1742 struct drbd_interval *i = &peer_req->i;
53840641 1743
b30ab791 1744 drbd_remove_interval(&device->write_requests, i);
53840641
AG
1745 drbd_clear_interval(i);
1746
6c852bec 1747 /* Wake up any processes waiting for this peer request to complete. */
53840641 1748 if (i->waiting)
b30ab791 1749 wake_up(&device->misc_wait);
53840641
AG
1750}
1751
bde89a9e 1752static void conn_wait_active_ee_empty(struct drbd_connection *connection)
77fede51 1753{
c06ece6b 1754 struct drbd_peer_device *peer_device;
77fede51
PR
1755 int vnr;
1756
1757 rcu_read_lock();
c06ece6b
AG
1758 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1759 struct drbd_device *device = peer_device->device;
1760
b30ab791 1761 kref_get(&device->kref);
77fede51 1762 rcu_read_unlock();
b30ab791 1763 drbd_wait_ee_list_empty(device, &device->active_ee);
05a10ec7 1764 kref_put(&device->kref, drbd_destroy_device);
77fede51
PR
1765 rcu_read_lock();
1766 }
1767 rcu_read_unlock();
1768}
1769
bde89a9e 1770static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
b411b363 1771{
2451fc3b 1772 int rv;
e658983a 1773 struct p_barrier *p = pi->data;
b411b363
PR
1774 struct drbd_epoch *epoch;
1775
9ed57dcb
LE
1776 /* FIXME these are unacked on connection,
1777 * not a specific (peer)device.
1778 */
bde89a9e
AG
1779 connection->current_epoch->barrier_nr = p->barrier;
1780 connection->current_epoch->connection = connection;
1781 rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1782
1783 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1784 * the activity log, which means it would not be resynced in case the
1785 * R_PRIMARY crashes now.
1786 * Therefore we must send the barrier_ack after the barrier request was
1787 * completed. */
e9526580 1788 switch (connection->resource->write_ordering) {
f6ba8636 1789 case WO_NONE:
b411b363 1790 if (rv == FE_RECYCLED)
82bc0194 1791 return 0;
2451fc3b
PR
1792
1793 /* receiver context, in the writeout path of the other node.
1794 * avoid potential distributed deadlock */
1795 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1796 if (epoch)
1797 break;
1798 else
1ec861eb 1799 drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
df561f66 1800 fallthrough;
b411b363 1801
f6ba8636
AG
1802 case WO_BDEV_FLUSH:
1803 case WO_DRAIN_IO:
bde89a9e
AG
1804 conn_wait_active_ee_empty(connection);
1805 drbd_flush(connection);
2451fc3b 1806
bde89a9e 1807 if (atomic_read(&connection->current_epoch->epoch_size)) {
2451fc3b
PR
1808 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1809 if (epoch)
1810 break;
b411b363
PR
1811 }
1812
82bc0194 1813 return 0;
2451fc3b 1814 default:
e9526580
PR
1815 drbd_err(connection, "Strangeness in connection->write_ordering %d\n",
1816 connection->resource->write_ordering);
82bc0194 1817 return -EIO;
b411b363
PR
1818 }
1819
1820 epoch->flags = 0;
1821 atomic_set(&epoch->epoch_size, 0);
1822 atomic_set(&epoch->active, 0);
1823
bde89a9e
AG
1824 spin_lock(&connection->epoch_lock);
1825 if (atomic_read(&connection->current_epoch->epoch_size)) {
1826 list_add(&epoch->list, &connection->current_epoch->list);
1827 connection->current_epoch = epoch;
1828 connection->epochs++;
b411b363
PR
1829 } else {
1830 /* The current_epoch got recycled while we allocated this one... */
1831 kfree(epoch);
1832 }
bde89a9e 1833 spin_unlock(&connection->epoch_lock);
b411b363 1834
82bc0194 1835 return 0;
b411b363
PR
1836}
1837
9104d31a 1838/* quick wrapper in case payload size != request_size (write same) */
3d0e6375 1839static void drbd_csum_ee_size(struct crypto_shash *h,
9104d31a
LE
1840 struct drbd_peer_request *r, void *d,
1841 unsigned int payload_size)
1842{
1843 unsigned int tmp = r->i.size;
1844 r->i.size = payload_size;
1845 drbd_csum_ee(h, r, d);
1846 r->i.size = tmp;
1847}
1848
b411b363 1849/* used from receive_RSDataReply (recv_resync_read)
9104d31a
LE
1850 * and from receive_Data.
1851 * data_size: actual payload ("data in")
1852 * for normal writes that is bi_size.
1853 * for discards, that is zero.
1854 * for write same, it is logical_block_size.
1855 * both trim and write same have the bi_size ("data len to be affected")
1856 * as extra argument in the packet header.
1857 */
f6ffca9f 1858static struct drbd_peer_request *
69a22773 1859read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
a0fb3c47 1860 struct packet_info *pi) __must_hold(local)
b411b363 1861{
69a22773 1862 struct drbd_device *device = peer_device->device;
b30ab791 1863 const sector_t capacity = drbd_get_capacity(device->this_bdev);
db830c46 1864 struct drbd_peer_request *peer_req;
b411b363 1865 struct page *page;
11f8b2b6
AG
1866 int digest_size, err;
1867 unsigned int data_size = pi->size, ds;
69a22773
AG
1868 void *dig_in = peer_device->connection->int_dig_in;
1869 void *dig_vv = peer_device->connection->int_dig_vv;
6b4388ac 1870 unsigned long *data;
a0fb3c47 1871 struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
f31e583a 1872 struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL;
9104d31a 1873 struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
b411b363 1874
11f8b2b6 1875 digest_size = 0;
a0fb3c47 1876 if (!trim && peer_device->connection->peer_integrity_tfm) {
3d0e6375 1877 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
9f5bdc33
AG
1878 /*
1879 * FIXME: Receive the incoming digest into the receive buffer
1880 * here, together with its struct p_data?
1881 */
11f8b2b6 1882 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
a5c31904 1883 if (err)
b411b363 1884 return NULL;
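		/* on the wire, the integrity digest precedes the payload;
		 * what remains in data_size is the actual payload length */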
11f8b2b6 1885 data_size -= digest_size;
b411b363
PR
1886 }
1887
9104d31a
LE
 1888	/* assume request_size == data_size, but special case trim, zeroes and wsame. */
1889 ds = data_size;
a0fb3c47 1890 if (trim) {
9104d31a
LE
1891 if (!expect(data_size == 0))
1892 return NULL;
1893 ds = be32_to_cpu(trim->size);
f31e583a
LE
1894 } else if (zeroes) {
1895 if (!expect(data_size == 0))
1896 return NULL;
1897 ds = be32_to_cpu(zeroes->size);
9104d31a
LE
1898 } else if (wsame) {
1899 if (data_size != queue_logical_block_size(device->rq_queue)) {
1900 drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
1901 data_size, queue_logical_block_size(device->rq_queue));
1902 return NULL;
1903 }
1904 if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
1905 drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
1906 data_size, bdev_logical_block_size(device->ldev->backing_bdev));
1907 return NULL;
1908 }
1909 ds = be32_to_cpu(wsame->size);
a0fb3c47
LE
1910 }
1911
9104d31a 1912 if (!expect(IS_ALIGNED(ds, 512)))
841ce241 1913 return NULL;
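	/* trim/zeroes/wsame describe a range to be affected rather than a data
	 * payload, so that range may be larger than a single data bio */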
f31e583a 1914 if (trim || wsame || zeroes) {
9104d31a
LE
1915 if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1916 return NULL;
1917 } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
841ce241 1918 return NULL;
b411b363 1919
6666032a
LE
 1920	/* even though we trust our peer,
1921 * we sometimes have to double check. */
9104d31a 1922 if (sector + (ds>>9) > capacity) {
d0180171 1923 drbd_err(device, "request from peer beyond end of local disk: "
fdda6544 1924 "capacity: %llus < sector: %llus + size: %u\n",
6666032a 1925 (unsigned long long)capacity,
9104d31a 1926 (unsigned long long)sector, ds);
6666032a
LE
1927 return NULL;
1928 }
1929
b411b363
PR
1930 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1931 * "criss-cross" setup, that might cause write-out on some other DRBD,
1932 * which in turn might block on the other node at this very place. */
9104d31a 1933 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
db830c46 1934 if (!peer_req)
b411b363 1935 return NULL;
45bb912b 1936
21ae5d7f 1937 peer_req->flags |= EE_WRITE;
9104d31a 1938 if (trim) {
f31e583a
LE
1939 peer_req->flags |= EE_TRIM;
1940 return peer_req;
1941 }
1942 if (zeroes) {
1943 peer_req->flags |= EE_ZEROOUT;
81a3537a 1944 return peer_req;
9104d31a
LE
1945 }
1946 if (wsame)
1947 peer_req->flags |= EE_WRITE_SAME;
a73ff323 1948
9104d31a 1949 /* receive payload size bytes into page chain */
b411b363 1950 ds = data_size;
db830c46 1951 page = peer_req->pages;
45bb912b
LE
1952 page_chain_for_each(page) {
1953 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1954 data = kmap(page);
69a22773 1955 err = drbd_recv_all_warn(peer_device->connection, data, len);
b30ab791 1956 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
d0180171 1957 drbd_err(device, "Fault injection: Corrupting data on receive\n");
6b4388ac
PR
1958 data[0] = data[0] ^ (unsigned long)-1;
1959 }
b411b363 1960 kunmap(page);
a5c31904 1961 if (err) {
b30ab791 1962 drbd_free_peer_req(device, peer_req);
b411b363
PR
1963 return NULL;
1964 }
a5c31904 1965 ds -= len;
b411b363
PR
1966 }
1967
11f8b2b6 1968 if (digest_size) {
9104d31a 1969 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
11f8b2b6 1970 if (memcmp(dig_in, dig_vv, digest_size)) {
d0180171 1971 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
470be44a 1972 (unsigned long long)sector, data_size);
b30ab791 1973 drbd_free_peer_req(device, peer_req);
b411b363
PR
1974 return NULL;
1975 }
1976 }
11f8b2b6 1977 device->recv_cnt += data_size >> 9;
db830c46 1978 return peer_req;
b411b363
PR
1979}
1980
1981/* drbd_drain_block() just takes a data block
1982 * out of the socket input buffer, and discards it.
1983 */
69a22773 1984static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
b411b363
PR
1985{
1986 struct page *page;
a5c31904 1987 int err = 0;
b411b363
PR
1988 void *data;
1989
c3470cde 1990 if (!data_size)
fc5be839 1991 return 0;
c3470cde 1992
69a22773 1993 page = drbd_alloc_pages(peer_device, 1, 1);
b411b363
PR
1994
1995 data = kmap(page);
1996 while (data_size) {
fc5be839
AG
1997 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1998
69a22773 1999 err = drbd_recv_all_warn(peer_device->connection, data, len);
a5c31904 2000 if (err)
b411b363 2001 break;
a5c31904 2002 data_size -= len;
b411b363
PR
2003 }
2004 kunmap(page);
69a22773 2005 drbd_free_pages(peer_device->device, page, 0);
fc5be839 2006 return err;
b411b363
PR
2007}
2008
69a22773 2009static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
b411b363
PR
2010 sector_t sector, int data_size)
2011{
7988613b
KO
2012 struct bio_vec bvec;
2013 struct bvec_iter iter;
b411b363 2014 struct bio *bio;
11f8b2b6 2015 int digest_size, err, expect;
69a22773
AG
2016 void *dig_in = peer_device->connection->int_dig_in;
2017 void *dig_vv = peer_device->connection->int_dig_vv;
b411b363 2018
11f8b2b6 2019 digest_size = 0;
69a22773 2020 if (peer_device->connection->peer_integrity_tfm) {
3d0e6375 2021 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
11f8b2b6 2022 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
a5c31904
AG
2023 if (err)
2024 return err;
11f8b2b6 2025 data_size -= digest_size;
b411b363
PR
2026 }
2027
b411b363
PR
2028 /* optimistically update recv_cnt. if receiving fails below,
2029 * we disconnect anyways, and counters will be reset. */
69a22773 2030 peer_device->device->recv_cnt += data_size>>9;
b411b363
PR
2031
2032 bio = req->master_bio;
69a22773 2033 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
b411b363 2034
7988613b
KO
2035 bio_for_each_segment(bvec, bio, iter) {
2036 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
2037 expect = min_t(int, data_size, bvec.bv_len);
69a22773 2038 err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
7988613b 2039 kunmap(bvec.bv_page);
a5c31904
AG
2040 if (err)
2041 return err;
2042 data_size -= expect;
b411b363
PR
2043 }
2044
11f8b2b6 2045 if (digest_size) {
69a22773 2046 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
11f8b2b6 2047 if (memcmp(dig_in, dig_vv, digest_size)) {
69a22773 2048 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 2049 return -EINVAL;
b411b363
PR
2050 }
2051 }
2052
69a22773 2053 D_ASSERT(peer_device->device, data_size == 0);
28284cef 2054 return 0;
b411b363
PR
2055}
2056
a990be46 2057/*
668700b4 2058 * e_end_resync_block() is called in ack_sender context via
a990be46
AG
2059 * drbd_finish_peer_reqs().
2060 */
99920dc5 2061static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 2062{
8050e6d0 2063 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2064 container_of(w, struct drbd_peer_request, w);
2065 struct drbd_peer_device *peer_device = peer_req->peer_device;
2066 struct drbd_device *device = peer_device->device;
db830c46 2067 sector_t sector = peer_req->i.sector;
99920dc5 2068 int err;
b411b363 2069
0b0ba1ef 2070 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
b411b363 2071
db830c46 2072 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b30ab791 2073 drbd_set_in_sync(device, sector, peer_req->i.size);
a8cd15ba 2074 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
b411b363
PR
2075 } else {
2076 /* Record failure to sync */
b30ab791 2077 drbd_rs_failed_io(device, sector, peer_req->i.size);
b411b363 2078
a8cd15ba 2079 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
b411b363 2080 }
b30ab791 2081 dec_unacked(device);
b411b363 2082
99920dc5 2083 return err;
b411b363
PR
2084}
2085
69a22773 2086static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
a0fb3c47 2087 struct packet_info *pi) __releases(local)
b411b363 2088{
69a22773 2089 struct drbd_device *device = peer_device->device;
db830c46 2090 struct drbd_peer_request *peer_req;
b411b363 2091
a0fb3c47 2092 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
db830c46 2093 if (!peer_req)
45bb912b 2094 goto fail;
b411b363 2095
b30ab791 2096 dec_rs_pending(device);
b411b363 2097
b30ab791 2098 inc_unacked(device);
b411b363
PR
2099 /* corresponding dec_unacked() in e_end_resync_block()
2100 * respective _drbd_clear_done_ee */
2101
a8cd15ba 2102 peer_req->w.cb = e_end_resync_block;
21ae5d7f 2103 peer_req->submit_jif = jiffies;
45bb912b 2104
0500813f 2105 spin_lock_irq(&device->resource->req_lock);
b9ed7080 2106 list_add_tail(&peer_req->w.list, &device->sync_ee);
0500813f 2107 spin_unlock_irq(&device->resource->req_lock);
b411b363 2108
a0fb3c47 2109 atomic_add(pi->size >> 9, &device->rs_sect_ev);
bb3cc85e
MC
2110 if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
2111 DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 2112 return 0;
b411b363 2113
10f6d992 2114 /* don't care for the reason here */
d0180171 2115 drbd_err(device, "submit failed, triggering re-connect\n");
0500813f 2116 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2117 list_del(&peer_req->w.list);
0500813f 2118 spin_unlock_irq(&device->resource->req_lock);
22cc37a9 2119
b30ab791 2120 drbd_free_peer_req(device, peer_req);
45bb912b 2121fail:
b30ab791 2122 put_ldev(device);
e1c1b0fc 2123 return -EIO;
b411b363
PR
2124}
2125
668eebc6 2126static struct drbd_request *
b30ab791 2127find_request(struct drbd_device *device, struct rb_root *root, u64 id,
bc9c5c41 2128 sector_t sector, bool missing_ok, const char *func)
51624585 2129{
51624585
AG
2130 struct drbd_request *req;
2131
bc9c5c41
AG
2132 /* Request object according to our peer */
2133 req = (struct drbd_request *)(unsigned long)id;
5e472264 2134 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 2135 return req;
c3afd8f5 2136 if (!missing_ok) {
d0180171 2137 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
2138 (unsigned long)id, (unsigned long long)sector);
2139 }
51624585 2140 return NULL;
b411b363
PR
2141}
2142
bde89a9e 2143static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2144{
9f4fe9ad 2145 struct drbd_peer_device *peer_device;
b30ab791 2146 struct drbd_device *device;
b411b363
PR
2147 struct drbd_request *req;
2148 sector_t sector;
82bc0194 2149 int err;
e658983a 2150 struct p_data *p = pi->data;
4a76b161 2151
9f4fe9ad
AG
2152 peer_device = conn_peer_device(connection, pi->vnr);
2153 if (!peer_device)
4a76b161 2154 return -EIO;
9f4fe9ad 2155 device = peer_device->device;
b411b363
PR
2156
2157 sector = be64_to_cpu(p->sector);
2158
0500813f 2159 spin_lock_irq(&device->resource->req_lock);
b30ab791 2160 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
0500813f 2161 spin_unlock_irq(&device->resource->req_lock);
c3afd8f5 2162 if (unlikely(!req))
82bc0194 2163 return -EIO;
b411b363 2164
24c4830c 2165 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
b411b363
PR
2166 * special casing it there for the various failure cases.
2167 * still no race with drbd_fail_pending_reads */
69a22773 2168 err = recv_dless_read(peer_device, req, sector, pi->size);
82bc0194 2169 if (!err)
8554df1c 2170 req_mod(req, DATA_RECEIVED);
b411b363
PR
2171 /* else: nothing. handled from drbd_disconnect...
2172 * I don't think we may complete this just yet
2173 * in case we are "on-disconnect: freeze" */
2174
82bc0194 2175 return err;
b411b363
PR
2176}
2177
bde89a9e 2178static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2179{
9f4fe9ad 2180 struct drbd_peer_device *peer_device;
b30ab791 2181 struct drbd_device *device;
b411b363 2182 sector_t sector;
82bc0194 2183 int err;
e658983a 2184 struct p_data *p = pi->data;
4a76b161 2185
9f4fe9ad
AG
2186 peer_device = conn_peer_device(connection, pi->vnr);
2187 if (!peer_device)
4a76b161 2188 return -EIO;
9f4fe9ad 2189 device = peer_device->device;
b411b363
PR
2190
2191 sector = be64_to_cpu(p->sector);
0b0ba1ef 2192 D_ASSERT(device, p->block_id == ID_SYNCER);
b411b363 2193
b30ab791 2194 if (get_ldev(device)) {
b411b363
PR
2195 /* data is submitted to disk within recv_resync_read.
2196 * corresponding put_ldev done below on error,
fcefa62e 2197 * or in drbd_peer_request_endio. */
a0fb3c47 2198 err = recv_resync_read(peer_device, sector, pi);
b411b363
PR
2199 } else {
2200 if (__ratelimit(&drbd_ratelimit_state))
d0180171 2201 drbd_err(device, "Can not write resync data to local disk.\n");
b411b363 2202
69a22773 2203 err = drbd_drain_block(peer_device, pi->size);
b411b363 2204
69a22773 2205 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
b411b363
PR
2206 }
2207
b30ab791 2208 atomic_add(pi->size >> 9, &device->rs_sect_in);
778f271d 2209
82bc0194 2210 return err;
b411b363
PR
2211}
2212
b30ab791 2213static void restart_conflicting_writes(struct drbd_device *device,
7be8da07 2214 sector_t sector, int size)
b411b363 2215{
7be8da07
AG
2216 struct drbd_interval *i;
2217 struct drbd_request *req;
2218
b30ab791 2219 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2220 if (!i->local)
2221 continue;
2222 req = container_of(i, struct drbd_request, i);
2223 if (req->rq_state & RQ_LOCAL_PENDING ||
2224 !(req->rq_state & RQ_POSTPONED))
2225 continue;
2312f0b3
LE
2226 /* as it is RQ_POSTPONED, this will cause it to
2227 * be queued on the retry workqueue. */
d4dabbe2 2228 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
2229 }
2230}
b411b363 2231
a990be46 2232/*
668700b4 2233 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
b411b363 2234 */
99920dc5 2235static int e_end_block(struct drbd_work *w, int cancel)
b411b363 2236{
8050e6d0 2237 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2238 container_of(w, struct drbd_peer_request, w);
2239 struct drbd_peer_device *peer_device = peer_req->peer_device;
2240 struct drbd_device *device = peer_device->device;
db830c46 2241 sector_t sector = peer_req->i.sector;
99920dc5 2242 int err = 0, pcmd;
b411b363 2243
303d1448 2244 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 2245 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b30ab791
AG
2246 pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2247 device->state.conn <= C_PAUSED_SYNC_T &&
db830c46 2248 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 2249 P_RS_WRITE_ACK : P_WRITE_ACK;
a8cd15ba 2250 err = drbd_send_ack(peer_device, pcmd, peer_req);
b411b363 2251 if (pcmd == P_RS_WRITE_ACK)
b30ab791 2252 drbd_set_in_sync(device, sector, peer_req->i.size);
b411b363 2253 } else {
a8cd15ba 2254 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
b411b363
PR
2255 /* we expect it to be marked out of sync anyways...
2256 * maybe assert this? */
2257 }
b30ab791 2258 dec_unacked(device);
b411b363 2259 }
08d0dabf 2260
b411b363
PR
2261 /* we delete from the conflict detection hash _after_ we sent out the
2262 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 2263 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
0500813f 2264 spin_lock_irq(&device->resource->req_lock);
0b0ba1ef 2265 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
b30ab791 2266 drbd_remove_epoch_entry_interval(device, peer_req);
7be8da07 2267 if (peer_req->flags & EE_RESTART_REQUESTS)
b30ab791 2268 restart_conflicting_writes(device, sector, peer_req->i.size);
0500813f 2269 spin_unlock_irq(&device->resource->req_lock);
bb3bfe96 2270 } else
0b0ba1ef 2271 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
b411b363 2272
5dd2ca19 2273 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 2274
99920dc5 2275 return err;
b411b363
PR
2276}
2277
a8cd15ba 2278static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 2279{
8050e6d0 2280 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2281 container_of(w, struct drbd_peer_request, w);
2282 struct drbd_peer_device *peer_device = peer_req->peer_device;
99920dc5 2283 int err;
b411b363 2284
a8cd15ba
AG
2285 err = drbd_send_ack(peer_device, ack, peer_req);
2286 dec_unacked(peer_device->device);
b411b363 2287
99920dc5 2288 return err;
b411b363
PR
2289}
2290
d4dabbe2 2291static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 2292{
a8cd15ba 2293 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
2294}
2295
99920dc5 2296static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07 2297{
a8cd15ba
AG
2298 struct drbd_peer_request *peer_req =
2299 container_of(w, struct drbd_peer_request, w);
2300 struct drbd_connection *connection = peer_req->peer_device->connection;
7be8da07 2301
a8cd15ba 2302 return e_send_ack(w, connection->agreed_pro_version >= 100 ?
d4dabbe2 2303 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 2304}
b411b363 2305
3e394da1
AG
2306static bool seq_greater(u32 a, u32 b)
2307{
2308 /*
2309 * We assume 32-bit wrap-around here.
2310 * For 24-bit wrap-around, we would have to shift:
2311 * a <<= 8; b <<= 8;
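	 *
	 * e.g. seq_greater(2, 0xfffffffe) is true:
	 * (s32)2 - (s32)0xfffffffe == 2 - (-2) == 4 > 0,
	 * so 2 is considered newer than 0xfffffffe across the wrap.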
2312 */
2313 return (s32)a - (s32)b > 0;
2314}
b411b363 2315
3e394da1
AG
2316static u32 seq_max(u32 a, u32 b)
2317{
2318 return seq_greater(a, b) ? a : b;
b411b363
PR
2319}
2320
69a22773 2321static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
3e394da1 2322{
69a22773 2323 struct drbd_device *device = peer_device->device;
3c13b680 2324 unsigned int newest_peer_seq;
3e394da1 2325
69a22773 2326 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
b30ab791
AG
2327 spin_lock(&device->peer_seq_lock);
2328 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2329 device->peer_seq = newest_peer_seq;
2330 spin_unlock(&device->peer_seq_lock);
2331 /* wake up only if we actually changed device->peer_seq */
3c13b680 2332 if (peer_seq == newest_peer_seq)
b30ab791 2333 wake_up(&device->seq_wait);
7be8da07 2334 }
b411b363
PR
2335}
2336
d93f6302 2337static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 2338{
d93f6302
LE
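	/* s1/s2 are sector offsets, l1/l2 are lengths in bytes;
	 * the intervals overlap unless one ends before the other starts */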
2339 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
2340}
b6a370ba 2341
d93f6302 2342/* maybe change sync_ee into interval trees as well? */
b30ab791 2343static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
d93f6302
LE
2344{
2345 struct drbd_peer_request *rs_req;
7e5fec31 2346 bool rv = false;
b6a370ba 2347
0500813f 2348 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2349 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
d93f6302
LE
2350 if (overlaps(peer_req->i.sector, peer_req->i.size,
2351 rs_req->i.sector, rs_req->i.size)) {
7e5fec31 2352 rv = true;
b6a370ba
PR
2353 break;
2354 }
2355 }
0500813f 2356 spin_unlock_irq(&device->resource->req_lock);
b6a370ba
PR
2357
2358 return rv;
2359}
2360
b411b363
PR
2361/* Called from receive_Data.
2362 * Synchronize packets on sock with packets on msock.
2363 *
2364 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
2365 * packet traveling on msock, they are still processed in the order they have
2366 * been sent.
2367 *
2368 * Note: we don't care for Ack packets overtaking P_DATA packets.
2369 *
b30ab791 2370 * In case packet_seq is larger than device->peer_seq number, there are
b411b363 2371 * outstanding packets on the msock. We wait for them to arrive.
b30ab791 2372 * In case we are the logically next packet, we update device->peer_seq
b411b363
PR
2373 * ourselves. Correctly handles 32bit wrap around.
2374 *
2375 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
2376 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
2377 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 2378 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
2379 *
2380 * returns 0 if we may process the packet,
2381 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
69a22773 2382static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
b411b363 2383{
69a22773 2384 struct drbd_device *device = peer_device->device;
b411b363 2385 DEFINE_WAIT(wait);
b411b363 2386 long timeout;
b874d231 2387 int ret = 0, tp;
7be8da07 2388
69a22773 2389 if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
7be8da07
AG
2390 return 0;
2391
b30ab791 2392 spin_lock(&device->peer_seq_lock);
b411b363 2393 for (;;) {
b30ab791
AG
2394 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2395 device->peer_seq = seq_max(device->peer_seq, peer_seq);
b411b363 2396 break;
7be8da07 2397 }
b874d231 2398
b411b363
PR
2399 if (signal_pending(current)) {
2400 ret = -ERESTARTSYS;
2401 break;
2402 }
b874d231
PR
2403
2404 rcu_read_lock();
5dd2ca19 2405 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
b874d231
PR
2406 rcu_read_unlock();
2407
2408 if (!tp)
2409 break;
2410
2411 /* Only need to wait if two_primaries is enabled */
b30ab791
AG
2412 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2413 spin_unlock(&device->peer_seq_lock);
44ed167d 2414 rcu_read_lock();
69a22773 2415 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
44ed167d 2416 rcu_read_unlock();
71b1c1eb 2417 timeout = schedule_timeout(timeout);
b30ab791 2418 spin_lock(&device->peer_seq_lock);
7be8da07 2419 if (!timeout) {
b411b363 2420 ret = -ETIMEDOUT;
d0180171 2421 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
2422 break;
2423 }
2424 }
b30ab791
AG
2425 spin_unlock(&device->peer_seq_lock);
2426 finish_wait(&device->seq_wait, &wait);
b411b363
PR
2427 return ret;
2428}
2429
688593c5
LE
2430/* see also bio_flags_to_wire()
2431 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2432 * flags and back. We may replicate to other kernel versions. */
bb3cc85e 2433static unsigned long wire_flags_to_bio_flags(u32 dpf)
76d2e7ec 2434{
688593c5
LE
2435 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2436 (dpf & DP_FUA ? REQ_FUA : 0) |
28a8f0d3 2437 (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
bb3cc85e
MC
2438}
2439
2440static unsigned long wire_flags_to_bio_op(u32 dpf)
2441{
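	/* check DP_ZEROES before DP_DISCARD: a P_ZEROES packet may also carry
	 * DP_DISCARD as an "unmapping allowed" hint (see receive_Data) */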
f31e583a 2442 if (dpf & DP_ZEROES)
45c21793 2443 return REQ_OP_WRITE_ZEROES;
f31e583a
LE
2444 if (dpf & DP_DISCARD)
2445 return REQ_OP_DISCARD;
2446 if (dpf & DP_WSAME)
2447 return REQ_OP_WRITE_SAME;
bb3cc85e
MC
2448 else
2449 return REQ_OP_WRITE;
76d2e7ec
PR
2450}
2451
b30ab791 2452static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
7be8da07
AG
2453 unsigned int size)
2454{
2455 struct drbd_interval *i;
2456
2457 repeat:
b30ab791 2458 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2459 struct drbd_request *req;
2460 struct bio_and_error m;
2461
2462 if (!i->local)
2463 continue;
2464 req = container_of(i, struct drbd_request, i);
2465 if (!(req->rq_state & RQ_POSTPONED))
2466 continue;
2467 req->rq_state &= ~RQ_POSTPONED;
2468 __req_mod(req, NEG_ACKED, &m);
0500813f 2469 spin_unlock_irq(&device->resource->req_lock);
7be8da07 2470 if (m.bio)
b30ab791 2471 complete_master_bio(device, &m);
0500813f 2472 spin_lock_irq(&device->resource->req_lock);
7be8da07
AG
2473 goto repeat;
2474 }
2475}
2476
b30ab791 2477static int handle_write_conflicts(struct drbd_device *device,
7be8da07
AG
2478 struct drbd_peer_request *peer_req)
2479{
e33b32de 2480 struct drbd_connection *connection = peer_req->peer_device->connection;
bde89a9e 2481 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
7be8da07
AG
2482 sector_t sector = peer_req->i.sector;
2483 const unsigned int size = peer_req->i.size;
2484 struct drbd_interval *i;
2485 bool equal;
2486 int err;
2487
2488 /*
2489 * Inserting the peer request into the write_requests tree will prevent
2490 * new conflicting local requests from being added.
2491 */
b30ab791 2492 drbd_insert_interval(&device->write_requests, &peer_req->i);
7be8da07
AG
2493
2494 repeat:
b30ab791 2495 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2496 if (i == &peer_req->i)
2497 continue;
08d0dabf
LE
2498 if (i->completed)
2499 continue;
7be8da07
AG
2500
2501 if (!i->local) {
2502 /*
2503 * Our peer has sent a conflicting remote request; this
2504 * should not happen in a two-node setup. Wait for the
2505 * earlier peer request to complete.
2506 */
b30ab791 2507 err = drbd_wait_misc(device, i);
7be8da07
AG
2508 if (err)
2509 goto out;
2510 goto repeat;
2511 }
2512
2513 equal = i->sector == sector && i->size == size;
2514 if (resolve_conflicts) {
2515 /*
2516 * If the peer request is fully contained within the
d4dabbe2
LE
2517 * overlapping request, it can be considered overwritten
2518 * and thus superseded; otherwise, it will be retried
2519 * once all overlapping requests have completed.
7be8da07 2520 */
d4dabbe2 2521 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2522 (i->size >> 9) >= sector + (size >> 9);
2523
2524 if (!equal)
d0180171 2525 drbd_alert(device, "Concurrent writes detected: "
7be8da07
AG
2526 "local=%llus +%u, remote=%llus +%u, "
2527 "assuming %s came first\n",
2528 (unsigned long long)i->sector, i->size,
2529 (unsigned long long)sector, size,
d4dabbe2 2530 superseded ? "local" : "remote");
7be8da07 2531
a8cd15ba 2532 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07 2533 e_send_retry_write;
a8cd15ba 2534 list_add_tail(&peer_req->w.list, &device->done_ee);
668700b4 2535 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
7be8da07
AG
2536
2537 err = -ENOENT;
2538 goto out;
2539 } else {
2540 struct drbd_request *req =
2541 container_of(i, struct drbd_request, i);
2542
2543 if (!equal)
d0180171 2544 drbd_alert(device, "Concurrent writes detected: "
7be8da07
AG
2545 "local=%llus +%u, remote=%llus +%u\n",
2546 (unsigned long long)i->sector, i->size,
2547 (unsigned long long)sector, size);
2548
2549 if (req->rq_state & RQ_LOCAL_PENDING ||
2550 !(req->rq_state & RQ_POSTPONED)) {
2551 /*
2552 * Wait for the node with the discard flag to
d4dabbe2
LE
2553 * decide if this request has been superseded
2554 * or needs to be retried.
2555 * Requests that have been superseded will
7be8da07
AG
2556 * disappear from the write_requests tree.
2557 *
2558 * In addition, wait for the conflicting
2559 * request to finish locally before submitting
2560 * the conflicting peer request.
2561 */
b30ab791 2562 err = drbd_wait_misc(device, &req->i);
7be8da07 2563 if (err) {
e33b32de 2564 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
b30ab791 2565 fail_postponed_requests(device, sector, size);
7be8da07
AG
2566 goto out;
2567 }
2568 goto repeat;
2569 }
2570 /*
2571 * Remember to restart the conflicting requests after
2572 * the new peer request has completed.
2573 */
2574 peer_req->flags |= EE_RESTART_REQUESTS;
2575 }
2576 }
2577 err = 0;
2578
2579 out:
2580 if (err)
b30ab791 2581 drbd_remove_epoch_entry_interval(device, peer_req);
7be8da07
AG
2582 return err;
2583}
2584
b411b363 2585/* mirrored write */
bde89a9e 2586static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2587{
9f4fe9ad 2588 struct drbd_peer_device *peer_device;
b30ab791 2589 struct drbd_device *device;
21ae5d7f 2590 struct net_conf *nc;
b411b363 2591 sector_t sector;
db830c46 2592 struct drbd_peer_request *peer_req;
e658983a 2593 struct p_data *p = pi->data;
7be8da07 2594 u32 peer_seq = be32_to_cpu(p->seq_num);
bb3cc85e 2595 int op, op_flags;
b411b363 2596 u32 dp_flags;
302bdeae 2597 int err, tp;
b411b363 2598
9f4fe9ad
AG
2599 peer_device = conn_peer_device(connection, pi->vnr);
2600 if (!peer_device)
4a76b161 2601 return -EIO;
9f4fe9ad 2602 device = peer_device->device;
b411b363 2603
b30ab791 2604 if (!get_ldev(device)) {
82bc0194
AG
2605 int err2;
2606
69a22773
AG
2607 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2608 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
bde89a9e 2609 atomic_inc(&connection->current_epoch->epoch_size);
69a22773 2610 err2 = drbd_drain_block(peer_device, pi->size);
82bc0194
AG
2611 if (!err)
2612 err = err2;
2613 return err;
b411b363
PR
2614 }
2615
fcefa62e
AG
2616 /*
2617 * Corresponding put_ldev done either below (on various errors), or in
2618 * drbd_peer_request_endio, if we successfully submit the data at the
2619 * end of this function.
2620 */
b411b363
PR
2621
2622 sector = be64_to_cpu(p->sector);
a0fb3c47 2623 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
db830c46 2624 if (!peer_req) {
b30ab791 2625 put_ldev(device);
82bc0194 2626 return -EIO;
b411b363
PR
2627 }
2628
a8cd15ba 2629 peer_req->w.cb = e_end_block;
21ae5d7f
LE
2630 peer_req->submit_jif = jiffies;
2631 peer_req->flags |= EE_APPLICATION;
b411b363 2632
688593c5 2633 dp_flags = be32_to_cpu(p->dp_flags);
bb3cc85e
MC
2634 op = wire_flags_to_bio_op(dp_flags);
2635 op_flags = wire_flags_to_bio_flags(dp_flags);
a0fb3c47 2636 if (pi->cmd == P_TRIM) {
f31e583a
LE
2637 D_ASSERT(peer_device, peer_req->i.size > 0);
2638 D_ASSERT(peer_device, op == REQ_OP_DISCARD);
2639 D_ASSERT(peer_device, peer_req->pages == NULL);
2640 /* need to play safe: an older DRBD sender
2641 * may mean zero-out while sending P_TRIM. */
2642 if (0 == (connection->agreed_features & DRBD_FF_WZEROES))
2643 peer_req->flags |= EE_ZEROOUT;
2644 } else if (pi->cmd == P_ZEROES) {
a0fb3c47 2645 D_ASSERT(peer_device, peer_req->i.size > 0);
45c21793 2646 D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
a0fb3c47 2647 D_ASSERT(peer_device, peer_req->pages == NULL);
f31e583a
LE
2648 /* Do (not) pass down BLKDEV_ZERO_NOUNMAP? */
2649 if (dp_flags & DP_DISCARD)
2650 peer_req->flags |= EE_TRIM;
a0fb3c47 2651 } else if (peer_req->pages == NULL) {
0b0ba1ef
AG
2652 D_ASSERT(device, peer_req->i.size == 0);
2653 D_ASSERT(device, dp_flags & DP_FLUSH);
a73ff323 2654 }
688593c5
LE
2655
2656 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2657 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2658
bde89a9e
AG
2659 spin_lock(&connection->epoch_lock);
2660 peer_req->epoch = connection->current_epoch;
db830c46
AG
2661 atomic_inc(&peer_req->epoch->epoch_size);
2662 atomic_inc(&peer_req->epoch->active);
bde89a9e 2663 spin_unlock(&connection->epoch_lock);
b411b363 2664
302bdeae 2665 rcu_read_lock();
21ae5d7f
LE
2666 nc = rcu_dereference(peer_device->connection->net_conf);
2667 tp = nc->two_primaries;
2668 if (peer_device->connection->agreed_pro_version < 100) {
2669 switch (nc->wire_protocol) {
2670 case DRBD_PROT_C:
2671 dp_flags |= DP_SEND_WRITE_ACK;
2672 break;
2673 case DRBD_PROT_B:
2674 dp_flags |= DP_SEND_RECEIVE_ACK;
2675 break;
2676 }
2677 }
302bdeae 2678 rcu_read_unlock();
21ae5d7f
LE
2679
2680 if (dp_flags & DP_SEND_WRITE_ACK) {
2681 peer_req->flags |= EE_SEND_WRITE_ACK;
2682 inc_unacked(device);
2683 /* corresponding dec_unacked() in e_end_block()
2684 * respective _drbd_clear_done_ee */
2685 }
2686
2687 if (dp_flags & DP_SEND_RECEIVE_ACK) {
2688 /* I really don't like it that the receiver thread
2689 * sends on the msock, but anyways */
5dd2ca19 2690 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
21ae5d7f
LE
2691 }
2692
302bdeae 2693 if (tp) {
21ae5d7f
LE
2694 /* two primaries implies protocol C */
2695 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
302bdeae 2696 peer_req->flags |= EE_IN_INTERVAL_TREE;
69a22773 2697 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
7be8da07 2698 if (err)
b411b363 2699 goto out_interrupted;
0500813f 2700 spin_lock_irq(&device->resource->req_lock);
b30ab791 2701 err = handle_write_conflicts(device, peer_req);
7be8da07 2702 if (err) {
0500813f 2703 spin_unlock_irq(&device->resource->req_lock);
7be8da07 2704 if (err == -ENOENT) {
b30ab791 2705 put_ldev(device);
82bc0194 2706 return 0;
b411b363 2707 }
7be8da07 2708 goto out_interrupted;
b411b363 2709 }
b874d231 2710 } else {
69a22773 2711 update_peer_seq(peer_device, peer_seq);
0500813f 2712 spin_lock_irq(&device->resource->req_lock);
b874d231 2713 }
9104d31a
LE
 2714	/* TRIM and WRITE_SAME are processed synchronously:
 2715	 * we wait for all pending requests, that is, we wait for
a0fb3c47
LE
2716 * active_ee to become empty in drbd_submit_peer_request();
2717 * better not add ourselves here. */
f31e583a 2718 if ((peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) == 0)
b9ed7080 2719 list_add_tail(&peer_req->w.list, &device->active_ee);
0500813f 2720 spin_unlock_irq(&device->resource->req_lock);
b411b363 2721
b30ab791
AG
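	/* as sync target, do not apply this application write while a resync
	 * write to an overlapping range is still in flight */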
2722 if (device->state.conn == C_SYNC_TARGET)
2723 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
b411b363 2724
b30ab791 2725 if (device->state.pdsk < D_INCONSISTENT) {
b411b363 2726 /* In case we have the only disk of the cluster, */
b30ab791 2727 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
db830c46 2728 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
4dd726f0 2729 drbd_al_begin_io(device, &peer_req->i);
21ae5d7f 2730 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
b411b363
PR
2731 }
2732
bb3cc85e
MC
2733 err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2734 DRBD_FAULT_DT_WR);
82bc0194
AG
2735 if (!err)
2736 return 0;
b411b363 2737
10f6d992 2738 /* don't care for the reason here */
d0180171 2739 drbd_err(device, "submit failed, triggering re-connect\n");
0500813f 2740 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2741 list_del(&peer_req->w.list);
b30ab791 2742 drbd_remove_epoch_entry_interval(device, peer_req);
0500813f 2743 spin_unlock_irq(&device->resource->req_lock);
21ae5d7f
LE
2744 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2745 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
b30ab791 2746 drbd_al_complete_io(device, &peer_req->i);
21ae5d7f 2747 }
22cc37a9 2748
b411b363 2749out_interrupted:
7e5fec31 2750 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
b30ab791
AG
2751 put_ldev(device);
2752 drbd_free_peer_req(device, peer_req);
82bc0194 2753 return err;
b411b363
PR
2754}
2755
0f0601f4
LE
2756/* We may throttle resync, if the lower device seems to be busy,
2757 * and current sync rate is above c_min_rate.
2758 *
2759 * To decide whether or not the lower device is busy, we use a scheme similar
 2760 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 2761 * amount (more than 64 sectors) of activity we cannot account for with our own resync
2762 * activity, it obviously is "busy".
2763 *
2764 * The current sync rate used here uses only the most recent two step marks,
2765 * to have a short time average so we can react faster.
2766 */
ad3fee79
LE
2767bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2768 bool throttle_if_app_is_waiting)
0f0601f4 2769{
e3555d85 2770 struct lc_element *tmp;
ad3fee79 2771 bool throttle = drbd_rs_c_min_rate_throttle(device);
daeda1cc 2772
ad3fee79
LE
2773 if (!throttle || throttle_if_app_is_waiting)
2774 return throttle;
0f0601f4 2775
b30ab791
AG
2776 spin_lock_irq(&device->al_lock);
2777 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
e3555d85
PR
2778 if (tmp) {
2779 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
e8299874
LE
2780 if (test_bit(BME_PRIORITY, &bm_ext->flags))
2781 throttle = false;
ad3fee79
LE
2782 /* Do not slow down if app IO is already waiting for this extent,
2783 * and our progress is necessary for application IO to complete. */
e3555d85 2784 }
b30ab791 2785 spin_unlock_irq(&device->al_lock);
e3555d85 2786
e8299874
LE
2787 return throttle;
2788}
2789
2790bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2791{
2792 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
2793 unsigned long db, dt, dbdt;
2794 unsigned int c_min_rate;
2795 int curr_events;
2796
2797 rcu_read_lock();
2798 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2799 rcu_read_unlock();
2800
2801 /* feature disabled? */
2802 if (c_min_rate == 0)
2803 return false;
2804
59767fbd 2805 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
b30ab791 2806 atomic_read(&device->rs_sect_ev);
ad3fee79
LE
2807
2808 if (atomic_read(&device->ap_actlog_cnt)
ff8bd88b 2809 || curr_events - device->rs_last_events > 64) {
0f0601f4
LE
2810 unsigned long rs_left;
2811 int i;
2812
b30ab791 2813 device->rs_last_events = curr_events;
0f0601f4
LE
2814
2815 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2816 * approx. */
b30ab791 2817 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2649f080 2818
b30ab791
AG
2819 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2820 rs_left = device->ov_left;
2649f080 2821 else
b30ab791 2822 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
0f0601f4 2823
b30ab791 2824 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
0f0601f4
LE
2825 if (!dt)
2826 dt++;
b30ab791 2827 db = device->rs_mark_left[i] - rs_left;
0f0601f4
LE
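		/* db is in bitmap bits (4 KiB of backing storage each), dt in seconds;
		 * dbdt is thus the recent resync rate in KiB/s, comparable to c_min_rate */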
2828 dbdt = Bit2KB(db/dt);
2829
daeda1cc 2830 if (dbdt > c_min_rate)
e8299874 2831 return true;
0f0601f4 2832 }
e8299874 2833 return false;
0f0601f4
LE
2834}
2835
bde89a9e 2836static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2837{
9f4fe9ad 2838 struct drbd_peer_device *peer_device;
b30ab791 2839 struct drbd_device *device;
b411b363 2840 sector_t sector;
4a76b161 2841 sector_t capacity;
db830c46 2842 struct drbd_peer_request *peer_req;
b411b363 2843 struct digest_info *di = NULL;
b18b37be 2844 int size, verb;
b411b363 2845 unsigned int fault_type;
e658983a 2846 struct p_block_req *p = pi->data;
4a76b161 2847
9f4fe9ad
AG
2848 peer_device = conn_peer_device(connection, pi->vnr);
2849 if (!peer_device)
4a76b161 2850 return -EIO;
9f4fe9ad 2851 device = peer_device->device;
b30ab791 2852 capacity = drbd_get_capacity(device->this_bdev);
b411b363
PR
2853
2854 sector = be64_to_cpu(p->sector);
2855 size = be32_to_cpu(p->blksize);
2856
c670a398 2857 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
d0180171 2858 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
b411b363 2859 (unsigned long long)sector, size);
82bc0194 2860 return -EINVAL;
b411b363
PR
2861 }
2862 if (sector + (size>>9) > capacity) {
d0180171 2863 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
b411b363 2864 (unsigned long long)sector, size);
82bc0194 2865 return -EINVAL;
b411b363
PR
2866 }
2867
b30ab791 2868 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
b18b37be 2869 verb = 1;
e2857216 2870 switch (pi->cmd) {
b18b37be 2871 case P_DATA_REQUEST:
69a22773 2872 drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
b18b37be 2873 break;
700ca8c0 2874 case P_RS_THIN_REQ:
b18b37be
PR
2875 case P_RS_DATA_REQUEST:
2876 case P_CSUM_RS_REQUEST:
2877 case P_OV_REQUEST:
69a22773 2878 drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
b18b37be
PR
2879 break;
2880 case P_OV_REPLY:
2881 verb = 0;
b30ab791 2882 dec_rs_pending(device);
69a22773 2883 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
b18b37be
PR
2884 break;
2885 default:
49ba9b1b 2886 BUG();
b18b37be
PR
2887 }
2888 if (verb && __ratelimit(&drbd_ratelimit_state))
d0180171 2889 drbd_err(device, "Can not satisfy peer's read request, "
b411b363 2890 "no local data.\n");
b18b37be 2891
a821cc4a 2892 /* drain possibly payload */
69a22773 2893 return drbd_drain_block(peer_device, pi->size);
b411b363
PR
2894 }
2895
2896 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2897 * "criss-cross" setup, that might cause write-out on some other DRBD,
2898 * which in turn might block on the other node at this very place. */
a0fb3c47 2899 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
9104d31a 2900 size, GFP_NOIO);
db830c46 2901 if (!peer_req) {
b30ab791 2902 put_ldev(device);
82bc0194 2903 return -ENOMEM;
b411b363
PR
2904 }
2905
e2857216 2906 switch (pi->cmd) {
b411b363 2907 case P_DATA_REQUEST:
a8cd15ba 2908 peer_req->w.cb = w_e_end_data_req;
b411b363 2909 fault_type = DRBD_FAULT_DT_RD;
80a40e43 2910 /* application IO, don't drbd_rs_begin_io */
21ae5d7f 2911 peer_req->flags |= EE_APPLICATION;
80a40e43
LE
2912 goto submit;
2913
700ca8c0
PR
2914 case P_RS_THIN_REQ:
2915 /* If at some point in the future we have a smart way to
2916 find out if this data block is completely deallocated,
2917 then we would do something smarter here than reading
2918 the block... */
2919 peer_req->flags |= EE_RS_THIN_REQ;
df561f66 2920 fallthrough;
b411b363 2921 case P_RS_DATA_REQUEST:
a8cd15ba 2922 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2923 fault_type = DRBD_FAULT_RS_RD;
5f9915bb 2924 /* used in the sector offset progress display */
b30ab791 2925 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2926 break;
2927
2928 case P_OV_REPLY:
2929 case P_CSUM_RS_REQUEST:
2930 fault_type = DRBD_FAULT_RS_RD;
e2857216 2931 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2932 if (!di)
2933 goto out_free_e;
2934
e2857216 2935 di->digest_size = pi->size;
b411b363
PR
2936 di->digest = (((char *)di)+sizeof(struct digest_info));
2937
db830c46
AG
2938 peer_req->digest = di;
2939 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2940
9f4fe9ad 2941 if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
b411b363
PR
2942 goto out_free_e;
2943
e2857216 2944 if (pi->cmd == P_CSUM_RS_REQUEST) {
9f4fe9ad 2945 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
a8cd15ba 2946 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb 2947 /* used in the sector offset progress display */
b30ab791 2948 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
aaaba345
LE
2949 /* remember to report stats in drbd_resync_finished */
2950 device->use_csums = true;
e2857216 2951 } else if (pi->cmd == P_OV_REPLY) {
2649f080 2952 /* track progress, we may need to throttle */
b30ab791 2953 atomic_add(size >> 9, &device->rs_sect_in);
a8cd15ba 2954 peer_req->w.cb = w_e_end_ov_reply;
b30ab791 2955 dec_rs_pending(device);
0f0601f4
LE
2956 /* drbd_rs_begin_io done when we sent this request,
2957 * but accounting still needs to be done. */
2958 goto submit_for_resync;
b411b363
PR
2959 }
2960 break;
2961
2962 case P_OV_REQUEST:
b30ab791 2963 if (device->ov_start_sector == ~(sector_t)0 &&
9f4fe9ad 2964 peer_device->connection->agreed_pro_version >= 90) {
de228bba
LE
2965 unsigned long now = jiffies;
2966 int i;
b30ab791
AG
2967 device->ov_start_sector = sector;
2968 device->ov_position = sector;
2969 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2970 device->rs_total = device->ov_left;
de228bba 2971 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
b30ab791
AG
2972 device->rs_mark_left[i] = device->ov_left;
2973 device->rs_mark_time[i] = now;
de228bba 2974 }
d0180171 2975 drbd_info(device, "Online Verify start sector: %llu\n",
b411b363
PR
2976 (unsigned long long)sector);
2977 }
a8cd15ba 2978 peer_req->w.cb = w_e_end_ov_req;
b411b363 2979 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2980 break;
2981
b411b363 2982 default:
49ba9b1b 2983 BUG();
b411b363
PR
2984 }
2985
0f0601f4
LE
2986 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2987 * wrt the receiver, but it is not as straightforward as it may seem.
2988 * Various places in the resync start and stop logic assume resync
2989 * requests are processed in order, requeuing this on the worker thread
2990 * introduces a bunch of new code for synchronization between threads.
2991 *
2992 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2993 * "forever", throttling after drbd_rs_begin_io will lock that extent
2994 * for application writes for the same time. For now, just throttle
2995 * here, where the rest of the code expects the receiver to sleep for
2996 * a while, anyways.
2997 */
2998
2999 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
3000 * this defers syncer requests for some time, before letting at least
 3001 * one request through. The resync controller on the receiving side
3002 * will adapt to the incoming rate accordingly.
3003 *
3004 * We cannot throttle here if remote is Primary/SyncTarget:
3005 * we would also throttle its application reads.
3006 * In that case, throttling is done on the SyncTarget only.
3007 */
c5a2c150
LE
3008
3009 /* Even though this may be a resync request, we do add to "read_ee";
3010 * "sync_ee" is only used for resync WRITEs.
3011 * Add to list early, so debugfs can find this request
3012 * even if we have to sleep below. */
3013 spin_lock_irq(&device->resource->req_lock);
3014 list_add_tail(&peer_req->w.list, &device->read_ee);
3015 spin_unlock_irq(&device->resource->req_lock);
3016
944410e9 3017 update_receiver_timing_details(connection, drbd_rs_should_slow_down);
ad3fee79
LE
3018 if (device->state.peer != R_PRIMARY
3019 && drbd_rs_should_slow_down(device, sector, false))
e3555d85 3020 schedule_timeout_uninterruptible(HZ/10);
944410e9 3021 update_receiver_timing_details(connection, drbd_rs_begin_io);
b30ab791 3022 if (drbd_rs_begin_io(device, sector))
80a40e43 3023 goto out_free_e;
b411b363 3024
0f0601f4 3025submit_for_resync:
b30ab791 3026 atomic_add(size >> 9, &device->rs_sect_ev);
0f0601f4 3027
80a40e43 3028submit:
944410e9 3029 update_receiver_timing_details(connection, drbd_submit_peer_request);
b30ab791 3030 inc_unacked(device);
bb3cc85e
MC
3031 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
3032 fault_type) == 0)
82bc0194 3033 return 0;
b411b363 3034
10f6d992 3035 /* don't care for the reason here */
d0180171 3036 drbd_err(device, "submit failed, triggering re-connect\n");
c5a2c150
LE
3037
3038out_free_e:
0500813f 3039 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 3040 list_del(&peer_req->w.list);
0500813f 3041 spin_unlock_irq(&device->resource->req_lock);
22cc37a9
LE
3042 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
3043
b30ab791
AG
3044 put_ldev(device);
3045 drbd_free_peer_req(device, peer_req);
82bc0194 3046 return -EIO;
b411b363
PR
3047}
3048
69a22773
AG
3049/**
3050 * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
3051 */
3052static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3053{
69a22773 3054 struct drbd_device *device = peer_device->device;
b411b363
PR
3055 int self, peer, rv = -100;
3056 unsigned long ch_self, ch_peer;
44ed167d 3057 enum drbd_after_sb_p after_sb_0p;
b411b363 3058
b30ab791
AG
3059 self = device->ldev->md.uuid[UI_BITMAP] & 1;
3060 peer = device->p_uuid[UI_BITMAP] & 1;
b411b363 3061
b30ab791
AG
3062 ch_peer = device->p_uuid[UI_SIZE];
3063 ch_self = device->comm_bm_set;
b411b363 3064
44ed167d 3065 rcu_read_lock();
69a22773 3066 after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
44ed167d
PR
3067 rcu_read_unlock();
3068 switch (after_sb_0p) {
b411b363
PR
3069 case ASB_CONSENSUS:
3070 case ASB_DISCARD_SECONDARY:
3071 case ASB_CALL_HELPER:
44ed167d 3072 case ASB_VIOLENTLY:
d0180171 3073 drbd_err(device, "Configuration error.\n");
b411b363
PR
3074 break;
3075 case ASB_DISCONNECT:
3076 break;
3077 case ASB_DISCARD_YOUNGER_PRI:
3078 if (self == 0 && peer == 1) {
3079 rv = -1;
3080 break;
3081 }
3082 if (self == 1 && peer == 0) {
3083 rv = 1;
3084 break;
3085 }
df561f66 3086 fallthrough; /* to one of the other strategies */
b411b363
PR
3087 case ASB_DISCARD_OLDER_PRI:
3088 if (self == 0 && peer == 1) {
3089 rv = 1;
3090 break;
3091 }
3092 if (self == 1 && peer == 0) {
3093 rv = -1;
3094 break;
3095 }
3096 /* Else fall through to one of the other strategies... */
d0180171 3097 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
b411b363 3098 "Using discard-least-changes instead\n");
df561f66 3099 fallthrough;
b411b363
PR
3100 case ASB_DISCARD_ZERO_CHG:
3101 if (ch_peer == 0 && ch_self == 0) {
69a22773 3102 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
b411b363
PR
3103 ? -1 : 1;
3104 break;
3105 } else {
3106 if (ch_peer == 0) { rv = 1; break; }
3107 if (ch_self == 0) { rv = -1; break; }
3108 }
44ed167d 3109 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363 3110 break;
df561f66 3111 fallthrough;
b411b363
PR
3112 case ASB_DISCARD_LEAST_CHG:
3113 if (ch_self < ch_peer)
3114 rv = -1;
3115 else if (ch_self > ch_peer)
3116 rv = 1;
3117 else /* ( ch_self == ch_peer ) */
3118 /* Well, then use something else. */
69a22773 3119 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
b411b363
PR
3120 ? -1 : 1;
3121 break;
3122 case ASB_DISCARD_LOCAL:
3123 rv = -1;
3124 break;
3125 case ASB_DISCARD_REMOTE:
3126 rv = 1;
3127 }
3128
3129 return rv;
3130}
3131
69a22773
AG
3132/**
3133 * drbd_asb_recover_1p - Recover after split-brain with one remaining primary
3134 */
3135static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3136{
69a22773 3137 struct drbd_device *device = peer_device->device;
6184ea21 3138 int hg, rv = -100;
44ed167d 3139 enum drbd_after_sb_p after_sb_1p;
b411b363 3140
44ed167d 3141 rcu_read_lock();
69a22773 3142 after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
44ed167d
PR
3143 rcu_read_unlock();
3144 switch (after_sb_1p) {
b411b363
PR
3145 case ASB_DISCARD_YOUNGER_PRI:
3146 case ASB_DISCARD_OLDER_PRI:
3147 case ASB_DISCARD_LEAST_CHG:
3148 case ASB_DISCARD_LOCAL:
3149 case ASB_DISCARD_REMOTE:
44ed167d 3150 case ASB_DISCARD_ZERO_CHG:
d0180171 3151 drbd_err(device, "Configuration error.\n");
b411b363
PR
3152 break;
3153 case ASB_DISCONNECT:
3154 break;
3155 case ASB_CONSENSUS:
69a22773 3156 hg = drbd_asb_recover_0p(peer_device);
b30ab791 3157 if (hg == -1 && device->state.role == R_SECONDARY)
b411b363 3158 rv = hg;
b30ab791 3159 if (hg == 1 && device->state.role == R_PRIMARY)
b411b363
PR
3160 rv = hg;
3161 break;
3162 case ASB_VIOLENTLY:
69a22773 3163 rv = drbd_asb_recover_0p(peer_device);
b411b363
PR
3164 break;
3165 case ASB_DISCARD_SECONDARY:
b30ab791 3166 return device->state.role == R_PRIMARY ? 1 : -1;
b411b363 3167 case ASB_CALL_HELPER:
69a22773 3168 hg = drbd_asb_recover_0p(peer_device);
b30ab791 3169 if (hg == -1 && device->state.role == R_PRIMARY) {
bb437946
AG
3170 enum drbd_state_rv rv2;
3171
b411b363
PR
3172 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3173 * we might be here in C_WF_REPORT_PARAMS which is transient.
3174 * we do not need to wait for the after state change work either. */
b30ab791 3175 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
bb437946 3176 if (rv2 != SS_SUCCESS) {
b30ab791 3177 drbd_khelper(device, "pri-lost-after-sb");
b411b363 3178 } else {
d0180171 3179 drbd_warn(device, "Successfully gave up primary role.\n");
b411b363
PR
3180 rv = hg;
3181 }
3182 } else
3183 rv = hg;
3184 }
3185
3186 return rv;
3187}
3188
69a22773
AG
3189/**
3190 * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
3191 */
3192static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3193{
69a22773 3194 struct drbd_device *device = peer_device->device;
6184ea21 3195 int hg, rv = -100;
44ed167d 3196 enum drbd_after_sb_p after_sb_2p;
b411b363 3197
44ed167d 3198 rcu_read_lock();
69a22773 3199 after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
44ed167d
PR
3200 rcu_read_unlock();
3201 switch (after_sb_2p) {
b411b363
PR
3202 case ASB_DISCARD_YOUNGER_PRI:
3203 case ASB_DISCARD_OLDER_PRI:
3204 case ASB_DISCARD_LEAST_CHG:
3205 case ASB_DISCARD_LOCAL:
3206 case ASB_DISCARD_REMOTE:
3207 case ASB_CONSENSUS:
3208 case ASB_DISCARD_SECONDARY:
44ed167d 3209 case ASB_DISCARD_ZERO_CHG:
d0180171 3210 drbd_err(device, "Configuration error.\n");
b411b363
PR
3211 break;
3212 case ASB_VIOLENTLY:
69a22773 3213 rv = drbd_asb_recover_0p(peer_device);
b411b363
PR
3214 break;
3215 case ASB_DISCONNECT:
3216 break;
3217 case ASB_CALL_HELPER:
69a22773 3218 hg = drbd_asb_recover_0p(peer_device);
b411b363 3219 if (hg == -1) {
bb437946
AG
3220 enum drbd_state_rv rv2;
3221
b411b363
PR
3222 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3223 * we might be here in C_WF_REPORT_PARAMS which is transient.
3224 * we do not need to wait for the after state change work either. */
b30ab791 3225 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
bb437946 3226 if (rv2 != SS_SUCCESS) {
b30ab791 3227 drbd_khelper(device, "pri-lost-after-sb");
b411b363 3228 } else {
d0180171 3229 drbd_warn(device, "Successfully gave up primary role.\n");
b411b363
PR
3230 rv = hg;
3231 }
3232 } else
3233 rv = hg;
3234 }
3235
3236 return rv;
3237}
3238
b30ab791 3239static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
b411b363
PR
3240 u64 bits, u64 flags)
3241{
3242 if (!uuid) {
d0180171 3243 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
b411b363
PR
3244 return;
3245 }
d0180171 3246 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
b411b363
PR
3247 text,
3248 (unsigned long long)uuid[UI_CURRENT],
3249 (unsigned long long)uuid[UI_BITMAP],
3250 (unsigned long long)uuid[UI_HISTORY_START],
3251 (unsigned long long)uuid[UI_HISTORY_END],
3252 (unsigned long long)bits,
3253 (unsigned long long)flags);
3254}
3255
3256/*
3257 100 after split brain try auto recover
3258 2 C_SYNC_SOURCE set BitMap
3259 1 C_SYNC_SOURCE use BitMap
3260 0 no Sync
3261 -1 C_SYNC_TARGET use BitMap
3262 -2 C_SYNC_TARGET set BitMap
3263 -100 after split brain, disconnect
3264-1000 unrelated data
4a23f264
PR
3265-1091 requires proto 91
3266-1096 requires proto 96
b411b363 3267 */
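/* How drbd_sync_handshake() below consumes these values: the sign selects
 * the resync direction (positive: this node becomes SyncSource, negative:
 * SyncTarget), abs() >= 2 forces a full sync instead of a bitmap-based one,
 * +/-100 enters the split-brain handling, and -1000 aborts the connect as
 * "unrelated data".  Values below -0x10000 encode an unmet requirement:
 * bits 0-7 carry the minimum protocol version, bits 8-15 the required
 * feature flags (see rule_nr 41 below). */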
f2d3d75b
LE
3268
3269static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
b411b363 3270{
44a4d551
LE
3271 struct drbd_peer_device *const peer_device = first_peer_device(device);
3272 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
b411b363
PR
3273 u64 self, peer;
3274 int i, j;
3275
b30ab791
AG
3276 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3277 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3278
3279 *rule_nr = 10;
3280 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
3281 return 0;
3282
3283 *rule_nr = 20;
3284 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
3285 peer != UUID_JUST_CREATED)
3286 return -2;
3287
3288 *rule_nr = 30;
3289 if (self != UUID_JUST_CREATED &&
3290 (peer == UUID_JUST_CREATED || peer == (u64)0))
3291 return 2;
3292
3293 if (self == peer) {
3294 int rct, dc; /* roles at crash time */
3295
b30ab791 3296 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
b411b363 3297
44a4d551 3298 if (connection->agreed_pro_version < 91)
4a23f264 3299 return -1091;
b411b363 3300
b30ab791
AG
3301 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3302 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
d0180171 3303 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
b30ab791
AG
3304 drbd_uuid_move_history(device);
3305 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3306 device->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3307
b30ab791
AG
3308 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3309 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3310 *rule_nr = 34;
3311 } else {
d0180171 3312 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
b411b363
PR
3313 *rule_nr = 36;
3314 }
3315
3316 return 1;
3317 }
3318
b30ab791 3319 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
b411b363 3320
44a4d551 3321 if (connection->agreed_pro_version < 91)
4a23f264 3322 return -1091;
b411b363 3323
b30ab791
AG
3324 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3325 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
d0180171 3326 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
b411b363 3327
b30ab791
AG
3328 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3329 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3330 device->p_uuid[UI_BITMAP] = 0UL;
b411b363 3331
b30ab791 3332 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363
PR
3333 *rule_nr = 35;
3334 } else {
d0180171 3335 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
b411b363
PR
3336 *rule_nr = 37;
3337 }
3338
3339 return -1;
3340 }
3341
3342 /* Common power [off|failure] */
b30ab791
AG
3343 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3344 (device->p_uuid[UI_FLAGS] & 2);
b411b363
PR
3345 /* lowest bit is set when we were primary,
3346 * next bit (weight 2) is set when peer was primary */
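	/* Worked example: if this node has CRASHED_PRIMARY set and the peer's
	 * UI_FLAGS also carries the "peer was primary" bit, rct == 1 + 2 == 3,
	 * and the tie is broken via the RESOLVE_CONFLICTS connection flag in
	 * the switch further down. */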
3347 *rule_nr = 40;
3348
f2d3d75b
LE
3349 /* Neither has the "crashed primary" flag set,
 3350 * only a replication link hiccup. */
3351 if (rct == 0)
3352 return 0;
3353
3354 /* Current UUID equal and no bitmap uuid; does not necessarily
3355 * mean this was a "simultaneous hard crash", maybe IO was
3356 * frozen, so no UUID-bump happened.
3357 * This is a protocol change, overload DRBD_FF_WSAME as flag
3358 * for "new-enough" peer DRBD version. */
3359 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3360 *rule_nr = 41;
3361 if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3362 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3363 return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3364 }
3365 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3366 /* At least one has the "crashed primary" bit set,
3367 * both are primary now, but neither has rotated its UUIDs?
3368 * "Can not happen." */
3369 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3370 return -100;
3371 }
3372 if (device->state.role == R_PRIMARY)
3373 return 1;
3374 return -1;
3375 }
3376
3377 /* Both are secondary.
3378 * Really looks like recovery from simultaneous hard crash.
3379 * Check which had been primary before, and arbitrate. */
b411b363 3380 switch (rct) {
f2d3d75b 3381 case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
b411b363
PR
3382 case 1: /* self_pri && !peer_pri */ return 1;
3383 case 2: /* !self_pri && peer_pri */ return -1;
3384 case 3: /* self_pri && peer_pri */
44a4d551 3385 dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
b411b363
PR
3386 return dc ? -1 : 1;
3387 }
3388 }
3389
3390 *rule_nr = 50;
b30ab791 3391 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3392 if (self == peer)
3393 return -1;
3394
3395 *rule_nr = 51;
b30ab791 3396 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3397 if (self == peer) {
44a4d551 3398 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3399 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3400 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3401 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 3402 /* The last P_SYNC_UUID did not get through. Undo the modifications of the
 3403 peer's UUIDs that were made at the last start of resync as sync source. */
3404
44a4d551 3405 if (connection->agreed_pro_version < 91)
4a23f264 3406 return -1091;
b411b363 3407
b30ab791
AG
3408 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3409 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
4a23f264 3410
d0180171 3411 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
b30ab791 3412 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
4a23f264 3413
b411b363
PR
3414 return -1;
3415 }
3416 }
3417
3418 *rule_nr = 60;
b30ab791 3419 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
b411b363 3420 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3421 peer = device->p_uuid[i] & ~((u64)1);
b411b363
PR
3422 if (self == peer)
3423 return -2;
3424 }
3425
3426 *rule_nr = 70;
b30ab791
AG
3427 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3428 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3429 if (self == peer)
3430 return 1;
3431
3432 *rule_nr = 71;
b30ab791 3433 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3434 if (self == peer) {
44a4d551 3435 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3436 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3437 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3438 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 3439 /* The last P_SYNC_UUID did not get through. Undo the modifications of our
 3440 UUIDs that were made at the last start of resync as sync source. */
3441
44a4d551 3442 if (connection->agreed_pro_version < 91)
4a23f264 3443 return -1091;
b411b363 3444
b30ab791
AG
3445 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3446 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 3447
d0180171 3448 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
b30ab791
AG
3449 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3450 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3451
3452 return 1;
3453 }
3454 }
3455
3456
3457 *rule_nr = 80;
b30ab791 3458 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363 3459 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3460 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363
PR
3461 if (self == peer)
3462 return 2;
3463 }
3464
3465 *rule_nr = 90;
b30ab791
AG
3466 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3467 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3468 if (self == peer && self != ((u64)0))
3469 return 100;
3470
3471 *rule_nr = 100;
3472 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3473 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363 3474 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
b30ab791 3475 peer = device->p_uuid[j] & ~((u64)1);
b411b363
PR
3476 if (self == peer)
3477 return -100;
3478 }
3479 }
3480
3481 return -1000;
3482}
3483
3484/* drbd_sync_handshake() returns the new conn state on success, or
 3485 C_MASK on failure.
3486 */
69a22773
AG
3487static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3488 enum drbd_role peer_role,
b411b363
PR
3489 enum drbd_disk_state peer_disk) __must_hold(local)
3490{
69a22773 3491 struct drbd_device *device = peer_device->device;
b411b363
PR
3492 enum drbd_conns rv = C_MASK;
3493 enum drbd_disk_state mydisk;
44ed167d 3494 struct net_conf *nc;
d29e89e3 3495 int hg, rule_nr, rr_conflict, tentative, always_asbp;
b411b363 3496
b30ab791 3497 mydisk = device->state.disk;
b411b363 3498 if (mydisk == D_NEGOTIATING)
b30ab791 3499 mydisk = device->new_state_tmp.disk;
b411b363 3500
d0180171 3501 drbd_info(device, "drbd_sync_handshake:\n");
9f2247bb 3502
b30ab791
AG
3503 spin_lock_irq(&device->ldev->md.uuid_lock);
3504 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3505 drbd_uuid_dump(device, "peer", device->p_uuid,
3506 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363 3507
f2d3d75b 3508 hg = drbd_uuid_compare(device, peer_role, &rule_nr);
b30ab791 3509 spin_unlock_irq(&device->ldev->md.uuid_lock);
b411b363 3510
d0180171 3511 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
b411b363
PR
3512
3513 if (hg == -1000) {
d0180171 3514 drbd_alert(device, "Unrelated data, aborting!\n");
b411b363
PR
3515 return C_MASK;
3516 }
f2d3d75b
LE
3517 if (hg < -0x10000) {
3518 int proto, fflags;
3519 hg = -hg;
3520 proto = hg & 0xff;
3521 fflags = (hg >> 8) & 0xff;
3522 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3523 proto, fflags);
3524 return C_MASK;
3525 }
4a23f264 3526 if (hg < -1000) {
d0180171 3527 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
3528 return C_MASK;
3529 }
3530
3531 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3532 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
3533 int f = (hg == -100) || abs(hg) == 2;
3534 hg = mydisk > D_INCONSISTENT ? 1 : -1;
3535 if (f)
3536 hg = hg*2;
d0180171 3537 drbd_info(device, "Becoming sync %s due to disk states.\n",
b411b363
PR
3538 hg > 0 ? "source" : "target");
3539 }
3540
3a11a487 3541 if (abs(hg) == 100)
b30ab791 3542 drbd_khelper(device, "initial-split-brain");
3a11a487 3543
44ed167d 3544 rcu_read_lock();
69a22773 3545 nc = rcu_dereference(peer_device->connection->net_conf);
d29e89e3
RK
3546 always_asbp = nc->always_asbp;
3547 rr_conflict = nc->rr_conflict;
3548 tentative = nc->tentative;
3549 rcu_read_unlock();
44ed167d 3550
d29e89e3 3551 if (hg == 100 || (hg == -100 && always_asbp)) {
b30ab791 3552 int pcount = (device->state.role == R_PRIMARY)
b411b363
PR
3553 + (peer_role == R_PRIMARY);
3554 int forced = (hg == -100);
3555
3556 switch (pcount) {
3557 case 0:
69a22773 3558 hg = drbd_asb_recover_0p(peer_device);
b411b363
PR
3559 break;
3560 case 1:
69a22773 3561 hg = drbd_asb_recover_1p(peer_device);
b411b363
PR
3562 break;
3563 case 2:
69a22773 3564 hg = drbd_asb_recover_2p(peer_device);
b411b363
PR
3565 break;
3566 }
3567 if (abs(hg) < 100) {
d0180171 3568 drbd_warn(device, "Split-Brain detected, %d primaries, "
b411b363
PR
3569 "automatically solved. Sync from %s node\n",
3570 pcount, (hg < 0) ? "peer" : "this");
3571 if (forced) {
d0180171 3572 drbd_warn(device, "Doing a full sync, since"
b411b363
PR
3573 " UUIDs where ambiguous.\n");
3574 hg = hg*2;
3575 }
3576 }
3577 }
3578
3579 if (hg == -100) {
b30ab791 3580 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
b411b363 3581 hg = -1;
b30ab791 3582 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
b411b363
PR
3583 hg = 1;
3584
3585 if (abs(hg) < 100)
d0180171 3586 drbd_warn(device, "Split-Brain detected, manually solved. "
b411b363
PR
3587 "Sync from %s node\n",
3588 (hg < 0) ? "peer" : "this");
3589 }
3590
3591 if (hg == -100) {
580b9767
LE
3592 /* FIXME this log message is not correct if we end up here
3593 * after an attempted attach on a diskless node.
3594 * We just refuse to attach -- well, we drop the "connection"
3595 * to that disk, in a way... */
d0180171 3596 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
b30ab791 3597 drbd_khelper(device, "split-brain");
b411b363
PR
3598 return C_MASK;
3599 }
3600
3601 if (hg > 0 && mydisk <= D_INCONSISTENT) {
d0180171 3602 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
b411b363
PR
3603 return C_MASK;
3604 }
3605
3606 if (hg < 0 && /* by intention we do not use mydisk here. */
b30ab791 3607 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
44ed167d 3608 switch (rr_conflict) {
b411b363 3609 case ASB_CALL_HELPER:
b30ab791 3610 drbd_khelper(device, "pri-lost");
df561f66 3611 fallthrough;
b411b363 3612 case ASB_DISCONNECT:
d0180171 3613 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
b411b363
PR
3614 return C_MASK;
3615 case ASB_VIOLENTLY:
d0180171 3616 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
b411b363
PR
3617 "assumption\n");
3618 }
3619 }
3620
69a22773 3621 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
cf14c2e9 3622 if (hg == 0)
d0180171 3623 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
cf14c2e9 3624 else
d0180171 3625 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
cf14c2e9
PR
3626 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3627 abs(hg) >= 2 ? "full" : "bit-map based");
3628 return C_MASK;
3629 }
3630
b411b363 3631 if (abs(hg) >= 2) {
d0180171 3632 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
b30ab791 3633 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
20ceb2b2 3634 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3635 return C_MASK;
3636 }
3637
3638 if (hg > 0) { /* become sync source. */
3639 rv = C_WF_BITMAP_S;
3640 } else if (hg < 0) { /* become sync target */
3641 rv = C_WF_BITMAP_T;
3642 } else {
3643 rv = C_CONNECTED;
b30ab791 3644 if (drbd_bm_total_weight(device)) {
d0180171 3645 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
b30ab791 3646 drbd_bm_total_weight(device));
b411b363
PR
3647 }
3648 }
3649
3650 return rv;
3651}
3652
f179d76d 3653static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3654{
3655 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3656 if (peer == ASB_DISCARD_REMOTE)
3657 return ASB_DISCARD_LOCAL;
b411b363
PR
3658
3659 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3660 if (peer == ASB_DISCARD_LOCAL)
3661 return ASB_DISCARD_REMOTE;
b411b363
PR
3662
3663 /* everything else is valid if they are equal on both sides. */
f179d76d 3664 return peer;
b411b363
PR
3665}
3666
bde89a9e 3667static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3668{
e658983a 3669 struct p_protocol *p = pi->data;
036b17ea
PR
3670 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3671 int p_proto, p_discard_my_data, p_two_primaries, cf;
3672 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3673 char integrity_alg[SHARED_SECRET_MAX] = "";
3d0e6375 3674 struct crypto_shash *peer_integrity_tfm = NULL;
7aca6c75 3675 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3676
b411b363
PR
3677 p_proto = be32_to_cpu(p->protocol);
3678 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3679 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3680 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3681 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3682 cf = be32_to_cpu(p->conn_flags);
6139f60d 3683 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3684
bde89a9e 3685 if (connection->agreed_pro_version >= 87) {
86db0618 3686 int err;
cf14c2e9 3687
88104ca4 3688 if (pi->size > sizeof(integrity_alg))
86db0618 3689 return -EIO;
bde89a9e 3690 err = drbd_recv_all(connection, integrity_alg, pi->size);
86db0618
AG
3691 if (err)
3692 return err;
036b17ea 3693 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3694 }
3695
7d4c782c 3696 if (pi->cmd != P_PROTOCOL_UPDATE) {
bde89a9e 3697 clear_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3698
fbc12f45 3699 if (cf & CF_DRY_RUN)
bde89a9e 3700 set_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3701
fbc12f45 3702 rcu_read_lock();
bde89a9e 3703 nc = rcu_dereference(connection->net_conf);
b411b363 3704
fbc12f45 3705 if (p_proto != nc->wire_protocol) {
1ec861eb 3706 drbd_err(connection, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3707 goto disconnect_rcu_unlock;
3708 }
b411b363 3709
fbc12f45 3710 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
1ec861eb 3711 drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3712 goto disconnect_rcu_unlock;
3713 }
b411b363 3714
fbc12f45 3715 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
1ec861eb 3716 drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3717 goto disconnect_rcu_unlock;
3718 }
b411b363 3719
fbc12f45 3720 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
1ec861eb 3721 drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3722 goto disconnect_rcu_unlock;
3723 }
b411b363 3724
fbc12f45 3725 if (p_discard_my_data && nc->discard_my_data) {
1ec861eb 3726 drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3727 goto disconnect_rcu_unlock;
3728 }
b411b363 3729
fbc12f45 3730 if (p_two_primaries != nc->two_primaries) {
1ec861eb 3731 drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3732 goto disconnect_rcu_unlock;
3733 }
b411b363 3734
fbc12f45 3735 if (strcmp(integrity_alg, nc->integrity_alg)) {
1ec861eb 3736 drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3737 goto disconnect_rcu_unlock;
3738 }
b411b363 3739
fbc12f45 3740 rcu_read_unlock();
b411b363
PR
3741 }
3742
7d4c782c
AG
3743 if (integrity_alg[0]) {
3744 int hash_size;
3745
3746 /*
3747 * We can only change the peer data integrity algorithm
3748 * here. Changing our own data integrity algorithm
3749 * requires that we send a P_PROTOCOL_UPDATE packet at
3750 * the same time; otherwise, the peer has no way to
3751 * tell between which packets the algorithm should
3752 * change.
3753 */
b411b363 3754
3d234b33 3755 peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);
1b57e663
LE
3756 if (IS_ERR(peer_integrity_tfm)) {
3757 peer_integrity_tfm = NULL;
1ec861eb 3758 drbd_err(connection, "peer data-integrity-alg %s not supported\n",
7d4c782c
AG
3759 integrity_alg);
3760 goto disconnect;
3761 }
b411b363 3762
3d0e6375 3763 hash_size = crypto_shash_digestsize(peer_integrity_tfm);
7d4c782c
AG
3764 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3765 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3766 if (!(int_dig_in && int_dig_vv)) {
1ec861eb 3767 drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3768 goto disconnect;
3769 }
b411b363
PR
3770 }
3771
7d4c782c
AG
3772 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3773 if (!new_net_conf) {
1ec861eb 3774 drbd_err(connection, "Allocation of new net_conf failed\n");
7d4c782c
AG
3775 goto disconnect;
3776 }
3777
bde89a9e 3778 mutex_lock(&connection->data.mutex);
0500813f 3779 mutex_lock(&connection->resource->conf_update);
bde89a9e 3780 old_net_conf = connection->net_conf;
7d4c782c
AG
3781 *new_net_conf = *old_net_conf;
3782
3783 new_net_conf->wire_protocol = p_proto;
3784 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3785 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3786 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3787 new_net_conf->two_primaries = p_two_primaries;
3788
bde89a9e 3789 rcu_assign_pointer(connection->net_conf, new_net_conf);
0500813f 3790 mutex_unlock(&connection->resource->conf_update);
bde89a9e 3791 mutex_unlock(&connection->data.mutex);
7d4c782c 3792
3d0e6375 3793 crypto_free_shash(connection->peer_integrity_tfm);
bde89a9e
AG
3794 kfree(connection->int_dig_in);
3795 kfree(connection->int_dig_vv);
3796 connection->peer_integrity_tfm = peer_integrity_tfm;
3797 connection->int_dig_in = int_dig_in;
3798 connection->int_dig_vv = int_dig_vv;
7d4c782c
AG
3799
3800 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
1ec861eb 3801 drbd_info(connection, "peer data-integrity-alg: %s\n",
7d4c782c
AG
3802 integrity_alg[0] ? integrity_alg : "(none)");
3803
3804 synchronize_rcu();
3805 kfree(old_net_conf);
82bc0194 3806 return 0;
b411b363 3807
44ed167d
PR
3808disconnect_rcu_unlock:
3809 rcu_read_unlock();
b411b363 3810disconnect:
3d0e6375 3811 crypto_free_shash(peer_integrity_tfm);
036b17ea
PR
3812 kfree(int_dig_in);
3813 kfree(int_dig_vv);
bde89a9e 3814 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3815 return -EIO;
b411b363
PR
3816}
3817
3818/* helper function
3819 * input: alg name, feature name
3820 * return: NULL (alg name was "")
3821 * ERR_PTR(error) if something goes wrong
3822 * or the crypto hash ptr, if it worked out ok. */
3d0e6375
KC
3823static struct crypto_shash *drbd_crypto_alloc_digest_safe(
3824 const struct drbd_device *device,
b411b363
PR
3825 const char *alg, const char *name)
3826{
3d0e6375 3827 struct crypto_shash *tfm;
b411b363
PR
3828
3829 if (!alg[0])
3830 return NULL;
3831
3d0e6375 3832 tfm = crypto_alloc_shash(alg, 0, 0);
b411b363 3833 if (IS_ERR(tfm)) {
d0180171 3834 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
b411b363
PR
3835 alg, name, PTR_ERR(tfm));
3836 return tfm;
3837 }
b411b363
PR
3838 return tfm;
3839}
3840
bde89a9e 3841static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3842{
bde89a9e 3843 void *buffer = connection->data.rbuf;
4a76b161
AG
3844 int size = pi->size;
3845
3846 while (size) {
3847 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
bde89a9e 3848 s = drbd_recv(connection, buffer, s);
4a76b161
AG
3849 if (s <= 0) {
3850 if (s < 0)
3851 return s;
3852 break;
3853 }
3854 size -= s;
3855 }
3856 if (size)
3857 return -EIO;
3858 return 0;
3859}
3860
3861/*
3862 * config_unknown_volume - device configuration command for unknown volume
3863 *
3864 * When a device is added to an existing connection, the node on which the
3865 * device is added first will send configuration commands to its peer but the
3866 * peer will not know about the device yet. It will warn and ignore these
3867 * commands. Once the device is added on the second node, the second node will
3868 * send the same device configuration commands, but in the other direction.
3869 *
3870 * (We can also end up here if drbd is misconfigured.)
3871 */
bde89a9e 3872static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3873{
1ec861eb 3874 drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
2fcb8f30 3875 cmdname(pi->cmd), pi->vnr);
bde89a9e 3876 return ignore_remaining_packet(connection, pi);
4a76b161
AG
3877}
3878
bde89a9e 3879static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3880{
9f4fe9ad 3881 struct drbd_peer_device *peer_device;
b30ab791 3882 struct drbd_device *device;
e658983a 3883 struct p_rs_param_95 *p;
b411b363 3884 unsigned int header_size, data_size, exp_max_sz;
3d0e6375
KC
3885 struct crypto_shash *verify_tfm = NULL;
3886 struct crypto_shash *csums_tfm = NULL;
2ec91e0e 3887 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3888 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
bde89a9e 3889 const int apv = connection->agreed_pro_version;
813472ce 3890 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
6a365874 3891 unsigned int fifo_size = 0;
82bc0194 3892 int err;
b411b363 3893
9f4fe9ad
AG
3894 peer_device = conn_peer_device(connection, pi->vnr);
3895 if (!peer_device)
bde89a9e 3896 return config_unknown_volume(connection, pi);
9f4fe9ad 3897 device = peer_device->device;
b411b363
PR
3898
3899 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3900 : apv == 88 ? sizeof(struct p_rs_param)
3901 + SHARED_SECRET_MAX
8e26f9cc
PR
3902 : apv <= 94 ? sizeof(struct p_rs_param_89)
3903 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
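	/* The expected packet size grows with the agreed protocol version:
	 * apv 88 sends the verify-alg name as trailing data (up to
	 * SHARED_SECRET_MAX bytes), apv 89..94 embed verify-alg and csums-alg
	 * in struct p_rs_param_89, and apv >= 95 additionally carries the
	 * dynamic resync controller settings in struct p_rs_param_95, all
	 * parsed further down. */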
b411b363 3904
e2857216 3905 if (pi->size > exp_max_sz) {
d0180171 3906 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3907 pi->size, exp_max_sz);
82bc0194 3908 return -EIO;
b411b363
PR
3909 }
3910
3911 if (apv <= 88) {
e658983a 3912 header_size = sizeof(struct p_rs_param);
e2857216 3913 data_size = pi->size - header_size;
8e26f9cc 3914 } else if (apv <= 94) {
e658983a 3915 header_size = sizeof(struct p_rs_param_89);
e2857216 3916 data_size = pi->size - header_size;
0b0ba1ef 3917 D_ASSERT(device, data_size == 0);
8e26f9cc 3918 } else {
e658983a 3919 header_size = sizeof(struct p_rs_param_95);
e2857216 3920 data_size = pi->size - header_size;
0b0ba1ef 3921 D_ASSERT(device, data_size == 0);
b411b363
PR
3922 }
3923
3924 /* initialize verify_alg and csums_alg */
e658983a 3925 p = pi->data;
b411b363
PR
3926 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3927
9f4fe9ad 3928 err = drbd_recv_all(peer_device->connection, p, header_size);
82bc0194
AG
3929 if (err)
3930 return err;
b411b363 3931
0500813f 3932 mutex_lock(&connection->resource->conf_update);
9f4fe9ad 3933 old_net_conf = peer_device->connection->net_conf;
b30ab791 3934 if (get_ldev(device)) {
813472ce
PR
3935 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3936 if (!new_disk_conf) {
b30ab791 3937 put_ldev(device);
0500813f 3938 mutex_unlock(&connection->resource->conf_update);
d0180171 3939 drbd_err(device, "Allocation of new disk_conf failed\n");
813472ce
PR
3940 return -ENOMEM;
3941 }
daeda1cc 3942
b30ab791 3943 old_disk_conf = device->ldev->disk_conf;
813472ce 3944 *new_disk_conf = *old_disk_conf;
b411b363 3945
6394b935 3946 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3947 }
b411b363
PR
3948
3949 if (apv >= 88) {
3950 if (apv == 88) {
5de73827 3951 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
d0180171 3952 drbd_err(device, "verify-alg of wrong size, "
5de73827
PR
3953 "peer wants %u, accepting only up to %u byte\n",
3954 data_size, SHARED_SECRET_MAX);
813472ce
PR
3955 err = -EIO;
3956 goto reconnect;
b411b363
PR
3957 }
3958
9f4fe9ad 3959 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
813472ce
PR
3960 if (err)
3961 goto reconnect;
b411b363
PR
3962 /* we expect NUL terminated string */
3963 /* but just in case someone tries to be evil */
0b0ba1ef 3964 D_ASSERT(device, p->verify_alg[data_size-1] == 0);
b411b363
PR
3965 p->verify_alg[data_size-1] = 0;
3966
3967 } else /* apv >= 89 */ {
3968 /* we still expect NUL terminated strings */
3969 /* but just in case someone tries to be evil */
0b0ba1ef
AG
3970 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3971 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
b411b363
PR
3972 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3973 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3974 }
3975
2ec91e0e 3976 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b30ab791 3977 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3978 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3979 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3980 goto disconnect;
3981 }
b30ab791 3982 verify_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3983 p->verify_alg, "verify-alg");
3984 if (IS_ERR(verify_tfm)) {
3985 verify_tfm = NULL;
3986 goto disconnect;
3987 }
3988 }
3989
2ec91e0e 3990 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b30ab791 3991 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3992 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3993 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3994 goto disconnect;
3995 }
b30ab791 3996 csums_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3997 p->csums_alg, "csums-alg");
3998 if (IS_ERR(csums_tfm)) {
3999 csums_tfm = NULL;
4000 goto disconnect;
4001 }
4002 }
4003
813472ce 4004 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
4005 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
4006 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
4007 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
4008 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 4009
daeda1cc 4010 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
b30ab791 4011 if (fifo_size != device->rs_plan_s->size) {
813472ce
PR
4012 new_plan = fifo_alloc(fifo_size);
4013 if (!new_plan) {
d0180171 4014 drbd_err(device, "kmalloc of fifo_buffer failed");
b30ab791 4015 put_ldev(device);
778f271d
PR
4016 goto disconnect;
4017 }
4018 }
8e26f9cc 4019 }
b411b363 4020
91fd4dad 4021 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
4022 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
4023 if (!new_net_conf) {
d0180171 4024 drbd_err(device, "Allocation of new net_conf failed\n");
91fd4dad
PR
4025 goto disconnect;
4026 }
4027
2ec91e0e 4028 *new_net_conf = *old_net_conf;
91fd4dad
PR
4029
4030 if (verify_tfm) {
2ec91e0e
PR
4031 strcpy(new_net_conf->verify_alg, p->verify_alg);
4032 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3d0e6375 4033 crypto_free_shash(peer_device->connection->verify_tfm);
9f4fe9ad 4034 peer_device->connection->verify_tfm = verify_tfm;
d0180171 4035 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
91fd4dad
PR
4036 }
4037 if (csums_tfm) {
2ec91e0e
PR
4038 strcpy(new_net_conf->csums_alg, p->csums_alg);
4039 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3d0e6375 4040 crypto_free_shash(peer_device->connection->csums_tfm);
9f4fe9ad 4041 peer_device->connection->csums_tfm = csums_tfm;
d0180171 4042 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
91fd4dad 4043 }
bde89a9e 4044 rcu_assign_pointer(connection->net_conf, new_net_conf);
778f271d 4045 }
b411b363
PR
4046 }
4047
813472ce 4048 if (new_disk_conf) {
b30ab791
AG
4049 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4050 put_ldev(device);
813472ce
PR
4051 }
4052
4053 if (new_plan) {
b30ab791
AG
4054 old_plan = device->rs_plan_s;
4055 rcu_assign_pointer(device->rs_plan_s, new_plan);
b411b363 4056 }
daeda1cc 4057
0500813f 4058 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
4059 synchronize_rcu();
4060 if (new_net_conf)
4061 kfree(old_net_conf);
4062 kfree(old_disk_conf);
813472ce 4063 kfree(old_plan);
daeda1cc 4064
82bc0194 4065 return 0;
b411b363 4066
813472ce
PR
4067reconnect:
4068 if (new_disk_conf) {
b30ab791 4069 put_ldev(device);
813472ce
PR
4070 kfree(new_disk_conf);
4071 }
0500813f 4072 mutex_unlock(&connection->resource->conf_update);
813472ce
PR
4073 return -EIO;
4074
b411b363 4075disconnect:
813472ce
PR
4076 kfree(new_plan);
4077 if (new_disk_conf) {
b30ab791 4078 put_ldev(device);
813472ce
PR
4079 kfree(new_disk_conf);
4080 }
0500813f 4081 mutex_unlock(&connection->resource->conf_update);
b411b363
PR
4082 /* just for completeness: actually not needed,
4083 * as this is not reached if csums_tfm was ok. */
3d0e6375 4084 crypto_free_shash(csums_tfm);
b411b363 4085 /* but free the verify_tfm again, if csums_tfm did not work out */
3d0e6375 4086 crypto_free_shash(verify_tfm);
9f4fe9ad 4087 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4088 return -EIO;
b411b363
PR
4089}
4090
b411b363 4091/* warn if the arguments differ by more than 12.5% */
b30ab791 4092static void warn_if_differ_considerably(struct drbd_device *device,
b411b363
PR
4093 const char *s, sector_t a, sector_t b)
4094{
4095 sector_t d;
4096 if (a == 0 || b == 0)
4097 return;
4098 d = (a > b) ? (a - b) : (b - a);
4099 if (d > (a>>3) || d > (b>>3))
d0180171 4100 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
b411b363
PR
4101 (unsigned long long)a, (unsigned long long)b);
4102}
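/* Worked example: for a == 1000 and b == 800 sectors, d == 200, which is
 * larger than a>>3 == 125 (12.5% of a), so the warning above is emitted. */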
4103
bde89a9e 4104static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4105{
9f4fe9ad 4106 struct drbd_peer_device *peer_device;
b30ab791 4107 struct drbd_device *device;
e658983a 4108 struct p_sizes *p = pi->data;
9104d31a 4109 struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
e96c9633 4110 enum determine_dev_size dd = DS_UNCHANGED;
6a8d68b1 4111 sector_t p_size, p_usize, p_csize, my_usize;
94c43a13 4112 sector_t new_size, cur_size;
b411b363 4113 int ldsc = 0; /* local disk size changed */
e89b591c 4114 enum dds_flags ddsf;
b411b363 4115
9f4fe9ad
AG
4116 peer_device = conn_peer_device(connection, pi->vnr);
4117 if (!peer_device)
bde89a9e 4118 return config_unknown_volume(connection, pi);
9f4fe9ad 4119 device = peer_device->device;
94c43a13 4120 cur_size = drbd_get_capacity(device->this_bdev);
4a76b161 4121
b411b363
PR
4122 p_size = be64_to_cpu(p->d_size);
4123 p_usize = be64_to_cpu(p->u_size);
6a8d68b1 4124 p_csize = be64_to_cpu(p->c_size);
b411b363 4125
b411b363
PR
4126 /* just store the peer's disk size for now.
4127 * we still need to figure out whether we accept that. */
b30ab791 4128 device->p_size = p_size;
b411b363 4129
b30ab791 4130 if (get_ldev(device)) {
daeda1cc 4131 rcu_read_lock();
b30ab791 4132 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
daeda1cc
PR
4133 rcu_read_unlock();
4134
b30ab791
AG
4135 warn_if_differ_considerably(device, "lower level device sizes",
4136 p_size, drbd_get_max_capacity(device->ldev));
4137 warn_if_differ_considerably(device, "user requested size",
daeda1cc 4138 p_usize, my_usize);
b411b363
PR
4139
4140 /* if this is the first connect, or an otherwise expected
4141 * param exchange, choose the minimum */
b30ab791 4142 if (device->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 4143 p_usize = min_not_zero(my_usize, p_usize);
b411b363 4144
ad6e8979
LE
4145 /* Never shrink a device with usable data during connect,
4146 * or "attach" on the peer.
4147 * But allow online shrinking if we are connected. */
60bac040 4148 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
60bac040 4149 if (new_size < cur_size &&
b30ab791 4150 device->state.disk >= D_OUTDATED &&
ad6e8979 4151 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
60bac040
LE
4152 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4153 (unsigned long long)new_size, (unsigned long long)cur_size);
9f4fe9ad 4154 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
b30ab791 4155 put_ldev(device);
82bc0194 4156 return -EIO;
b411b363 4157 }
daeda1cc
PR
4158
4159 if (my_usize != p_usize) {
4160 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
4161
4162 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
4163 if (!new_disk_conf) {
d0180171 4164 drbd_err(device, "Allocation of new disk_conf failed\n");
b30ab791 4165 put_ldev(device);
daeda1cc
PR
4166 return -ENOMEM;
4167 }
4168
0500813f 4169 mutex_lock(&connection->resource->conf_update);
b30ab791 4170 old_disk_conf = device->ldev->disk_conf;
daeda1cc
PR
4171 *new_disk_conf = *old_disk_conf;
4172 new_disk_conf->disk_size = p_usize;
4173
b30ab791 4174 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
0500813f 4175 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
4176 synchronize_rcu();
4177 kfree(old_disk_conf);
4178
ad6e8979
LE
4179 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
4180 (unsigned long)p_usize, (unsigned long)my_usize);
b411b363 4181 }
daeda1cc 4182
b30ab791 4183 put_ldev(device);
b411b363 4184 }
b411b363 4185
20c68fde 4186 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
dd4f699d 4187 /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
20c68fde 4188 In case we cleared the QUEUE_FLAG_DISCARD from our queue in
dd4f699d 4189 drbd_reconsider_queue_parameters(), we can be sure that after
20c68fde
LE
4190 drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
4191
e89b591c 4192 ddsf = be16_to_cpu(p->dds_flags);
b30ab791 4193 if (get_ldev(device)) {
9104d31a 4194 drbd_reconsider_queue_parameters(device, device->ldev, o);
b30ab791
AG
4195 dd = drbd_determine_dev_size(device, ddsf, NULL);
4196 put_ldev(device);
e96c9633 4197 if (dd == DS_ERROR)
82bc0194 4198 return -EIO;
b30ab791 4199 drbd_md_sync(device);
b411b363 4200 } else {
6a8d68b1
LE
4201 /*
4202 * I am diskless, need to accept the peer's *current* size.
 4203 * I must NOT accept the peer's backing disk size,
4204 * it may have been larger than mine all along...
4205 *
4206 * At this point, the peer knows more about my disk, or at
4207 * least about what we last agreed upon, than myself.
4208 * So if his c_size is less than his d_size, the most likely
4209 * reason is that *my* d_size was smaller last time we checked.
4210 *
4211 * However, if he sends a zero current size,
 4212 * take his (user-capped or) backing disk size anyway.
94c43a13
LE
4213 *
4214 * Unless of course he does not have a disk himself.
4215 * In which case we ignore this completely.
6a8d68b1 4216 */
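	/* Size preference for the diskless case, implemented on the next line:
	 * take the peer's currently exposed size (c_size) if it sent one, else
	 * the user-requested size (u_size), else the peer's backing device
	 * size (d_size). */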
94c43a13 4217 sector_t new_size = p_csize ?: p_usize ?: p_size;
9104d31a 4218 drbd_reconsider_queue_parameters(device, NULL, o);
94c43a13
LE
4219 if (new_size == 0) {
 4220 /* Ignore, the peer does not know anything. */
4221 } else if (new_size == cur_size) {
4222 /* nothing to do */
4223 } else if (cur_size != 0 && p_size == 0) {
4224 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
4225 (unsigned long long)new_size, (unsigned long long)cur_size);
4226 } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
4227 drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
4228 (unsigned long long)new_size, (unsigned long long)cur_size);
4229 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4230 return -EIO;
4231 } else {
4232 /* I believe the peer, if
4233 * - I don't have a current size myself
4234 * - we agree on the size anyways
4235 * - I do have a current size, am Secondary,
4236 * and he has the only disk
4237 * - I do have a current size, am Primary,
4238 * and he has the only disk,
4239 * which is larger than my current size
4240 */
4241 drbd_set_my_capacity(device, new_size);
4242 }
b411b363
PR
4243 }
4244
b30ab791
AG
4245 if (get_ldev(device)) {
4246 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4247 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
b411b363
PR
4248 ldsc = 1;
4249 }
4250
b30ab791 4251 put_ldev(device);
b411b363
PR
4252 }
4253
b30ab791 4254 if (device->state.conn > C_WF_REPORT_PARAMS) {
b411b363 4255 if (be64_to_cpu(p->c_size) !=
b30ab791 4256 drbd_get_capacity(device->this_bdev) || ldsc) {
b411b363
PR
4257 /* we have different sizes, probably peer
4258 * needs to know my new size... */
69a22773 4259 drbd_send_sizes(peer_device, 0, ddsf);
b411b363 4260 }
b30ab791
AG
4261 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4262 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4263 if (device->state.pdsk >= D_INCONSISTENT &&
4264 device->state.disk >= D_INCONSISTENT) {
e89b591c 4265 if (ddsf & DDSF_NO_RESYNC)
d0180171 4266 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
e89b591c 4267 else
b30ab791 4268 resync_after_online_grow(device);
e89b591c 4269 } else
b30ab791 4270 set_bit(RESYNC_AFTER_NEG, &device->flags);
b411b363
PR
4271 }
4272 }
4273
82bc0194 4274 return 0;
b411b363
PR
4275}
4276
bde89a9e 4277static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4278{
9f4fe9ad 4279 struct drbd_peer_device *peer_device;
b30ab791 4280 struct drbd_device *device;
e658983a 4281 struct p_uuids *p = pi->data;
b411b363 4282 u64 *p_uuid;
62b0da3a 4283 int i, updated_uuids = 0;
b411b363 4284
9f4fe9ad
AG
4285 peer_device = conn_peer_device(connection, pi->vnr);
4286 if (!peer_device)
bde89a9e 4287 return config_unknown_volume(connection, pi);
9f4fe9ad 4288 device = peer_device->device;
4a76b161 4289
365cf663 4290 p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
063eacf8 4291 if (!p_uuid) {
d0180171 4292 drbd_err(device, "kmalloc of p_uuid failed\n");
063eacf8
JW
 4293 return -ENOMEM;
4294 }
b411b363
PR
4295
4296 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
4297 p_uuid[i] = be64_to_cpu(p->uuid[i]);
4298
b30ab791
AG
4299 kfree(device->p_uuid);
4300 device->p_uuid = p_uuid;
b411b363 4301
b17b5960 4302 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
b30ab791
AG
4303 device->state.disk < D_INCONSISTENT &&
4304 device->state.role == R_PRIMARY &&
4305 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
d0180171 4306 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
b30ab791 4307 (unsigned long long)device->ed_uuid);
9f4fe9ad 4308 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4309 return -EIO;
b411b363
PR
4310 }
4311
b30ab791 4312 if (get_ldev(device)) {
b411b363 4313 int skip_initial_sync =
b30ab791 4314 device->state.conn == C_CONNECTED &&
9f4fe9ad 4315 peer_device->connection->agreed_pro_version >= 90 &&
b30ab791 4316 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
b411b363
PR
4317 (p_uuid[UI_FLAGS] & 8);
4318 if (skip_initial_sync) {
d0180171 4319 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
b30ab791 4320 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
20ceb2b2
LE
4321 "clear_n_write from receive_uuids",
4322 BM_LOCKED_TEST_ALLOWED);
b30ab791
AG
4323 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4324 _drbd_uuid_set(device, UI_BITMAP, 0);
4325 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
b411b363 4326 CS_VERBOSE, NULL);
b30ab791 4327 drbd_md_sync(device);
62b0da3a 4328 updated_uuids = 1;
b411b363 4329 }
b30ab791
AG
4330 put_ldev(device);
4331 } else if (device->state.disk < D_INCONSISTENT &&
4332 device->state.role == R_PRIMARY) {
18a50fa2
PR
4333 /* I am a diskless primary, the peer just created a new current UUID
4334 for me. */
b30ab791 4335 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
b411b363
PR
4336 }
4337
 4338 /* Before we test for the disk state, we should wait until a possibly
 4339 ongoing cluster-wide state change has finished. That is important if
4340 we are primary and are detaching from our disk. We need to see the
4341 new disk state... */
b30ab791
AG
4342 mutex_lock(device->state_mutex);
4343 mutex_unlock(device->state_mutex);
4344 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4345 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
62b0da3a
LE
4346
4347 if (updated_uuids)
b30ab791 4348 drbd_print_uuids(device, "receiver updated UUIDs to");
b411b363 4349
82bc0194 4350 return 0;
b411b363
PR
4351}
4352
4353/**
4354 * convert_state() - Converts the peer's view of the cluster state to our point of view
4355 * @ps: The state as seen by the peer.
4356 */
4357static union drbd_state convert_state(union drbd_state ps)
4358{
4359 union drbd_state ms;
4360
4361 static enum drbd_conns c_tab[] = {
369bea63 4362 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
4363 [C_CONNECTED] = C_CONNECTED,
4364
4365 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
4366 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
4367 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
4368 [C_VERIFY_S] = C_VERIFY_T,
4369 [C_MASK] = C_MASK,
4370 };
4371
4372 ms.i = ps.i;
4373
4374 ms.conn = c_tab[ps.conn];
4375 ms.peer = ps.role;
4376 ms.role = ps.peer;
4377 ms.pdsk = ps.disk;
4378 ms.disk = ps.pdsk;
4379 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
4380
4381 return ms;
4382}
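/* Example: a peer reporting { role=Primary, peer=Secondary, disk=UpToDate,
 * pdsk=Inconsistent, conn=StartingSyncS } is seen from this node as
 * { role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate,
 * conn=StartingSyncT }: the role and disk fields are swapped, and the
 * connection state is mapped through c_tab above. */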
4383
bde89a9e 4384static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4385{
9f4fe9ad 4386 struct drbd_peer_device *peer_device;
b30ab791 4387 struct drbd_device *device;
e658983a 4388 struct p_req_state *p = pi->data;
b411b363 4389 union drbd_state mask, val;
bf885f8a 4390 enum drbd_state_rv rv;
b411b363 4391
9f4fe9ad
AG
4392 peer_device = conn_peer_device(connection, pi->vnr);
4393 if (!peer_device)
4a76b161 4394 return -EIO;
9f4fe9ad 4395 device = peer_device->device;
4a76b161 4396
b411b363
PR
4397 mask.i = be32_to_cpu(p->mask);
4398 val.i = be32_to_cpu(p->val);
4399
9f4fe9ad 4400 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
b30ab791 4401 mutex_is_locked(device->state_mutex)) {
69a22773 4402 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
82bc0194 4403 return 0;
b411b363
PR
4404 }
4405
4406 mask = convert_state(mask);
4407 val = convert_state(val);
4408
b30ab791 4409 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
69a22773 4410 drbd_send_sr_reply(peer_device, rv);
b411b363 4411
b30ab791 4412 drbd_md_sync(device);
b411b363 4413
82bc0194 4414 return 0;
b411b363
PR
4415}
4416
bde89a9e 4417static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4418{
e658983a 4419 struct p_req_state *p = pi->data;
b411b363 4420 union drbd_state mask, val;
bf885f8a 4421 enum drbd_state_rv rv;
b411b363 4422
b411b363
PR
4423 mask.i = be32_to_cpu(p->mask);
4424 val.i = be32_to_cpu(p->val);
4425
bde89a9e
AG
4426 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
4427 mutex_is_locked(&connection->cstate_mutex)) {
4428 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
82bc0194 4429 return 0;
b411b363
PR
4430 }
4431
4432 mask = convert_state(mask);
4433 val = convert_state(val);
4434
bde89a9e
AG
4435 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
4436 conn_send_sr_reply(connection, rv);
b411b363 4437
82bc0194 4438 return 0;
b411b363
PR
4439}
4440
bde89a9e 4441static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4442{
9f4fe9ad 4443 struct drbd_peer_device *peer_device;
b30ab791 4444 struct drbd_device *device;
e658983a 4445 struct p_state *p = pi->data;
4ac4aada 4446 union drbd_state os, ns, peer_state;
b411b363 4447 enum drbd_disk_state real_peer_disk;
65d922c3 4448 enum chg_state_flags cs_flags;
b411b363
PR
4449 int rv;
4450
9f4fe9ad
AG
4451 peer_device = conn_peer_device(connection, pi->vnr);
4452 if (!peer_device)
bde89a9e 4453 return config_unknown_volume(connection, pi);
9f4fe9ad 4454 device = peer_device->device;
4a76b161 4455
b411b363
PR
4456 peer_state.i = be32_to_cpu(p->state);
4457
4458 real_peer_disk = peer_state.disk;
4459 if (peer_state.disk == D_NEGOTIATING) {
b30ab791 4460 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
d0180171 4461 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
b411b363
PR
4462 }
4463
0500813f 4464 spin_lock_irq(&device->resource->req_lock);
b411b363 4465 retry:
b30ab791 4466 os = ns = drbd_read_state(device);
0500813f 4467 spin_unlock_irq(&device->resource->req_lock);
b411b363 4468
668700b4 4469 /* If some other part of the code (ack_receiver thread, timeout)
545752d5
LE
4470 * already decided to close the connection again,
4471 * we must not "re-establish" it here. */
4472 if (os.conn <= C_TEAR_DOWN)
58ffa580 4473 return -ECONNRESET;
545752d5 4474
40424e4a
LE
4475 /* If this is the "end of sync" confirmation, usually the peer disk
4476 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
4477 * set) resync started in PausedSyncT, or if the timing of pause-/
4478 * unpause-sync events has been "just right", the peer disk may
4479 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
4480 */
4481 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
4482 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
4483 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
4484 /* If we are (becoming) SyncSource, but peer is still in sync
4485 * preparation, ignore its uptodate-ness to avoid flapping, it
4486 * will change to inconsistent once the peer reaches active
4487 * syncing states.
4488 * It may have changed syncer-paused flags, however, so we
4489 * cannot ignore this completely. */
4490 if (peer_state.conn > C_CONNECTED &&
4491 peer_state.conn < C_SYNC_SOURCE)
4492 real_peer_disk = D_INCONSISTENT;
4493
4494 /* if peer_state changes to connected at the same time,
4495 * it explicitly notifies us that it finished resync.
4496 * Maybe we should finish it up, too? */
4497 else if (os.conn >= C_SYNC_SOURCE &&
4498 peer_state.conn == C_CONNECTED) {
b30ab791
AG
4499 if (drbd_bm_total_weight(device) <= device->rs_failed)
4500 drbd_resync_finished(device);
82bc0194 4501 return 0;
e9ef7bb6
LE
4502 }
4503 }
4504
02b91b55
LE
4505 /* explicit verify finished notification, stop sector reached. */
4506 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
4507 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
b30ab791
AG
4508 ov_out_of_sync_print(device);
4509 drbd_resync_finished(device);
58ffa580 4510 return 0;
02b91b55
LE
4511 }
4512
e9ef7bb6
LE
 4513	/* peer says its disk is inconsistent, while we think it is uptodate,
4514 * and this happens while the peer still thinks we have a sync going on,
4515 * but we think we are already done with the sync.
4516 * We ignore this to avoid flapping pdsk.
 4517	 * This should not happen if the peer is a recent version of drbd. */
4518 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
4519 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
4520 real_peer_disk = D_UP_TO_DATE;
4521
4ac4aada
LE
4522 if (ns.conn == C_WF_REPORT_PARAMS)
4523 ns.conn = C_CONNECTED;
b411b363 4524
67531718
PR
4525 if (peer_state.conn == C_AHEAD)
4526 ns.conn = C_BEHIND;
4527
fe43ed97
LE
4528 /* TODO:
4529 * if (primary and diskless and peer uuid != effective uuid)
4530 * abort attach on peer;
4531 *
4532 * If this node does not have good data, was already connected, but
4533 * the peer did a late attach only now, trying to "negotiate" with me,
4534 * AND I am currently Primary, possibly frozen, with some specific
4535 * "effective" uuid, this should never be reached, really, because
4536 * we first send the uuids, then the current state.
4537 *
4538 * In this scenario, we already dropped the connection hard
 4539	 * when we received the unsuitable uuids (receive_uuids()).
4540 *
4541 * Should we want to change this, that is: not drop the connection in
4542 * receive_uuids() already, then we would need to add a branch here
4543 * that aborts the attach of "unsuitable uuids" on the peer in case
4544 * this node is currently Diskless Primary.
4545 */
4546
b30ab791
AG
4547 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4548 get_ldev_if_state(device, D_NEGOTIATING)) {
b411b363
PR
4549 int cr; /* consider resync */
4550
4551 /* if we established a new connection */
4ac4aada 4552 cr = (os.conn < C_CONNECTED);
b411b363
PR
4553 /* if we had an established connection
4554 * and one of the nodes newly attaches a disk */
4ac4aada 4555 cr |= (os.conn == C_CONNECTED &&
b411b363 4556 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 4557 os.disk == D_NEGOTIATING));
b411b363 4558 /* if we have both been inconsistent, and the peer has been
a2823ea9 4559 * forced to be UpToDate with --force */
b30ab791 4560 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4561 /* if we had been plain connected, and the admin requested to
4562 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 4563 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
4564 (peer_state.conn >= C_STARTING_SYNC_S &&
4565 peer_state.conn <= C_WF_BITMAP_T));
4566
4567 if (cr)
69a22773 4568 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
b411b363 4569
b30ab791 4570 put_ldev(device);
4ac4aada
LE
4571 if (ns.conn == C_MASK) {
4572 ns.conn = C_CONNECTED;
b30ab791
AG
4573 if (device->state.disk == D_NEGOTIATING) {
4574 drbd_force_state(device, NS(disk, D_FAILED));
b411b363 4575 } else if (peer_state.disk == D_NEGOTIATING) {
d0180171 4576 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
b411b363 4577 peer_state.disk = D_DISKLESS;
580b9767 4578 real_peer_disk = D_DISKLESS;
b411b363 4579 } else {
9f4fe9ad 4580 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
82bc0194 4581 return -EIO;
0b0ba1ef 4582 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
9f4fe9ad 4583 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4584 return -EIO;
b411b363
PR
4585 }
4586 }
4587 }
4588
0500813f 4589 spin_lock_irq(&device->resource->req_lock);
b30ab791 4590 if (os.i != drbd_read_state(device).i)
b411b363 4591 goto retry;
b30ab791 4592 clear_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4593 ns.peer = peer_state.role;
4594 ns.pdsk = real_peer_disk;
4595 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 4596 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b30ab791 4597 ns.disk = device->new_state_tmp.disk;
4ac4aada 4598 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
b30ab791
AG
4599 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4600 test_bit(NEW_CUR_UUID, &device->flags)) {
8554df1c 4601 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 4602	   for temporary network outages! */
0500813f 4603 spin_unlock_irq(&device->resource->req_lock);
d0180171 4604	 drbd_err(device, "Aborting Connect, cannot thaw IO with an only Consistent peer\n");
9f4fe9ad 4605 tl_clear(peer_device->connection);
b30ab791
AG
4606 drbd_uuid_new_current(device);
4607 clear_bit(NEW_CUR_UUID, &device->flags);
9f4fe9ad 4608 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 4609 return -EIO;
481c6f50 4610 }
b30ab791
AG
4611 rv = _drbd_set_state(device, ns, cs_flags, NULL);
4612 ns = drbd_read_state(device);
0500813f 4613 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
4614
4615 if (rv < SS_SUCCESS) {
9f4fe9ad 4616 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4617 return -EIO;
b411b363
PR
4618 }
4619
4ac4aada
LE
4620 if (os.conn > C_WF_REPORT_PARAMS) {
4621 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
4622 peer_state.disk != D_NEGOTIATING ) {
4623 /* we want resync, peer has not yet decided to sync... */
4624 /* Nowadays only used when forcing a node into primary role and
4625 setting its disk to UpToDate with that */
69a22773
AG
4626 drbd_send_uuids(peer_device);
4627 drbd_send_current_state(peer_device);
b411b363
PR
4628 }
4629 }
4630
b30ab791 4631 clear_bit(DISCARD_MY_DATA, &device->flags);
b411b363 4632
b30ab791 4633 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
b411b363 4634
82bc0194 4635 return 0;
b411b363
PR
4636}
4637
bde89a9e 4638static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4639{
9f4fe9ad 4640 struct drbd_peer_device *peer_device;
b30ab791 4641 struct drbd_device *device;
e658983a 4642 struct p_rs_uuid *p = pi->data;
4a76b161 4643
9f4fe9ad
AG
4644 peer_device = conn_peer_device(connection, pi->vnr);
4645 if (!peer_device)
4a76b161 4646 return -EIO;
9f4fe9ad 4647 device = peer_device->device;
b411b363 4648
b30ab791
AG
4649 wait_event(device->misc_wait,
4650 device->state.conn == C_WF_SYNC_UUID ||
4651 device->state.conn == C_BEHIND ||
4652 device->state.conn < C_CONNECTED ||
4653 device->state.disk < D_NEGOTIATING);
b411b363 4654
0b0ba1ef 4655 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
b411b363 4656
b411b363
PR
4657 /* Here the _drbd_uuid_ functions are right, current should
4658 _not_ be rotated into the history */
b30ab791
AG
4659 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4660 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4661 _drbd_uuid_set(device, UI_BITMAP, 0UL);
b411b363 4662
b30ab791
AG
4663 drbd_print_uuids(device, "updated sync uuid");
4664 drbd_start_resync(device, C_SYNC_TARGET);
b411b363 4665
b30ab791 4666 put_ldev(device);
b411b363 4667 } else
d0180171 4668 drbd_err(device, "Ignoring SyncUUID packet!\n");
b411b363 4669
82bc0194 4670 return 0;
b411b363
PR
4671}
4672
2c46407d
AG
4673/**
4674 * receive_bitmap_plain
4675 *
4676 * Return 0 when done, 1 when another iteration is needed, and a negative error
4677 * code upon failure.
4678 */
4679static int
69a22773 4680receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
e658983a 4681 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4682{
50d0b1ad 4683 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
69a22773 4684 drbd_header_size(peer_device->connection);
e658983a 4685 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4686 c->bm_words - c->word_offset);
e658983a 4687 unsigned int want = num_words * sizeof(*p);
2c46407d 4688 int err;
b411b363 4689
50d0b1ad 4690 if (want != size) {
69a22773 4691 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4692 return -EIO;
b411b363
PR
4693 }
4694 if (want == 0)
2c46407d 4695 return 0;
69a22773 4696 err = drbd_recv_all(peer_device->connection, p, want);
82bc0194 4697 if (err)
2c46407d 4698 return err;
b411b363 4699
69a22773 4700 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
b411b363
PR
4701
4702 c->word_offset += num_words;
4703 c->bit_offset = c->word_offset * BITS_PER_LONG;
4704 if (c->bit_offset > c->bm_bits)
4705 c->bit_offset = c->bm_bits;
4706
2c46407d 4707 return 1;
b411b363
PR
4708}
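/*
 * Worked example for the sizing above (a sketch with assumed values, not a
 * guaranteed configuration): with an 8 byte header and 8 byte unsigned longs,
 * data_size = 4096 - 8 = 4088 bytes, so at most 4088 / 8 = 511 words fit into
 * one P_BITMAP packet.  While c->bm_words - c->word_offset is larger than
 * that, want = 511 * 8 = 4088 bytes and further P_BITMAP packets follow; the
 * last packet carries only the remaining words.
 */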
4709
a02d1240
AG
4710static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4711{
4712 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4713}
4714
4715static int dcbp_get_start(struct p_compressed_bm *p)
4716{
4717 return (p->encoding & 0x80) != 0;
4718}
4719
4720static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4721{
4722 return (p->encoding >> 4) & 0x7;
4723}
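/*
 * Illustration of the "encoding" byte unpacked by the three helpers above,
 * derived directly from the masks they use:
 *
 *   bit 7     : start value of the RLE toggle        (dcbp_get_start)
 *   bits 6..4 : number of padding bits in the stream (dcbp_get_pad_bits)
 *   bits 3..0 : bitmap encoding, e.g. RLE_VLI_Bits   (dcbp_get_code)
 *
 * For example, encoding == 0xa2 unpacks as start = 1, pad_bits = 2, code = 2.
 */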
4724
2c46407d
AG
4725/**
4726 * recv_bm_rle_bits
4727 *
4728 * Return 0 when done, 1 when another iteration is needed, and a negative error
4729 * code upon failure.
4730 */
4731static int
69a22773 4732recv_bm_rle_bits(struct drbd_peer_device *peer_device,
b411b363 4733 struct p_compressed_bm *p,
c6d25cfe
PR
4734 struct bm_xfer_ctx *c,
4735 unsigned int len)
b411b363
PR
4736{
4737 struct bitstream bs;
4738 u64 look_ahead;
4739 u64 rl;
4740 u64 tmp;
4741 unsigned long s = c->bit_offset;
4742 unsigned long e;
a02d1240 4743 int toggle = dcbp_get_start(p);
b411b363
PR
4744 int have;
4745 int bits;
4746
a02d1240 4747 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4748
4749 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4750 if (bits < 0)
2c46407d 4751 return -EIO;
b411b363
PR
4752
4753 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4754 bits = vli_decode_bits(&rl, look_ahead);
4755 if (bits <= 0)
2c46407d 4756 return -EIO;
b411b363
PR
4757
4758 if (toggle) {
4759 e = s + rl -1;
4760 if (e >= c->bm_bits) {
69a22773 4761 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4762 return -EIO;
b411b363 4763 }
69a22773 4764 _drbd_bm_set_bits(peer_device->device, s, e);
b411b363
PR
4765 }
4766
4767 if (have < bits) {
69a22773 4768 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
b411b363
PR
4769 have, bits, look_ahead,
4770 (unsigned int)(bs.cur.b - p->code),
4771 (unsigned int)bs.buf_len);
2c46407d 4772 return -EIO;
b411b363 4773 }
d2da5b0c
LE
4774 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4775 if (likely(bits < 64))
4776 look_ahead >>= bits;
4777 else
4778 look_ahead = 0;
b411b363
PR
4779 have -= bits;
4780
4781 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4782 if (bits < 0)
2c46407d 4783 return -EIO;
b411b363
PR
4784 look_ahead |= tmp << have;
4785 have += bits;
4786 }
4787
4788 c->bit_offset = s;
4789 bm_xfer_ctx_bit_to_word_offset(c);
4790
2c46407d 4791 return (s != c->bm_bits);
b411b363
PR
4792}
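/*
 * Minimal worked example of the run-length decoding above (hypothetical
 * numbers, not taken from a real packet): with dcbp_get_start(p) == 0 and
 * decoded run lengths rl = 5, 3, 7, the loop walks the bitmap as
 *
 *   bits [s, s+4]      toggle == 0  ->  left unchanged
 *   bits [s+5, s+7]    toggle == 1  ->  _drbd_bm_set_bits()
 *   bits [s+8, s+14]   toggle == 0  ->  left unchanged
 *
 * Runs alternate between "clear" and "set", starting with the value given by
 * the start bit, and only the "set" runs touch the bitmap.
 */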
4793
2c46407d
AG
4794/**
4795 * decode_bitmap_c
4796 *
4797 * Return 0 when done, 1 when another iteration is needed, and a negative error
4798 * code upon failure.
4799 */
4800static int
69a22773 4801decode_bitmap_c(struct drbd_peer_device *peer_device,
b411b363 4802 struct p_compressed_bm *p,
c6d25cfe
PR
4803 struct bm_xfer_ctx *c,
4804 unsigned int len)
b411b363 4805{
a02d1240 4806 if (dcbp_get_code(p) == RLE_VLI_Bits)
69a22773 4807 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
b411b363
PR
4808
4809 /* other variants had been implemented for evaluation,
4810 * but have been dropped as this one turned out to be "best"
4811 * during all our tests. */
4812
69a22773
AG
4813 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4814 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4815 return -EIO;
b411b363
PR
4816}
4817
b30ab791 4818void INFO_bm_xfer_stats(struct drbd_device *device,
b411b363
PR
4819 const char *direction, struct bm_xfer_ctx *c)
4820{
4821 /* what would it take to transfer it "plaintext" */
a6b32bc3 4822 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
50d0b1ad
AG
4823 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4824 unsigned int plain =
4825 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4826 c->bm_words * sizeof(unsigned long);
4827 unsigned int total = c->bytes[0] + c->bytes[1];
4828 unsigned int r;
b411b363
PR
4829
 4830	/* total cannot be zero, but just in case: */
4831 if (total == 0)
4832 return;
4833
4834 /* don't report if not compressed */
4835 if (total >= plain)
4836 return;
4837
4838 /* total < plain. check for overflow, still */
4839 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4840 : (1000 * total / plain);
4841
4842 if (r > 1000)
4843 r = 1000;
4844
4845 r = 1000 - r;
d0180171 4846 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
b411b363
PR
4847 "total %u; compression: %u.%u%%\n",
4848 direction,
4849 c->bytes[1], c->packets[1],
4850 c->bytes[0], c->packets[0],
4851 total, r/10, r % 10);
4852}
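/*
 * Worked example for the ratio computed above (made-up numbers, assuming an
 * 8 byte header and 8 byte unsigned longs): with c->bm_words = 4096 and
 * data_size = 4088, the "plaintext" estimate is
 *   plain = 8 * (DIV_ROUND_UP(4096, 4088) + 1) + 4096 * 8 = 32792 bytes.
 * If the RLE transfer needed total = 1640 bytes, then
 *   r = 1000 * 1640 / 32792 = 50,  1000 - r = 950,
 * and the message reports "compression: 95.0%" (950/10 and 950 % 10).
 */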
4853
4854/* Since we are processing the bitfield from lower addresses to higher,
 4855 it does not matter if we process it in 32 bit chunks or 64 bit
 4856 chunks, as long as it is little endian. (Understand it as a byte stream,
 4857 beginning with the lowest byte...) If we used big endian,
 4858 we would need to process it from the highest address to the lowest
 4859 in order to be agnostic to the 32 vs 64 bit issue.
 4860
 4861 Returns 0 on success, or a negative error code on failure. */
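/*
 * Small illustration of the endianness statement above (example bytes only):
 * with received bytes b0, b1, ..., b7, bit number k of the bitmap always
 * lives in bit (k % 8) of byte b[k / 8], no matter whether the bytes are
 * merged as two little endian 32 bit words
 *     w0 = b0 | b1 << 8 | b2 << 16 | b3 << 24
 * or as one little endian 64 bit word
 *     w  = b0 | b1 << 8 | ... | b7 << 56.
 * With big endian words, the byte holding bit k would depend on the word
 * size, which is exactly the dependency this code wants to avoid.
 */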
bde89a9e 4862static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4863{
9f4fe9ad 4864 struct drbd_peer_device *peer_device;
b30ab791 4865 struct drbd_device *device;
b411b363 4866 struct bm_xfer_ctx c;
2c46407d 4867 int err;
4a76b161 4868
9f4fe9ad
AG
4869 peer_device = conn_peer_device(connection, pi->vnr);
4870 if (!peer_device)
4a76b161 4871 return -EIO;
9f4fe9ad 4872 device = peer_device->device;
b411b363 4873
b30ab791 4874 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
20ceb2b2
LE
4875 /* you are supposed to send additional out-of-sync information
4876 * if you actually set bits during this phase */
b411b363 4877
b411b363 4878 c = (struct bm_xfer_ctx) {
b30ab791
AG
4879 .bm_bits = drbd_bm_bits(device),
4880 .bm_words = drbd_bm_words(device),
b411b363
PR
4881 };
4882
2c46407d 4883 for(;;) {
e658983a 4884 if (pi->cmd == P_BITMAP)
69a22773 4885 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
e658983a 4886 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4887 /* MAYBE: sanity check that we speak proto >= 90,
4888 * and the feature is enabled! */
e658983a 4889 struct p_compressed_bm *p = pi->data;
b411b363 4890
bde89a9e 4891 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
d0180171 4892 drbd_err(device, "ReportCBitmap packet too large\n");
82bc0194 4893 err = -EIO;
b411b363
PR
4894 goto out;
4895 }
e658983a 4896 if (pi->size <= sizeof(*p)) {
d0180171 4897 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4898 err = -EIO;
78fcbdae 4899 goto out;
b411b363 4900 }
9f4fe9ad 4901 err = drbd_recv_all(peer_device->connection, p, pi->size);
e658983a
AG
4902 if (err)
4903 goto out;
69a22773 4904 err = decode_bitmap_c(peer_device, p, &c, pi->size);
b411b363 4905 } else {
d0180171 4906			drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
82bc0194 4907 err = -EIO;
b411b363
PR
4908 goto out;
4909 }
4910
e2857216 4911 c.packets[pi->cmd == P_BITMAP]++;
bde89a9e 4912 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
b411b363 4913
2c46407d
AG
4914 if (err <= 0) {
4915 if (err < 0)
4916 goto out;
b411b363 4917 break;
2c46407d 4918 }
9f4fe9ad 4919 err = drbd_recv_header(peer_device->connection, pi);
82bc0194 4920 if (err)
b411b363 4921 goto out;
2c46407d 4922 }
b411b363 4923
b30ab791 4924 INFO_bm_xfer_stats(device, "receive", &c);
b411b363 4925
b30ab791 4926 if (device->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4927 enum drbd_state_rv rv;
4928
b30ab791 4929 err = drbd_send_bitmap(device);
82bc0194 4930 if (err)
b411b363
PR
4931 goto out;
4932 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
b30ab791 4933 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
0b0ba1ef 4934 D_ASSERT(device, rv == SS_SUCCESS);
b30ab791 4935 } else if (device->state.conn != C_WF_BITMAP_S) {
b411b363
PR
4936 /* admin may have requested C_DISCONNECTING,
4937 * other threads may have noticed network errors */
d0180171 4938 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
b30ab791 4939 drbd_conn_str(device->state.conn));
b411b363 4940 }
82bc0194 4941 err = 0;
b411b363 4942
b411b363 4943 out:
b30ab791
AG
4944 drbd_bm_unlock(device);
4945 if (!err && device->state.conn == C_WF_BITMAP_S)
4946 drbd_start_resync(device, C_SYNC_SOURCE);
82bc0194 4947 return err;
b411b363
PR
4948}
4949
bde89a9e 4950static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4951{
1ec861eb 4952 drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4953 pi->cmd, pi->size);
b411b363 4954
bde89a9e 4955 return ignore_remaining_packet(connection, pi);
b411b363
PR
4956}
4957
bde89a9e 4958static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 4959{
e7f52dfb
LE
4960 /* Make sure we've acked all the TCP data associated
4961 * with the data requests being unplugged */
ddd061b8 4962 tcp_sock_set_quickack(connection->data.socket->sk, 2);
82bc0194 4963 return 0;
0ced55a3
PR
4964}
4965
bde89a9e 4966static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
73a01a18 4967{
9f4fe9ad 4968 struct drbd_peer_device *peer_device;
b30ab791 4969 struct drbd_device *device;
e658983a 4970 struct p_block_desc *p = pi->data;
4a76b161 4971
9f4fe9ad
AG
4972 peer_device = conn_peer_device(connection, pi->vnr);
4973 if (!peer_device)
4a76b161 4974 return -EIO;
9f4fe9ad 4975 device = peer_device->device;
73a01a18 4976
b30ab791 4977 switch (device->state.conn) {
f735e363
LE
4978 case C_WF_SYNC_UUID:
4979 case C_WF_BITMAP_T:
4980 case C_BEHIND:
4981 break;
4982 default:
d0180171 4983 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
b30ab791 4984 drbd_conn_str(device->state.conn));
f735e363
LE
4985 }
4986
b30ab791 4987 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
73a01a18 4988
82bc0194 4989 return 0;
73a01a18
PR
4990}
4991
700ca8c0
PR
4992static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
4993{
4994 struct drbd_peer_device *peer_device;
4995 struct p_block_desc *p = pi->data;
4996 struct drbd_device *device;
4997 sector_t sector;
4998 int size, err = 0;
4999
5000 peer_device = conn_peer_device(connection, pi->vnr);
5001 if (!peer_device)
5002 return -EIO;
5003 device = peer_device->device;
5004
5005 sector = be64_to_cpu(p->sector);
5006 size = be32_to_cpu(p->blksize);
5007
5008 dec_rs_pending(device);
5009
5010 if (get_ldev(device)) {
5011 struct drbd_peer_request *peer_req;
45c21793 5012 const int op = REQ_OP_WRITE_ZEROES;
700ca8c0
PR
5013
5014 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
9104d31a 5015 size, 0, GFP_NOIO);
700ca8c0
PR
5016 if (!peer_req) {
5017 put_ldev(device);
5018 return -ENOMEM;
5019 }
5020
5021 peer_req->w.cb = e_end_resync_block;
5022 peer_req->submit_jif = jiffies;
f31e583a 5023 peer_req->flags |= EE_TRIM;
700ca8c0
PR
5024
5025 spin_lock_irq(&device->resource->req_lock);
5026 list_add_tail(&peer_req->w.list, &device->sync_ee);
5027 spin_unlock_irq(&device->resource->req_lock);
5028
5029 atomic_add(pi->size >> 9, &device->rs_sect_ev);
5030 err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
5031
5032 if (err) {
5033 spin_lock_irq(&device->resource->req_lock);
5034 list_del(&peer_req->w.list);
5035 spin_unlock_irq(&device->resource->req_lock);
5036
5037 drbd_free_peer_req(device, peer_req);
5038 put_ldev(device);
5039 err = 0;
5040 goto fail;
5041 }
5042
5043 inc_unacked(device);
5044
5045 /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
5046 as well as drbd_rs_complete_io() */
5047 } else {
5048 fail:
5049 drbd_rs_complete_io(device, sector);
5050 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
5051 }
5052
5053 atomic_add(size >> 9, &device->rs_sect_in);
5054
5055 return err;
5056}
5057
02918be2
PR
5058struct data_cmd {
5059 int expect_payload;
9104d31a 5060 unsigned int pkt_size;
bde89a9e 5061 int (*fn)(struct drbd_connection *, struct packet_info *);
02918be2
PR
5062};
5063
5064static struct data_cmd drbd_cmd_handler[] = {
5065 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
5066 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
5067 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
5068 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
5069 [P_BITMAP] = { 1, 0, receive_bitmap } ,
5070 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
5071 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
5072 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
5073 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
5074 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
5075 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
5076 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
5077 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
5078 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
5079 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
5080 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
5081 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
5082 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
5083 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
5084 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
700ca8c0 5085 [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
02918be2 5086 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 5087 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 5088 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 5089 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
a0fb3c47 5090 [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
f31e583a 5091 [P_ZEROES] = { 0, sizeof(struct p_trim), receive_Data },
700ca8c0 5092 [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
9104d31a 5093 [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data },
b411b363
PR
5094};
5095
bde89a9e 5096static void drbdd(struct drbd_connection *connection)
b411b363 5097{
77351055 5098 struct packet_info pi;
02918be2 5099 size_t shs; /* sub header size */
82bc0194 5100 int err;
b411b363 5101
bde89a9e 5102 while (get_t_state(&connection->receiver) == RUNNING) {
9104d31a 5103 struct data_cmd const *cmd;
b411b363 5104
bde89a9e 5105 drbd_thread_current_set_cpu(&connection->receiver);
c51a0ef3
LE
5106 update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
5107 if (drbd_recv_header_maybe_unplug(connection, &pi))
02918be2 5108 goto err_out;
b411b363 5109
deebe195 5110 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 5111 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
1ec861eb 5112 drbd_err(connection, "Unexpected data packet %s (0x%04x)",
2fcb8f30 5113 cmdname(pi.cmd), pi.cmd);
02918be2 5114 goto err_out;
0b33a916 5115 }
b411b363 5116
e658983a 5117 shs = cmd->pkt_size;
9104d31a
LE
5118 if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
5119 shs += sizeof(struct o_qlim);
e658983a 5120 if (pi.size > shs && !cmd->expect_payload) {
1ec861eb 5121 drbd_err(connection, "No payload expected %s l:%d\n",
2fcb8f30 5122 cmdname(pi.cmd), pi.size);
02918be2 5123 goto err_out;
b411b363 5124 }
9104d31a
LE
5125 if (pi.size < shs) {
5126 drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
5127 cmdname(pi.cmd), (int)shs, pi.size);
5128 goto err_out;
5129 }
b411b363 5130
c13f7e1a 5131 if (shs) {
944410e9 5132 update_receiver_timing_details(connection, drbd_recv_all_warn);
bde89a9e 5133 err = drbd_recv_all_warn(connection, pi.data, shs);
a5c31904 5134 if (err)
c13f7e1a 5135 goto err_out;
e2857216 5136 pi.size -= shs;
c13f7e1a
LE
5137 }
5138
944410e9 5139 update_receiver_timing_details(connection, cmd->fn);
bde89a9e 5140 err = cmd->fn(connection, &pi);
4a76b161 5141 if (err) {
1ec861eb 5142 drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
9f5bdc33 5143 cmdname(pi.cmd), err, pi.size);
02918be2 5144 goto err_out;
b411b363
PR
5145 }
5146 }
82bc0194 5147 return;
b411b363 5148
82bc0194 5149 err_out:
bde89a9e 5150 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
5151}
5152
bde89a9e 5153static void conn_disconnect(struct drbd_connection *connection)
b411b363 5154{
c06ece6b 5155 struct drbd_peer_device *peer_device;
bbeb641c 5156 enum drbd_conns oc;
376694a0 5157 int vnr;
b411b363 5158
bde89a9e 5159 if (connection->cstate == C_STANDALONE)
b411b363 5160 return;
b411b363 5161
545752d5
LE
5162 /* We are about to start the cleanup after connection loss.
5163 * Make sure drbd_make_request knows about that.
5164 * Usually we should be in some network failure state already,
5165 * but just in case we are not, we fix it up here.
5166 */
bde89a9e 5167 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 5168
668700b4 5169	/* ack_receiver does not clean up anything. It must not interfere, either */
1c03e520 5170 drbd_thread_stop(&connection->ack_receiver);
668700b4
PR
5171 if (connection->ack_sender) {
5172 destroy_workqueue(connection->ack_sender);
5173 connection->ack_sender = NULL;
5174 }
bde89a9e 5175 drbd_free_sock(connection);
360cc740 5176
c141ebda 5177 rcu_read_lock();
c06ece6b
AG
5178 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5179 struct drbd_device *device = peer_device->device;
b30ab791 5180 kref_get(&device->kref);
c141ebda 5181 rcu_read_unlock();
69a22773 5182 drbd_disconnected(peer_device);
c06ece6b 5183 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
5184 rcu_read_lock();
5185 }
5186 rcu_read_unlock();
5187
bde89a9e 5188 if (!list_empty(&connection->current_epoch->list))
1ec861eb 5189 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
12038a3a 5190 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
bde89a9e
AG
5191 atomic_set(&connection->current_epoch->epoch_size, 0);
5192 connection->send.seen_any_write_yet = false;
12038a3a 5193
1ec861eb 5194 drbd_info(connection, "Connection closed\n");
360cc740 5195
bde89a9e
AG
5196 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
5197 conn_try_outdate_peer_async(connection);
cb703454 5198
0500813f 5199 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 5200 oc = connection->cstate;
bbeb641c 5201 if (oc >= C_UNCONNECTED)
bde89a9e 5202 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 5203
0500813f 5204 spin_unlock_irq(&connection->resource->req_lock);
360cc740 5205
f3dfa40a 5206 if (oc == C_DISCONNECTING)
bde89a9e 5207 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
5208}
5209
69a22773 5210static int drbd_disconnected(struct drbd_peer_device *peer_device)
360cc740 5211{
69a22773 5212 struct drbd_device *device = peer_device->device;
360cc740 5213 unsigned int i;
b411b363 5214
85719573 5215 /* wait for current activity to cease. */
0500813f 5216 spin_lock_irq(&device->resource->req_lock);
b30ab791
AG
5217 _drbd_wait_ee_list_empty(device, &device->active_ee);
5218 _drbd_wait_ee_list_empty(device, &device->sync_ee);
5219 _drbd_wait_ee_list_empty(device, &device->read_ee);
0500813f 5220 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5221
5222 /* We do not have data structures that would allow us to
5223 * get the rs_pending_cnt down to 0 again.
5224 * * On C_SYNC_TARGET we do not have any data structures describing
5225 * the pending RSDataRequest's we have sent.
5226 * * On C_SYNC_SOURCE there is no data structure that tracks
5227 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
5228 * And no, it is not the sum of the reference counts in the
5229 * resync_LRU. The resync_LRU tracks the whole operation including
5230 * the disk-IO, while the rs_pending_cnt only tracks the blocks
5231 * on the fly. */
b30ab791
AG
5232 drbd_rs_cancel_all(device);
5233 device->rs_total = 0;
5234 device->rs_failed = 0;
5235 atomic_set(&device->rs_pending_cnt, 0);
5236 wake_up(&device->misc_wait);
b411b363 5237
b30ab791 5238 del_timer_sync(&device->resync_timer);
2bccef39 5239 resync_timer_fn(&device->resync_timer);
b411b363 5240
b411b363
PR
5241 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
5242 * w_make_resync_request etc. which may still be on the worker queue
5243 * to be "canceled" */
b5043c5e 5244 drbd_flush_workqueue(&peer_device->connection->sender_work);
b411b363 5245
b30ab791 5246 drbd_finish_peer_reqs(device);
b411b363 5247
d10b4ea3
PR
5248 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
 5249	   might have queued work again. The one before drbd_finish_peer_reqs() is
 5250	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
b5043c5e 5251 drbd_flush_workqueue(&peer_device->connection->sender_work);
d10b4ea3 5252
08332d73
LE
5253 /* need to do it again, drbd_finish_peer_reqs() may have populated it
5254 * again via drbd_try_clear_on_disk_bm(). */
b30ab791 5255 drbd_rs_cancel_all(device);
b411b363 5256
b30ab791
AG
5257 kfree(device->p_uuid);
5258 device->p_uuid = NULL;
b411b363 5259
b30ab791 5260 if (!drbd_suspended(device))
69a22773 5261 tl_clear(peer_device->connection);
b411b363 5262
b30ab791 5263 drbd_md_sync(device);
b411b363 5264
be115b69
LE
5265 if (get_ldev(device)) {
5266 drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5267 "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
5268 put_ldev(device);
5269 }
20ceb2b2 5270
b411b363
PR
5271 /* tcp_close and release of sendpage pages can be deferred. I don't
5272 * want to use SO_LINGER, because apparently it can be deferred for
5273 * more than 20 seconds (longest time I checked).
5274 *
 5275	 * Actually we don't care exactly when the network stack does its
5276 * put_page(), but release our reference on these pages right here.
5277 */
b30ab791 5278 i = drbd_free_peer_reqs(device, &device->net_ee);
b411b363 5279 if (i)
d0180171 5280 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
b30ab791 5281 i = atomic_read(&device->pp_in_use_by_net);
435f0740 5282 if (i)
d0180171 5283 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
b30ab791 5284 i = atomic_read(&device->pp_in_use);
b411b363 5285 if (i)
d0180171 5286 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
b411b363 5287
0b0ba1ef
AG
5288 D_ASSERT(device, list_empty(&device->read_ee));
5289 D_ASSERT(device, list_empty(&device->active_ee));
5290 D_ASSERT(device, list_empty(&device->sync_ee));
5291 D_ASSERT(device, list_empty(&device->done_ee));
b411b363 5292
360cc740 5293 return 0;
b411b363
PR
5294}
5295
5296/*
5297 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
5298 * we can agree on is stored in agreed_pro_version.
5299 *
5300 * feature flags and the reserved array should be enough room for future
5301 * enhancements of the handshake protocol, and possible plugins...
5302 *
5303 * for now, they are expected to be zero, but ignored.
5304 */
bde89a9e 5305static int drbd_send_features(struct drbd_connection *connection)
b411b363 5306{
9f5bdc33
AG
5307 struct drbd_socket *sock;
5308 struct p_connection_features *p;
b411b363 5309
bde89a9e
AG
5310 sock = &connection->data;
5311 p = conn_prepare_command(connection, sock);
9f5bdc33 5312 if (!p)
e8d17b01 5313 return -EIO;
b411b363
PR
5314 memset(p, 0, sizeof(*p));
5315 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
5316 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
20c68fde 5317 p->feature_flags = cpu_to_be32(PRO_FEATURES);
bde89a9e 5318 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
5319}
5320
5321/*
5322 * return values:
5323 * 1 yes, we have a valid connection
5324 * 0 oops, did not work out, please try again
5325 * -1 peer talks different language,
5326 * no point in trying again, please go standalone.
5327 */
bde89a9e 5328static int drbd_do_features(struct drbd_connection *connection)
b411b363 5329{
bde89a9e 5330 /* ASSERT current == connection->receiver ... */
e658983a
AG
5331 struct p_connection_features *p;
5332 const int expect = sizeof(struct p_connection_features);
77351055 5333 struct packet_info pi;
a5c31904 5334 int err;
b411b363 5335
bde89a9e 5336 err = drbd_send_features(connection);
e8d17b01 5337 if (err)
b411b363
PR
5338 return 0;
5339
bde89a9e 5340 err = drbd_recv_header(connection, &pi);
69bc7bc3 5341 if (err)
b411b363
PR
5342 return 0;
5343
6038178e 5344 if (pi.cmd != P_CONNECTION_FEATURES) {
1ec861eb 5345 drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 5346 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5347 return -1;
5348 }
5349
77351055 5350 if (pi.size != expect) {
1ec861eb 5351 drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 5352 expect, pi.size);
b411b363
PR
5353 return -1;
5354 }
5355
e658983a 5356 p = pi.data;
bde89a9e 5357 err = drbd_recv_all_warn(connection, p, expect);
a5c31904 5358 if (err)
b411b363 5359 return 0;
b411b363 5360
b411b363
PR
5361 p->protocol_min = be32_to_cpu(p->protocol_min);
5362 p->protocol_max = be32_to_cpu(p->protocol_max);
5363 if (p->protocol_max == 0)
5364 p->protocol_max = p->protocol_min;
5365
5366 if (PRO_VERSION_MAX < p->protocol_min ||
5367 PRO_VERSION_MIN > p->protocol_max)
5368 goto incompat;
5369
bde89a9e 5370 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
20c68fde 5371 connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
b411b363 5372
1ec861eb 5373 drbd_info(connection, "Handshake successful: "
bde89a9e 5374 "Agreed network protocol version %d\n", connection->agreed_pro_version);
b411b363 5375
f31e583a 5376 drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s%s.\n",
9104d31a
LE
5377 connection->agreed_features,
5378 connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
5379 connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
f31e583a
LE
5380 connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : "",
5381 connection->agreed_features & DRBD_FF_WZEROES ? " WRITE_ZEROES" :
9104d31a 5382 connection->agreed_features ? "" : " none");
92d94ae6 5383
b411b363
PR
5384 return 1;
5385
5386 incompat:
1ec861eb 5387 drbd_err(connection, "incompatible DRBD dialects: "
b411b363
PR
5388 "I support %d-%d, peer supports %d-%d\n",
5389 PRO_VERSION_MIN, PRO_VERSION_MAX,
5390 p->protocol_min, p->protocol_max);
5391 return -1;
5392}
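/*
 * Example of the negotiation above (hypothetical peer values; suppose this
 * build has PRO_VERSION_MIN = 86 and PRO_VERSION_MAX = 101 and advertises
 * PRO_FEATURES): if the peer reports protocol_min = 86, protocol_max = 97
 * and feature_flags with only DRBD_FF_TRIM and DRBD_FF_THIN_RESYNC set, then
 *
 *   agreed_pro_version = min(101, 97) = 97
 *   agreed_features    = PRO_FEATURES & peer_flags
 *                      = DRBD_FF_TRIM | DRBD_FF_THIN_RESYNC
 *
 * and the handshake message lists TRIM and THIN_RESYNC as the feature flags
 * enabled on the protocol level.
 */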
5393
5394#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
bde89a9e 5395static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5396{
1ec861eb
AG
 5397	drbd_err(connection, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
5398 drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 5399 return -1;
b411b363
PR
5400}
5401#else
5402#define CHALLENGE_LEN 64
b10d96cb
JT
5403
5404/* Return value:
5405 1 - auth succeeded,
5406 0 - failed, try again (network error),
5407 -1 - auth failed, don't try again.
5408*/
5409
bde89a9e 5410static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5411{
9f5bdc33 5412 struct drbd_socket *sock;
b411b363 5413 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
b411b363
PR
5414 char *response = NULL;
5415 char *right_response = NULL;
5416 char *peers_ch = NULL;
44ed167d
PR
5417 unsigned int key_len;
5418 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363 5419 unsigned int resp_size;
77ce56e2 5420 struct shash_desc *desc;
77351055 5421 struct packet_info pi;
44ed167d 5422 struct net_conf *nc;
69bc7bc3 5423 int err, rv;
b411b363 5424
9f5bdc33 5425 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 5426
44ed167d 5427 rcu_read_lock();
bde89a9e 5428 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
5429 key_len = strlen(nc->shared_secret);
5430 memcpy(secret, nc->shared_secret, key_len);
5431 rcu_read_unlock();
5432
77ce56e2
AB
5433 desc = kmalloc(sizeof(struct shash_desc) +
5434 crypto_shash_descsize(connection->cram_hmac_tfm),
5435 GFP_KERNEL);
5436 if (!desc) {
5437 rv = -1;
5438 goto fail;
5439 }
9534d671 5440 desc->tfm = connection->cram_hmac_tfm;
b411b363 5441
9534d671 5442 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 5443 if (rv) {
9534d671 5444 drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
b10d96cb 5445 rv = -1;
b411b363
PR
5446 goto fail;
5447 }
5448
5449 get_random_bytes(my_challenge, CHALLENGE_LEN);
5450
bde89a9e
AG
5451 sock = &connection->data;
5452 if (!conn_prepare_command(connection, sock)) {
9f5bdc33
AG
5453 rv = 0;
5454 goto fail;
5455 }
bde89a9e 5456 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 5457 my_challenge, CHALLENGE_LEN);
b411b363
PR
5458 if (!rv)
5459 goto fail;
5460
bde89a9e 5461 err = drbd_recv_header(connection, &pi);
69bc7bc3
AG
5462 if (err) {
5463 rv = 0;
b411b363 5464 goto fail;
69bc7bc3 5465 }
b411b363 5466
77351055 5467 if (pi.cmd != P_AUTH_CHALLENGE) {
1ec861eb 5468 drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 5469 cmdname(pi.cmd), pi.cmd);
9049ccd4 5470 rv = -1;
b411b363
PR
5471 goto fail;
5472 }
5473
77351055 5474 if (pi.size > CHALLENGE_LEN * 2) {
1ec861eb 5475 drbd_err(connection, "expected AuthChallenge payload too big.\n");
b10d96cb 5476 rv = -1;
b411b363
PR
5477 goto fail;
5478 }
5479
67cca286
PR
5480 if (pi.size < CHALLENGE_LEN) {
5481 drbd_err(connection, "AuthChallenge payload too small.\n");
5482 rv = -1;
5483 goto fail;
5484 }
5485
77351055 5486 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 5487 if (peers_ch == NULL) {
1ec861eb 5488 drbd_err(connection, "kmalloc of peers_ch failed\n");
b10d96cb 5489 rv = -1;
b411b363
PR
5490 goto fail;
5491 }
5492
bde89a9e 5493 err = drbd_recv_all_warn(connection, peers_ch, pi.size);
a5c31904 5494 if (err) {
b411b363
PR
5495 rv = 0;
5496 goto fail;
5497 }
5498
67cca286
PR
5499 if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
5500 drbd_err(connection, "Peer presented the same challenge!\n");
5501 rv = -1;
5502 goto fail;
5503 }
5504
9534d671 5505 resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
b411b363
PR
5506 response = kmalloc(resp_size, GFP_NOIO);
5507 if (response == NULL) {
1ec861eb 5508 drbd_err(connection, "kmalloc of response failed\n");
b10d96cb 5509 rv = -1;
b411b363
PR
5510 goto fail;
5511 }
5512
9534d671 5513 rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
b411b363 5514 if (rv) {
1ec861eb 5515 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5516 rv = -1;
b411b363
PR
5517 goto fail;
5518 }
5519
bde89a9e 5520 if (!conn_prepare_command(connection, sock)) {
9f5bdc33 5521 rv = 0;
b411b363 5522 goto fail;
9f5bdc33 5523 }
bde89a9e 5524 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 5525 response, resp_size);
b411b363
PR
5526 if (!rv)
5527 goto fail;
5528
bde89a9e 5529 err = drbd_recv_header(connection, &pi);
69bc7bc3 5530 if (err) {
b411b363
PR
5531 rv = 0;
5532 goto fail;
5533 }
5534
77351055 5535 if (pi.cmd != P_AUTH_RESPONSE) {
1ec861eb 5536 drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 5537 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5538 rv = 0;
5539 goto fail;
5540 }
5541
77351055 5542 if (pi.size != resp_size) {
1ec861eb 5543 drbd_err(connection, "expected AuthResponse payload of wrong size\n");
b411b363
PR
5544 rv = 0;
5545 goto fail;
5546 }
b411b363 5547
bde89a9e 5548 err = drbd_recv_all_warn(connection, response , resp_size);
a5c31904 5549 if (err) {
b411b363
PR
5550 rv = 0;
5551 goto fail;
5552 }
5553
5554 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 5555 if (right_response == NULL) {
1ec861eb 5556 drbd_err(connection, "kmalloc of right_response failed\n");
b10d96cb 5557 rv = -1;
b411b363
PR
5558 goto fail;
5559 }
5560
9534d671
HX
5561 rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
5562 right_response);
b411b363 5563 if (rv) {
1ec861eb 5564 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5565 rv = -1;
b411b363
PR
5566 goto fail;
5567 }
5568
5569 rv = !memcmp(response, right_response, resp_size);
5570
5571 if (rv)
1ec861eb 5572 drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
44ed167d 5573 resp_size);
b10d96cb
JT
5574 else
5575 rv = -1;
b411b363
PR
5576
5577 fail:
5578 kfree(peers_ch);
5579 kfree(response);
5580 kfree(right_response);
77ce56e2
AB
5581 if (desc) {
5582 shash_desc_zero(desc);
5583 kfree(desc);
5584 }
b411b363
PR
5585
5586 return rv;
5587}
5588#endif
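/*
 * Sketch of the challenge/response exchange implemented in drbd_do_auth()
 * above, with HMAC_C() denoting the cram-hmac keyed by the shared secret
 * (message names as in the code, contents simplified; both sides run the
 * same sequence):
 *
 *   this node                                    peer
 *   P_AUTH_CHALLENGE(my_challenge)      ---->
 *                                       <----    P_AUTH_CHALLENGE(peers_ch)
 *   P_AUTH_RESPONSE(HMAC_C(peers_ch))   ---->
 *                                       <----    P_AUTH_RESPONSE(HMAC_C(my_challenge))
 *
 * Authentication succeeds only if the received response matches the locally
 * computed right_response = HMAC_C(my_challenge) in the final memcmp().
 */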
5589
8fe60551 5590int drbd_receiver(struct drbd_thread *thi)
b411b363 5591{
bde89a9e 5592 struct drbd_connection *connection = thi->connection;
b411b363
PR
5593 int h;
5594
1ec861eb 5595 drbd_info(connection, "receiver (re)started\n");
b411b363
PR
5596
5597 do {
bde89a9e 5598 h = conn_connect(connection);
b411b363 5599 if (h == 0) {
bde89a9e 5600 conn_disconnect(connection);
20ee6390 5601 schedule_timeout_interruptible(HZ);
b411b363
PR
5602 }
5603 if (h == -1) {
1ec861eb 5604 drbd_warn(connection, "Discarding network configuration.\n");
bde89a9e 5605 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
5606 }
5607 } while (h == 0);
5608
c51a0ef3
LE
5609 if (h > 0) {
5610 blk_start_plug(&connection->receiver_plug);
bde89a9e 5611 drbdd(connection);
c51a0ef3
LE
5612 blk_finish_plug(&connection->receiver_plug);
5613 }
b411b363 5614
bde89a9e 5615 conn_disconnect(connection);
b411b363 5616
1ec861eb 5617 drbd_info(connection, "receiver terminated\n");
b411b363
PR
5618 return 0;
5619}
5620
5621/* ********* acknowledge sender ******** */
5622
bde89a9e 5623static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5624{
e658983a 5625 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
5626 int retcode = be32_to_cpu(p->retcode);
5627
5628 if (retcode >= SS_SUCCESS) {
bde89a9e 5629 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
e4f78ede 5630 } else {
bde89a9e 5631 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
1ec861eb 5632 drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
e4f78ede
PR
5633 drbd_set_st_err_str(retcode), retcode);
5634 }
bde89a9e 5635 wake_up(&connection->ping_wait);
e4f78ede 5636
2735a594 5637 return 0;
e4f78ede 5638}
b411b363 5639
bde89a9e 5640static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5641{
9f4fe9ad 5642 struct drbd_peer_device *peer_device;
b30ab791 5643 struct drbd_device *device;
e658983a 5644 struct p_req_state_reply *p = pi->data;
b411b363
PR
5645 int retcode = be32_to_cpu(p->retcode);
5646
9f4fe9ad
AG
5647 peer_device = conn_peer_device(connection, pi->vnr);
5648 if (!peer_device)
2735a594 5649 return -EIO;
9f4fe9ad 5650 device = peer_device->device;
1952e916 5651
bde89a9e 5652 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
0b0ba1ef 5653 D_ASSERT(device, connection->agreed_pro_version < 100);
bde89a9e 5654 return got_conn_RqSReply(connection, pi);
4d0fc3fd
PR
5655 }
5656
b411b363 5657 if (retcode >= SS_SUCCESS) {
b30ab791 5658 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
b411b363 5659 } else {
b30ab791 5660 set_bit(CL_ST_CHG_FAIL, &device->flags);
d0180171 5661 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 5662 drbd_set_st_err_str(retcode), retcode);
b411b363 5663 }
b30ab791 5664 wake_up(&device->state_wait);
b411b363 5665
2735a594 5666 return 0;
b411b363
PR
5667}
5668
bde89a9e 5669static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5670{
bde89a9e 5671 return drbd_send_ping_ack(connection);
b411b363
PR
5672
5673}
5674
bde89a9e 5675static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363
PR
5676{
5677 /* restore idle timeout */
bde89a9e
AG
5678 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
5679 if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
5680 wake_up(&connection->ping_wait);
b411b363 5681
2735a594 5682 return 0;
b411b363
PR
5683}
5684
bde89a9e 5685static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5686{
9f4fe9ad 5687 struct drbd_peer_device *peer_device;
b30ab791 5688 struct drbd_device *device;
e658983a 5689 struct p_block_ack *p = pi->data;
b411b363
PR
5690 sector_t sector = be64_to_cpu(p->sector);
5691 int blksize = be32_to_cpu(p->blksize);
5692
9f4fe9ad
AG
5693 peer_device = conn_peer_device(connection, pi->vnr);
5694 if (!peer_device)
2735a594 5695 return -EIO;
9f4fe9ad 5696 device = peer_device->device;
1952e916 5697
9f4fe9ad 5698 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
b411b363 5699
69a22773 5700 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5701
b30ab791
AG
5702 if (get_ldev(device)) {
5703 drbd_rs_complete_io(device, sector);
5704 drbd_set_in_sync(device, sector, blksize);
1d53f09e 5705 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
b30ab791
AG
5706 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5707 put_ldev(device);
1d53f09e 5708 }
b30ab791
AG
5709 dec_rs_pending(device);
5710 atomic_add(blksize >> 9, &device->rs_sect_in);
b411b363 5711
2735a594 5712 return 0;
b411b363
PR
5713}
5714
bc9c5c41 5715static int
b30ab791 5716validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
bc9c5c41
AG
5717 struct rb_root *root, const char *func,
5718 enum drbd_req_event what, bool missing_ok)
b411b363
PR
5719{
5720 struct drbd_request *req;
5721 struct bio_and_error m;
5722
0500813f 5723 spin_lock_irq(&device->resource->req_lock);
b30ab791 5724 req = find_request(device, root, id, sector, missing_ok, func);
b411b363 5725 if (unlikely(!req)) {
0500813f 5726 spin_unlock_irq(&device->resource->req_lock);
85997675 5727 return -EIO;
b411b363
PR
5728 }
5729 __req_mod(req, what, &m);
0500813f 5730 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5731
5732 if (m.bio)
b30ab791 5733 complete_master_bio(device, &m);
85997675 5734 return 0;
b411b363
PR
5735}
5736
bde89a9e 5737static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5738{
9f4fe9ad 5739 struct drbd_peer_device *peer_device;
b30ab791 5740 struct drbd_device *device;
e658983a 5741 struct p_block_ack *p = pi->data;
b411b363
PR
5742 sector_t sector = be64_to_cpu(p->sector);
5743 int blksize = be32_to_cpu(p->blksize);
5744 enum drbd_req_event what;
5745
9f4fe9ad
AG
5746 peer_device = conn_peer_device(connection, pi->vnr);
5747 if (!peer_device)
2735a594 5748 return -EIO;
9f4fe9ad 5749 device = peer_device->device;
1952e916 5750
69a22773 5751 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5752
579b57ed 5753 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5754 drbd_set_in_sync(device, sector, blksize);
5755 dec_rs_pending(device);
2735a594 5756 return 0;
b411b363 5757 }
e05e1e59 5758 switch (pi->cmd) {
b411b363 5759 case P_RS_WRITE_ACK:
8554df1c 5760 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
5761 break;
5762 case P_WRITE_ACK:
8554df1c 5763 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5764 break;
5765 case P_RECV_ACK:
8554df1c 5766 what = RECV_ACKED_BY_PEER;
b411b363 5767 break;
d4dabbe2
LE
5768 case P_SUPERSEDED:
5769 what = CONFLICT_RESOLVED;
b411b363 5770 break;
7be8da07 5771 case P_RETRY_WRITE:
7be8da07 5772 what = POSTPONE_WRITE;
b411b363
PR
5773 break;
5774 default:
2735a594 5775 BUG();
b411b363
PR
5776 }
5777
b30ab791
AG
5778 return validate_req_change_req_state(device, p->block_id, sector,
5779 &device->write_requests, __func__,
2735a594 5780 what, false);
b411b363
PR
5781}
5782
bde89a9e 5783static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5784{
9f4fe9ad 5785 struct drbd_peer_device *peer_device;
b30ab791 5786 struct drbd_device *device;
e658983a 5787 struct p_block_ack *p = pi->data;
b411b363 5788 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5789 int size = be32_to_cpu(p->blksize);
85997675 5790 int err;
b411b363 5791
9f4fe9ad
AG
5792 peer_device = conn_peer_device(connection, pi->vnr);
5793 if (!peer_device)
2735a594 5794 return -EIO;
9f4fe9ad 5795 device = peer_device->device;
b411b363 5796
69a22773 5797 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5798
579b57ed 5799 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5800 dec_rs_pending(device);
5801 drbd_rs_failed_io(device, sector, size);
2735a594 5802 return 0;
b411b363 5803 }
2deb8336 5804
b30ab791
AG
5805 err = validate_req_change_req_state(device, p->block_id, sector,
5806 &device->write_requests, __func__,
303d1448 5807 NEG_ACKED, true);
85997675 5808 if (err) {
c3afd8f5
AG
5809 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5810 The master bio might already be completed, therefore the
5811 request is no longer in the collision hash. */
5812 /* In Protocol B we might already have got a P_RECV_ACK
5813 but then get a P_NEG_ACK afterwards. */
b30ab791 5814 drbd_set_out_of_sync(device, sector, size);
2deb8336 5815 }
2735a594 5816 return 0;
b411b363
PR
5817}
5818
bde89a9e 5819static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5820{
9f4fe9ad 5821 struct drbd_peer_device *peer_device;
b30ab791 5822 struct drbd_device *device;
e658983a 5823 struct p_block_ack *p = pi->data;
b411b363
PR
5824 sector_t sector = be64_to_cpu(p->sector);
5825
9f4fe9ad
AG
5826 peer_device = conn_peer_device(connection, pi->vnr);
5827 if (!peer_device)
2735a594 5828 return -EIO;
9f4fe9ad 5829 device = peer_device->device;
1952e916 5830
69a22773 5831 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
7be8da07 5832
d0180171 5833 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5834 (unsigned long long)sector, be32_to_cpu(p->blksize));
5835
b30ab791
AG
5836 return validate_req_change_req_state(device, p->block_id, sector,
5837 &device->read_requests, __func__,
2735a594 5838 NEG_ACKED, false);
b411b363
PR
5839}
5840
bde89a9e 5841static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5842{
9f4fe9ad 5843 struct drbd_peer_device *peer_device;
b30ab791 5844 struct drbd_device *device;
b411b363
PR
5845 sector_t sector;
5846 int size;
e658983a 5847 struct p_block_ack *p = pi->data;
1952e916 5848
9f4fe9ad
AG
5849 peer_device = conn_peer_device(connection, pi->vnr);
5850 if (!peer_device)
2735a594 5851 return -EIO;
9f4fe9ad 5852 device = peer_device->device;
b411b363
PR
5853
5854 sector = be64_to_cpu(p->sector);
5855 size = be32_to_cpu(p->blksize);
b411b363 5856
69a22773 5857 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5858
b30ab791 5859 dec_rs_pending(device);
b411b363 5860
b30ab791
AG
5861 if (get_ldev_if_state(device, D_FAILED)) {
5862 drbd_rs_complete_io(device, sector);
e05e1e59 5863 switch (pi->cmd) {
d612d309 5864 case P_NEG_RS_DREPLY:
b30ab791 5865 drbd_rs_failed_io(device, sector, size);
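			/* fall through; P_RS_CANCEL only breaks, no further handling needed */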
d612d309
PR
5866 case P_RS_CANCEL:
5867 break;
5868 default:
2735a594 5869 BUG();
d612d309 5870 }
b30ab791 5871 put_ldev(device);
b411b363
PR
5872 }
5873
2735a594 5874 return 0;
b411b363
PR
5875}
5876
bde89a9e 5877static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5878{
e658983a 5879 struct p_barrier_ack *p = pi->data;
c06ece6b 5880 struct drbd_peer_device *peer_device;
9ed57dcb 5881 int vnr;
1952e916 5882
bde89a9e 5883 tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
b411b363 5884
9ed57dcb 5885 rcu_read_lock();
c06ece6b
AG
5886 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5887 struct drbd_device *device = peer_device->device;
5888
b30ab791
AG
5889 if (device->state.conn == C_AHEAD &&
5890 atomic_read(&device->ap_in_flight) == 0 &&
5891 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5892 device->start_resync_timer.expires = jiffies + HZ;
5893 add_timer(&device->start_resync_timer);
9ed57dcb 5894 }
c4752ef1 5895 }
9ed57dcb 5896 rcu_read_unlock();
c4752ef1 5897
2735a594 5898 return 0;
b411b363
PR
5899}
5900
bde89a9e 5901static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5902{
9f4fe9ad 5903 struct drbd_peer_device *peer_device;
b30ab791 5904 struct drbd_device *device;
e658983a 5905 struct p_block_ack *p = pi->data;
84b8c06b 5906 struct drbd_device_work *dw;
b411b363
PR
5907 sector_t sector;
5908 int size;
5909
9f4fe9ad
AG
5910 peer_device = conn_peer_device(connection, pi->vnr);
5911 if (!peer_device)
2735a594 5912 return -EIO;
9f4fe9ad 5913 device = peer_device->device;
1952e916 5914
b411b363
PR
5915 sector = be64_to_cpu(p->sector);
5916 size = be32_to_cpu(p->blksize);
5917
69a22773 5918 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363
PR
5919
5920 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
b30ab791 5921 drbd_ov_out_of_sync_found(device, sector, size);
b411b363 5922 else
b30ab791 5923 ov_out_of_sync_print(device);
b411b363 5924
b30ab791 5925 if (!get_ldev(device))
2735a594 5926 return 0;
1d53f09e 5927
b30ab791
AG
5928 drbd_rs_complete_io(device, sector);
5929 dec_rs_pending(device);
b411b363 5930
b30ab791 5931 --device->ov_left;
ea5442af
LE
5932
5933 /* let's advance progress step marks only for every other megabyte */
b30ab791
AG
5934 if ((device->ov_left & 0x200) == 0x200)
5935 drbd_advance_rs_marks(device, device->ov_left);
ea5442af 5936
b30ab791 5937 if (device->ov_left == 0) {
84b8c06b
AG
5938 dw = kmalloc(sizeof(*dw), GFP_NOIO);
5939 if (dw) {
5940 dw->w.cb = w_ov_finished;
5941 dw->device = device;
5942 drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
b411b363 5943 } else {
84b8c06b 5944 drbd_err(device, "kmalloc(dw) failed.");
b30ab791
AG
5945 ov_out_of_sync_print(device);
5946 drbd_resync_finished(device);
b411b363
PR
5947 }
5948 }
b30ab791 5949 put_ldev(device);
2735a594 5950 return 0;
b411b363
PR
5951}
5952
bde89a9e 5953static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 5954{
2735a594 5955 return 0;
b411b363
PR
5956}
5957
668700b4
PR
5958struct meta_sock_cmd {
5959 size_t pkt_size;
5960 int (*fn)(struct drbd_connection *connection, struct packet_info *);
5961};
5962
5963static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
0ced55a3 5964{
668700b4
PR
5965 long t;
5966 struct net_conf *nc;
32862ec7 5967
668700b4
PR
5968 rcu_read_lock();
5969 nc = rcu_dereference(connection->net_conf);
5970 t = ping_timeout ? nc->ping_timeo : nc->ping_int;
5971 rcu_read_unlock();
c141ebda 5972
668700b4
PR
5973 t *= HZ;
5974 if (ping_timeout)
5975 t /= 10;
082a3439 5976
668700b4
PR
5977 connection->meta.socket->sk->sk_rcvtimeo = t;
5978}
32862ec7 5979
668700b4
PR
5980static void set_ping_timeout(struct drbd_connection *connection)
5981{
5982 set_rcvtimeo(connection, 1);
0ced55a3
PR
5983}
5984
668700b4
PR
5985static void set_idle_timeout(struct drbd_connection *connection)
5986{
5987 set_rcvtimeo(connection, 0);
5988}
b411b363 5989
668700b4 5990static struct meta_sock_cmd ack_receiver_tbl[] = {
e658983a
AG
5991 [P_PING] = { 0, got_Ping },
5992 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5993 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5994 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5995 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5996 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5997 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5998 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5999 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363 6000	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
6001 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
6002 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
6003 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 6004 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916 6005	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
6006 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
6007 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 6008};
b411b363 6009
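/* Note (added): main loop of the ack receiver thread.  It reads from the
 * meta socket with a timeout, sends a ping when asked to (or when the idle
 * timeout expires), and dispatches every completely received packet through
 * ack_receiver_tbl[].  Leaving the loop terminates the thread. */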
1c03e520 6010int drbd_ack_receiver(struct drbd_thread *thi)
b411b363 6011{
bde89a9e 6012 struct drbd_connection *connection = thi->connection;
668700b4 6013 struct meta_sock_cmd *cmd = NULL;
77351055 6014 struct packet_info pi;
668700b4 6015 unsigned long pre_recv_jif;
257d0af6 6016 int rv;
bde89a9e 6017 void *buf = connection->meta.rbuf;
b411b363 6018 int received = 0;
bde89a9e 6019 unsigned int header_size = drbd_header_size(connection);
52b061a4 6020 int expect = header_size;
44ed167d 6021 bool ping_timeout_active = false;
b411b363 6022
8b700983 6023 sched_set_fifo_low(current);
b411b363 6024
e77a0a5c 6025 while (get_t_state(thi) == RUNNING) {
80822284 6026 drbd_thread_current_set_cpu(thi);
b411b363 6027
668700b4 6028 conn_reclaim_net_peer_reqs(connection);
44ed167d 6029
bde89a9e 6030		if (test_and_clear_bit(SEND_PING, &connection->flags)) {
6031 if (drbd_send_ping(connection)) {
1ec861eb 6032 drbd_err(connection, "drbd_send_ping has failed\n");
b411b363 6033 goto reconnect;
841ce241 6034 }
668700b4 6035 set_ping_timeout(connection);
44ed167d 6036 ping_timeout_active = true;
b411b363 6037		}
6038
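		/* Note (added): remember when this receive attempt started; if it
		 * times out we check below whether the data socket saw traffic in
		 * the meantime. */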
668700b4 6039 pre_recv_jif = jiffies;
bde89a9e 6040 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
b411b363 6041
6042 /* Note:
6043 * -EINTR (on meta) we got a signal
6044 * -EAGAIN (on meta) rcvtimeo expired
6045 * -ECONNRESET other side closed the connection
6046 * -ERESTARTSYS (on data) we got a signal
6047 * rv < 0 other than above: unexpected error!
6048 * rv == expected: full header or command
6049 * rv < expected: "woken" by signal during receive
6050 * rv == 0 : "connection shut down by peer"
6051 */
6052 if (likely(rv > 0)) {
6053 received += rv;
6054 buf += rv;
6055 } else if (rv == 0) {
bde89a9e 6056 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
b66623e3 6057			long t;
6058 rcu_read_lock();
bde89a9e 6059 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
b66623e3 6060			rcu_read_unlock();
6061
bde89a9e 6062			t = wait_event_timeout(connection->ping_wait,
6063 connection->cstate < C_WF_REPORT_PARAMS,
b66623e3 6064 t);
599377ac 6065			if (t)
6066 break;
6067 }
1ec861eb 6068 drbd_err(connection, "meta connection shut down by peer.\n");
b411b363 6069			goto reconnect;
6070 } else if (rv == -EAGAIN) {
cb6518cb 6071			/* If the data socket received something meanwhile,
6072 * that is good enough: peer is still alive. */
668700b4 6073 if (time_after(connection->last_received, pre_recv_jif))
cb6518cb 6074 continue;
f36af18c 6075 if (ping_timeout_active) {
1ec861eb 6076 drbd_err(connection, "PingAck did not arrive in time.\n");
b411b363 6077				goto reconnect;
6078 }
bde89a9e 6079 set_bit(SEND_PING, &connection->flags);
b411b363 6080			continue;
6081 } else if (rv == -EINTR) {
668700b4 6082			/* maybe drbd_thread_stop(): the while condition will notice.
6083 * maybe woken for send_ping: we'll send a ping above,
6084 * and change the rcvtimeo */
6085 flush_signals(current);
b411b363 6086			continue;
6087 } else {
1ec861eb 6088 drbd_err(connection, "sock_recvmsg returned %d\n", rv);
b411b363 6089			goto reconnect;
6090 }
6091
6092 if (received == expect && cmd == NULL) {
bde89a9e 6093 if (decode_header(connection, connection->meta.rbuf, &pi))
b411b363 6094 goto reconnect;
668700b4 6095			cmd = &ack_receiver_tbl[pi.cmd];
6096 if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
1ec861eb 6097 drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
2fcb8f30 6098 cmdname(pi.cmd), pi.cmd);
b411b363 6099				goto disconnect;
6100 }
e658983a 6101 expect = header_size + cmd->pkt_size;
52b061a4 6102 if (pi.size != expect - header_size) {
1ec861eb 6103 drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 6104 pi.cmd, pi.size);
b411b363 6105 goto reconnect;
257d0af6 6106 }
b411b363 6107		}
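		/* Note (added): header decoded and the full payload has arrived:
		 * run the handler looked up above. */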
6108 if (received == expect) {
2735a594 6109 bool err;
a4fbda8e 6110
bde89a9e 6111 err = cmd->fn(connection, &pi);
2735a594 6112 if (err) {
d75f773c 6113 drbd_err(connection, "%ps failed\n", cmd->fn);
b411b363 6114 goto reconnect;
1952e916 6115 }
b411b363 6116
bde89a9e 6117 connection->last_received = jiffies;
f36af18c 6118
668700b4 6119			if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
6120 set_idle_timeout(connection);
44ed167d 6121				ping_timeout_active = false;
6122 }
f36af18c 6123
bde89a9e 6124 buf = connection->meta.rbuf;
b411b363 6125 received = 0;
52b061a4 6126 expect = header_size;
b411b363 6127			cmd = NULL;
6128 }
6129 }
6130
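	/* Note (added): the two if (0) blocks below are only ever entered via
	 * the reconnect: and disconnect: labels; normal control flow falls
	 * past them. */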
6131 if (0) {
6132reconnect:
bde89a9e 6133		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6134 conn_md_sync(connection);
b411b363 6135	}
6136 if (0) {
6137disconnect:
bde89a9e 6138 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 6139 }
b411b363 6140
668700b4 6141 drbd_info(connection, "ack_receiver terminated\n");
b411b363 6142
6143 return 0;
6144}
668700b4 6145
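/* Note (added): work-queue callback that flushes the pending peer-request
 * acks of one device over the meta socket, optionally corking the socket so
 * the acks are coalesced into fewer TCP segments. */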
6146void drbd_send_acks_wf(struct work_struct *ws)
6147{
6148 struct drbd_peer_device *peer_device =
6149 container_of(ws, struct drbd_peer_device, send_acks_work);
6150 struct drbd_connection *connection = peer_device->connection;
6151 struct drbd_device *device = peer_device->device;
6152 struct net_conf *nc;
6153 int tcp_cork, err;
6154
6155 rcu_read_lock();
6156 nc = rcu_dereference(connection->net_conf);
6157 tcp_cork = nc->tcp_cork;
6158 rcu_read_unlock();
6159
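	/* Note (added): cork the socket first, so the (possibly many) small ack
	 * packets produced by drbd_finish_peer_reqs() below go out in as few
	 * segments as possible; the cork is released again further down. */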
6160 if (tcp_cork)
db10538a 6161 tcp_sock_set_cork(connection->meta.socket->sk, true);
668700b4 6162
6163 err = drbd_finish_peer_reqs(device);
6164 kref_put(&device->kref, drbd_destroy_device);
6165 /* get is in drbd_endio_write_sec_final(). That is necessary to keep the
6166 struct work_struct send_acks_work alive, which is in the peer_device object */
6167
6168 if (err) {
6169 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6170 return;
6171 }
6172
6173 if (tcp_cork)
db10538a 6174 tcp_sock_set_cork(connection->meta.socket->sk, false);
668700b4 6175
6176 return;
6177}