block: make bdev_ops->rw_page() take a REQ_OP instead of bool
[linux-block.git] / drivers / block / drbd / drbd_receiver.c
CommitLineData
b411b363
PR
1/*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
b411b363
PR
26#include <linux/module.h>
27
7e5fec31 28#include <linux/uaccess.h>
b411b363
PR
29#include <net/sock.h>
30
b411b363
PR
31#include <linux/drbd.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/in.h>
35#include <linux/mm.h>
36#include <linux/memcontrol.h>
37#include <linux/mm_inline.h>
38#include <linux/slab.h>
ae7e81c0 39#include <uapi/linux/sched/types.h>
174cd4b1 40#include <linux/sched/signal.h>
b411b363
PR
41#include <linux/pkt_sched.h>
42#define __KERNEL_SYSCALLS__
43#include <linux/unistd.h>
44#include <linux/vmalloc.h>
45#include <linux/random.h>
b411b363
PR
46#include <linux/string.h>
47#include <linux/scatterlist.h>
48#include "drbd_int.h"
a3603a6e 49#include "drbd_protocol.h"
b411b363 50#include "drbd_req.h"
b411b363
PR
51#include "drbd_vli.h"
52
9104d31a 53#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)
20c68fde 54
77351055
PR
55struct packet_info {
56 enum drbd_packet cmd;
e2857216
AG
57 unsigned int size;
58 unsigned int vnr;
e658983a 59 void *data;
77351055
PR
60};
61
b411b363
PR
62enum finish_epoch {
63 FE_STILL_LIVE,
64 FE_DESTROYED,
65 FE_RECYCLED,
66};
67
bde89a9e
AG
68static int drbd_do_features(struct drbd_connection *connection);
69static int drbd_do_auth(struct drbd_connection *connection);
69a22773 70static int drbd_disconnected(struct drbd_peer_device *);
a0fb3c47 71static void conn_wait_active_ee_empty(struct drbd_connection *connection);
bde89a9e 72static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
99920dc5 73static int e_end_block(struct drbd_work *, int);
b411b363 74
b411b363
PR
75
76#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
77
45bb912b
LE
78/*
79 * some helper functions to deal with single linked page lists,
80 * page->private being our "next" pointer.
81 */
82
83/* If at least n pages are linked at head, get n pages off.
84 * Otherwise, don't modify head, and return NULL.
85 * Locking is the responsibility of the caller.
86 */
87static struct page *page_chain_del(struct page **head, int n)
88{
89 struct page *page;
90 struct page *tmp;
91
92 BUG_ON(!n);
93 BUG_ON(!head);
94
95 page = *head;
23ce4227
PR
96
97 if (!page)
98 return NULL;
99
45bb912b
LE
100 while (page) {
101 tmp = page_chain_next(page);
102 if (--n == 0)
103 break; /* found sufficient pages */
104 if (tmp == NULL)
105 /* insufficient pages, don't use any of them. */
106 return NULL;
107 page = tmp;
108 }
109
110 /* add end of list marker for the returned list */
111 set_page_private(page, 0);
112 /* actual return value, and adjustment of head */
113 page = *head;
114 *head = tmp;
115 return page;
116}
117
118/* may be used outside of locks to find the tail of a (usually short)
119 * "private" page chain, before adding it back to a global chain head
120 * with page_chain_add() under a spinlock. */
121static struct page *page_chain_tail(struct page *page, int *len)
122{
123 struct page *tmp;
124 int i = 1;
125 while ((tmp = page_chain_next(page)))
126 ++i, page = tmp;
127 if (len)
128 *len = i;
129 return page;
130}
131
132static int page_chain_free(struct page *page)
133{
134 struct page *tmp;
135 int i = 0;
136 page_chain_for_each_safe(page, tmp) {
137 put_page(page);
138 ++i;
139 }
140 return i;
141}
142
143static void page_chain_add(struct page **head,
144 struct page *chain_first, struct page *chain_last)
145{
146#if 1
147 struct page *tmp;
148 tmp = page_chain_tail(chain_first, NULL);
149 BUG_ON(tmp != chain_last);
150#endif
151
152 /* add chain to head */
153 set_page_private(chain_last, (unsigned long)*head);
154 *head = chain_first;
155}
156
b30ab791 157static struct page *__drbd_alloc_pages(struct drbd_device *device,
18c2d522 158 unsigned int number)
b411b363
PR
159{
160 struct page *page = NULL;
45bb912b 161 struct page *tmp = NULL;
18c2d522 162 unsigned int i = 0;
b411b363
PR
163
164 /* Yes, testing drbd_pp_vacant outside the lock is racy.
165 * So what. It saves a spin_lock. */
45bb912b 166 if (drbd_pp_vacant >= number) {
b411b363 167 spin_lock(&drbd_pp_lock);
45bb912b
LE
168 page = page_chain_del(&drbd_pp_pool, number);
169 if (page)
170 drbd_pp_vacant -= number;
b411b363 171 spin_unlock(&drbd_pp_lock);
45bb912b
LE
172 if (page)
173 return page;
b411b363 174 }
45bb912b 175
b411b363
PR
176 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
177 * "criss-cross" setup, that might cause write-out on some other DRBD,
178 * which in turn might block on the other node at this very place. */
45bb912b
LE
179 for (i = 0; i < number; i++) {
180 tmp = alloc_page(GFP_TRY);
181 if (!tmp)
182 break;
183 set_page_private(tmp, (unsigned long)page);
184 page = tmp;
185 }
186
187 if (i == number)
188 return page;
189
190 /* Not enough pages immediately available this time.
c37c8ecf 191 * No need to jump around here, drbd_alloc_pages will retry this
45bb912b
LE
192 * function "soon". */
193 if (page) {
194 tmp = page_chain_tail(page, NULL);
195 spin_lock(&drbd_pp_lock);
196 page_chain_add(&drbd_pp_pool, page, tmp);
197 drbd_pp_vacant += i;
198 spin_unlock(&drbd_pp_lock);
199 }
200 return NULL;
b411b363
PR
201}
202
b30ab791 203static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
a990be46 204 struct list_head *to_be_freed)
b411b363 205{
a8cd15ba 206 struct drbd_peer_request *peer_req, *tmp;
b411b363
PR
207
208 /* The EEs are always appended to the end of the list. Since
209 they are sent in order over the wire, they have to finish
210 in order. As soon as we see the first not finished we can
211 stop to examine the list... */
212
a8cd15ba 213 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
045417f7 214 if (drbd_peer_req_has_active_page(peer_req))
b411b363 215 break;
a8cd15ba 216 list_move(&peer_req->w.list, to_be_freed);
b411b363
PR
217 }
218}
219
668700b4 220static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
b411b363
PR
221{
222 LIST_HEAD(reclaimed);
db830c46 223 struct drbd_peer_request *peer_req, *t;
b411b363 224
0500813f 225 spin_lock_irq(&device->resource->req_lock);
b30ab791 226 reclaim_finished_net_peer_reqs(device, &reclaimed);
0500813f 227 spin_unlock_irq(&device->resource->req_lock);
a8cd15ba 228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
b30ab791 229 drbd_free_net_peer_req(device, peer_req);
b411b363
PR
230}
231
668700b4
PR
232static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
233{
234 struct drbd_peer_device *peer_device;
235 int vnr;
236
237 rcu_read_lock();
238 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
239 struct drbd_device *device = peer_device->device;
240 if (!atomic_read(&device->pp_in_use_by_net))
241 continue;
242
243 kref_get(&device->kref);
244 rcu_read_unlock();
245 drbd_reclaim_net_peer_reqs(device);
246 kref_put(&device->kref, drbd_destroy_device);
247 rcu_read_lock();
248 }
249 rcu_read_unlock();
250}
251
b411b363 252/**
c37c8ecf 253 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
b30ab791 254 * @device: DRBD device.
45bb912b
LE
255 * @number: number of pages requested
256 * @retry: whether to retry, if not enough pages are available right now
257 *
258 * Tries to allocate number pages, first from our own page pool, then from
0e49d7b0 259 * the kernel.
45bb912b 260 * Possibly retry until DRBD frees sufficient pages somewhere else.
b411b363 261 *
0e49d7b0
LE
262 * If this allocation would exceed the max_buffers setting, we throttle
263 * allocation (schedule_timeout) to give the system some room to breathe.
264 *
265 * We do not use max-buffers as hard limit, because it could lead to
266 * congestion and further to a distributed deadlock during online-verify or
267 * (checksum based) resync, if the max-buffers, socket buffer sizes and
268 * resync-rate settings are mis-configured.
269 *
45bb912b 270 * Returns a page chain linked via page->private.
b411b363 271 */
69a22773 272struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
c37c8ecf 273 bool retry)
b411b363 274{
69a22773 275 struct drbd_device *device = peer_device->device;
b411b363 276 struct page *page = NULL;
44ed167d 277 struct net_conf *nc;
b411b363 278 DEFINE_WAIT(wait);
0e49d7b0 279 unsigned int mxb;
b411b363 280
44ed167d 281 rcu_read_lock();
69a22773 282 nc = rcu_dereference(peer_device->connection->net_conf);
44ed167d
PR
283 mxb = nc ? nc->max_buffers : 1000000;
284 rcu_read_unlock();
285
b30ab791
AG
286 if (atomic_read(&device->pp_in_use) < mxb)
287 page = __drbd_alloc_pages(device, number);
b411b363 288
668700b4
PR
289 /* Try to keep the fast path fast, but occasionally we need
290 * to reclaim the pages we lended to the network stack. */
291 if (page && atomic_read(&device->pp_in_use_by_net) > 512)
292 drbd_reclaim_net_peer_reqs(device);
293
45bb912b 294 while (page == NULL) {
b411b363
PR
295 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
296
668700b4 297 drbd_reclaim_net_peer_reqs(device);
b411b363 298
b30ab791
AG
299 if (atomic_read(&device->pp_in_use) < mxb) {
300 page = __drbd_alloc_pages(device, number);
b411b363
PR
301 if (page)
302 break;
303 }
304
305 if (!retry)
306 break;
307
308 if (signal_pending(current)) {
d0180171 309 drbd_warn(device, "drbd_alloc_pages interrupted!\n");
b411b363
PR
310 break;
311 }
312
0e49d7b0
LE
313 if (schedule_timeout(HZ/10) == 0)
314 mxb = UINT_MAX;
b411b363
PR
315 }
316 finish_wait(&drbd_pp_wait, &wait);
317
45bb912b 318 if (page)
b30ab791 319 atomic_add(number, &device->pp_in_use);
b411b363
PR
320 return page;
321}
322
c37c8ecf 323/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
0500813f 324 * Is also used from inside an other spin_lock_irq(&resource->req_lock);
45bb912b
LE
325 * Either links the page chain back to the global pool,
326 * or returns all pages to the system. */
b30ab791 327static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
b411b363 328{
b30ab791 329 atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
b411b363 330 int i;
435f0740 331
a73ff323
LE
332 if (page == NULL)
333 return;
334
183ece30 335 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
45bb912b
LE
336 i = page_chain_free(page);
337 else {
338 struct page *tmp;
339 tmp = page_chain_tail(page, &i);
340 spin_lock(&drbd_pp_lock);
341 page_chain_add(&drbd_pp_pool, page, tmp);
342 drbd_pp_vacant += i;
343 spin_unlock(&drbd_pp_lock);
b411b363 344 }
435f0740 345 i = atomic_sub_return(i, a);
45bb912b 346 if (i < 0)
d0180171 347 drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
435f0740 348 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
b411b363
PR
349 wake_up(&drbd_pp_wait);
350}
351
352/*
353You need to hold the req_lock:
354 _drbd_wait_ee_list_empty()
355
356You must not have the req_lock:
3967deb1 357 drbd_free_peer_req()
0db55363 358 drbd_alloc_peer_req()
7721f567 359 drbd_free_peer_reqs()
b411b363 360 drbd_ee_fix_bhs()
a990be46 361 drbd_finish_peer_reqs()
b411b363
PR
362 drbd_clear_done_ee()
363 drbd_wait_ee_list_empty()
364*/
365
9104d31a
LE
366/* normal: payload_size == request size (bi_size)
367 * w_same: payload_size == logical_block_size
368 * trim: payload_size == 0 */
f6ffca9f 369struct drbd_peer_request *
69a22773 370drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
9104d31a 371 unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
b411b363 372{
69a22773 373 struct drbd_device *device = peer_device->device;
db830c46 374 struct drbd_peer_request *peer_req;
a73ff323 375 struct page *page = NULL;
9104d31a 376 unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT;
b411b363 377
b30ab791 378 if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
b411b363
PR
379 return NULL;
380
0892fac8 381 peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
db830c46 382 if (!peer_req) {
b411b363 383 if (!(gfp_mask & __GFP_NOWARN))
d0180171 384 drbd_err(device, "%s: allocation failed\n", __func__);
b411b363
PR
385 return NULL;
386 }
387
9104d31a 388 if (nr_pages) {
d0164adc
MG
389 page = drbd_alloc_pages(peer_device, nr_pages,
390 gfpflags_allow_blocking(gfp_mask));
a73ff323
LE
391 if (!page)
392 goto fail;
393 }
b411b363 394
c5a2c150
LE
395 memset(peer_req, 0, sizeof(*peer_req));
396 INIT_LIST_HEAD(&peer_req->w.list);
db830c46 397 drbd_clear_interval(&peer_req->i);
9104d31a 398 peer_req->i.size = request_size;
db830c46 399 peer_req->i.sector = sector;
c5a2c150 400 peer_req->submit_jif = jiffies;
a8cd15ba 401 peer_req->peer_device = peer_device;
db830c46 402 peer_req->pages = page;
9a8e7753
AG
403 /*
404 * The block_id is opaque to the receiver. It is not endianness
405 * converted, and sent back to the sender unchanged.
406 */
db830c46 407 peer_req->block_id = id;
b411b363 408
db830c46 409 return peer_req;
b411b363 410
45bb912b 411 fail:
0892fac8 412 mempool_free(peer_req, &drbd_ee_mempool);
b411b363
PR
413 return NULL;
414}
415
b30ab791 416void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
f6ffca9f 417 int is_net)
b411b363 418{
21ae5d7f 419 might_sleep();
db830c46
AG
420 if (peer_req->flags & EE_HAS_DIGEST)
421 kfree(peer_req->digest);
b30ab791 422 drbd_free_pages(device, peer_req->pages, is_net);
0b0ba1ef
AG
423 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
424 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
21ae5d7f
LE
425 if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
426 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
427 drbd_al_complete_io(device, &peer_req->i);
428 }
0892fac8 429 mempool_free(peer_req, &drbd_ee_mempool);
b411b363
PR
430}
431
b30ab791 432int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
b411b363
PR
433{
434 LIST_HEAD(work_list);
db830c46 435 struct drbd_peer_request *peer_req, *t;
b411b363 436 int count = 0;
b30ab791 437 int is_net = list == &device->net_ee;
b411b363 438
0500813f 439 spin_lock_irq(&device->resource->req_lock);
b411b363 440 list_splice_init(list, &work_list);
0500813f 441 spin_unlock_irq(&device->resource->req_lock);
b411b363 442
a8cd15ba 443 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
b30ab791 444 __drbd_free_peer_req(device, peer_req, is_net);
b411b363
PR
445 count++;
446 }
447 return count;
448}
449
b411b363 450/*
a990be46 451 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
b411b363 452 */
b30ab791 453static int drbd_finish_peer_reqs(struct drbd_device *device)
b411b363
PR
454{
455 LIST_HEAD(work_list);
456 LIST_HEAD(reclaimed);
db830c46 457 struct drbd_peer_request *peer_req, *t;
e2b3032b 458 int err = 0;
b411b363 459
0500813f 460 spin_lock_irq(&device->resource->req_lock);
b30ab791
AG
461 reclaim_finished_net_peer_reqs(device, &reclaimed);
462 list_splice_init(&device->done_ee, &work_list);
0500813f 463 spin_unlock_irq(&device->resource->req_lock);
b411b363 464
a8cd15ba 465 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
b30ab791 466 drbd_free_net_peer_req(device, peer_req);
b411b363
PR
467
468 /* possible callbacks here:
d4dabbe2 469 * e_end_block, and e_end_resync_block, e_send_superseded.
b411b363
PR
470 * all ignore the last argument.
471 */
a8cd15ba 472 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
e2b3032b
AG
473 int err2;
474
b411b363 475 /* list_del not necessary, next/prev members not touched */
a8cd15ba 476 err2 = peer_req->w.cb(&peer_req->w, !!err);
e2b3032b
AG
477 if (!err)
478 err = err2;
b30ab791 479 drbd_free_peer_req(device, peer_req);
b411b363 480 }
b30ab791 481 wake_up(&device->ee_wait);
b411b363 482
e2b3032b 483 return err;
b411b363
PR
484}
485
b30ab791 486static void _drbd_wait_ee_list_empty(struct drbd_device *device,
d4da1537 487 struct list_head *head)
b411b363
PR
488{
489 DEFINE_WAIT(wait);
490
491 /* avoids spin_lock/unlock
492 * and calling prepare_to_wait in the fast path */
493 while (!list_empty(head)) {
b30ab791 494 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
0500813f 495 spin_unlock_irq(&device->resource->req_lock);
7eaceacc 496 io_schedule();
b30ab791 497 finish_wait(&device->ee_wait, &wait);
0500813f 498 spin_lock_irq(&device->resource->req_lock);
b411b363
PR
499 }
500}
501
b30ab791 502static void drbd_wait_ee_list_empty(struct drbd_device *device,
d4da1537 503 struct list_head *head)
b411b363 504{
0500813f 505 spin_lock_irq(&device->resource->req_lock);
b30ab791 506 _drbd_wait_ee_list_empty(device, head);
0500813f 507 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
508}
509
dbd9eea0 510static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
b411b363 511{
b411b363
PR
512 struct kvec iov = {
513 .iov_base = buf,
514 .iov_len = size,
515 };
516 struct msghdr msg = {
b411b363
PR
517 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
518 };
f7765c36
AV
519 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
520 return sock_recvmsg(sock, &msg, msg.msg_flags);
b411b363
PR
521}
522
bde89a9e 523static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
b411b363 524{
b411b363
PR
525 int rv;
526
bde89a9e 527 rv = drbd_recv_short(connection->data.socket, buf, size, 0);
b411b363 528
dbd0820c
PR
529 if (rv < 0) {
530 if (rv == -ECONNRESET)
1ec861eb 531 drbd_info(connection, "sock was reset by peer\n");
dbd0820c 532 else if (rv != -ERESTARTSYS)
1ec861eb 533 drbd_err(connection, "sock_recvmsg returned %d\n", rv);
dbd0820c 534 } else if (rv == 0) {
bde89a9e 535 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
b66623e3
PR
536 long t;
537 rcu_read_lock();
bde89a9e 538 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
b66623e3
PR
539 rcu_read_unlock();
540
bde89a9e 541 t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
b66623e3 542
599377ac
PR
543 if (t)
544 goto out;
545 }
1ec861eb 546 drbd_info(connection, "sock was shut down by peer\n");
599377ac
PR
547 }
548
b411b363 549 if (rv != size)
bde89a9e 550 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
b411b363 551
599377ac 552out:
b411b363
PR
553 return rv;
554}
555
bde89a9e 556static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
c6967746
AG
557{
558 int err;
559
bde89a9e 560 err = drbd_recv(connection, buf, size);
c6967746
AG
561 if (err != size) {
562 if (err >= 0)
563 err = -EIO;
564 } else
565 err = 0;
566 return err;
567}
568
bde89a9e 569static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
a5c31904
AG
570{
571 int err;
572
bde89a9e 573 err = drbd_recv_all(connection, buf, size);
a5c31904 574 if (err && !signal_pending(current))
1ec861eb 575 drbd_warn(connection, "short read (expected size %d)\n", (int)size);
a5c31904
AG
576 return err;
577}
578
5dbf1673
LE
579/* quoting tcp(7):
580 * On individual connections, the socket buffer size must be set prior to the
581 * listen(2) or connect(2) calls in order to have it take effect.
582 * This is our wrapper to do so.
583 */
584static void drbd_setbufsize(struct socket *sock, unsigned int snd,
585 unsigned int rcv)
586{
587 /* open coded SO_SNDBUF, SO_RCVBUF */
588 if (snd) {
589 sock->sk->sk_sndbuf = snd;
590 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
591 }
592 if (rcv) {
593 sock->sk->sk_rcvbuf = rcv;
594 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
595 }
596}
597
bde89a9e 598static struct socket *drbd_try_connect(struct drbd_connection *connection)
b411b363
PR
599{
600 const char *what;
601 struct socket *sock;
602 struct sockaddr_in6 src_in6;
44ed167d
PR
603 struct sockaddr_in6 peer_in6;
604 struct net_conf *nc;
605 int err, peer_addr_len, my_addr_len;
69ef82de 606 int sndbuf_size, rcvbuf_size, connect_int;
b411b363
PR
607 int disconnect_on_error = 1;
608
44ed167d 609 rcu_read_lock();
bde89a9e 610 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
611 if (!nc) {
612 rcu_read_unlock();
b411b363 613 return NULL;
44ed167d 614 }
44ed167d
PR
615 sndbuf_size = nc->sndbuf_size;
616 rcvbuf_size = nc->rcvbuf_size;
69ef82de 617 connect_int = nc->connect_int;
089c075d 618 rcu_read_unlock();
44ed167d 619
bde89a9e
AG
620 my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
621 memcpy(&src_in6, &connection->my_addr, my_addr_len);
44ed167d 622
bde89a9e 623 if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
44ed167d
PR
624 src_in6.sin6_port = 0;
625 else
626 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
627
bde89a9e
AG
628 peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
629 memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
b411b363
PR
630
631 what = "sock_create_kern";
eeb1bd5c 632 err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
44ed167d 633 SOCK_STREAM, IPPROTO_TCP, &sock);
b411b363
PR
634 if (err < 0) {
635 sock = NULL;
636 goto out;
637 }
638
639 sock->sk->sk_rcvtimeo =
69ef82de 640 sock->sk->sk_sndtimeo = connect_int * HZ;
44ed167d 641 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
b411b363
PR
642
643 /* explicitly bind to the configured IP as source IP
644 * for the outgoing connections.
645 * This is needed for multihomed hosts and to be
646 * able to use lo: interfaces for drbd.
647 * Make sure to use 0 as port number, so linux selects
648 * a free one dynamically.
649 */
b411b363 650 what = "bind before connect";
44ed167d 651 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
b411b363
PR
652 if (err < 0)
653 goto out;
654
655 /* connect may fail, peer not yet available.
656 * stay C_WF_CONNECTION, don't go Disconnecting! */
657 disconnect_on_error = 0;
658 what = "connect";
44ed167d 659 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
b411b363
PR
660
661out:
662 if (err < 0) {
663 if (sock) {
664 sock_release(sock);
665 sock = NULL;
666 }
667 switch (-err) {
668 /* timeout, busy, signal pending */
669 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
670 case EINTR: case ERESTARTSYS:
671 /* peer not (yet) available, network problem */
672 case ECONNREFUSED: case ENETUNREACH:
673 case EHOSTDOWN: case EHOSTUNREACH:
674 disconnect_on_error = 0;
675 break;
676 default:
1ec861eb 677 drbd_err(connection, "%s failed, err = %d\n", what, err);
b411b363
PR
678 }
679 if (disconnect_on_error)
bde89a9e 680 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 681 }
44ed167d 682
b411b363
PR
683 return sock;
684}
685
7a426fd8 686struct accept_wait_data {
bde89a9e 687 struct drbd_connection *connection;
7a426fd8
PR
688 struct socket *s_listen;
689 struct completion door_bell;
690 void (*original_sk_state_change)(struct sock *sk);
691
692};
693
715306f6 694static void drbd_incoming_connection(struct sock *sk)
7a426fd8
PR
695{
696 struct accept_wait_data *ad = sk->sk_user_data;
715306f6 697 void (*state_change)(struct sock *sk);
7a426fd8 698
715306f6
AG
699 state_change = ad->original_sk_state_change;
700 if (sk->sk_state == TCP_ESTABLISHED)
701 complete(&ad->door_bell);
702 state_change(sk);
7a426fd8
PR
703}
704
bde89a9e 705static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
b411b363 706{
1f3e509b 707 int err, sndbuf_size, rcvbuf_size, my_addr_len;
44ed167d 708 struct sockaddr_in6 my_addr;
1f3e509b 709 struct socket *s_listen;
44ed167d 710 struct net_conf *nc;
b411b363
PR
711 const char *what;
712
44ed167d 713 rcu_read_lock();
bde89a9e 714 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
715 if (!nc) {
716 rcu_read_unlock();
7a426fd8 717 return -EIO;
44ed167d 718 }
44ed167d
PR
719 sndbuf_size = nc->sndbuf_size;
720 rcvbuf_size = nc->rcvbuf_size;
44ed167d 721 rcu_read_unlock();
b411b363 722
bde89a9e
AG
723 my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
724 memcpy(&my_addr, &connection->my_addr, my_addr_len);
b411b363
PR
725
726 what = "sock_create_kern";
eeb1bd5c 727 err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
1f3e509b 728 SOCK_STREAM, IPPROTO_TCP, &s_listen);
b411b363
PR
729 if (err) {
730 s_listen = NULL;
731 goto out;
732 }
733
98683650 734 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
44ed167d 735 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
b411b363
PR
736
737 what = "bind before listen";
44ed167d 738 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
b411b363
PR
739 if (err < 0)
740 goto out;
741
7a426fd8
PR
742 ad->s_listen = s_listen;
743 write_lock_bh(&s_listen->sk->sk_callback_lock);
744 ad->original_sk_state_change = s_listen->sk->sk_state_change;
715306f6 745 s_listen->sk->sk_state_change = drbd_incoming_connection;
7a426fd8
PR
746 s_listen->sk->sk_user_data = ad;
747 write_unlock_bh(&s_listen->sk->sk_callback_lock);
b411b363 748
2820fd39
PR
749 what = "listen";
750 err = s_listen->ops->listen(s_listen, 5);
751 if (err < 0)
752 goto out;
753
7a426fd8 754 return 0;
b411b363
PR
755out:
756 if (s_listen)
757 sock_release(s_listen);
758 if (err < 0) {
759 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1ec861eb 760 drbd_err(connection, "%s failed, err = %d\n", what, err);
bde89a9e 761 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
762 }
763 }
b411b363 764
7a426fd8 765 return -EIO;
b411b363
PR
766}
767
715306f6 768static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
b411b363 769{
715306f6
AG
770 write_lock_bh(&sk->sk_callback_lock);
771 sk->sk_state_change = ad->original_sk_state_change;
772 sk->sk_user_data = NULL;
773 write_unlock_bh(&sk->sk_callback_lock);
b411b363
PR
774}
775
bde89a9e 776static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
b411b363 777{
1f3e509b
PR
778 int timeo, connect_int, err = 0;
779 struct socket *s_estab = NULL;
1f3e509b
PR
780 struct net_conf *nc;
781
782 rcu_read_lock();
bde89a9e 783 nc = rcu_dereference(connection->net_conf);
1f3e509b
PR
784 if (!nc) {
785 rcu_read_unlock();
786 return NULL;
787 }
788 connect_int = nc->connect_int;
789 rcu_read_unlock();
790
791 timeo = connect_int * HZ;
38b682b2
AM
792 /* 28.5% random jitter */
793 timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
1f3e509b 794
7a426fd8
PR
795 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
796 if (err <= 0)
797 return NULL;
b411b363 798
7a426fd8 799 err = kernel_accept(ad->s_listen, &s_estab, 0);
b411b363
PR
800 if (err < 0) {
801 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1ec861eb 802 drbd_err(connection, "accept failed, err = %d\n", err);
bde89a9e 803 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
804 }
805 }
b411b363 806
715306f6
AG
807 if (s_estab)
808 unregister_state_change(s_estab->sk, ad);
b411b363 809
b411b363
PR
810 return s_estab;
811}
b411b363 812
bde89a9e 813static int decode_header(struct drbd_connection *, void *, struct packet_info *);
b411b363 814
bde89a9e 815static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
9f5bdc33
AG
816 enum drbd_packet cmd)
817{
bde89a9e 818 if (!conn_prepare_command(connection, sock))
9f5bdc33 819 return -EIO;
bde89a9e 820 return conn_send_command(connection, sock, cmd, 0, NULL, 0);
b411b363
PR
821}
822
bde89a9e 823static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
b411b363 824{
bde89a9e 825 unsigned int header_size = drbd_header_size(connection);
9f5bdc33 826 struct packet_info pi;
4920e37a 827 struct net_conf *nc;
9f5bdc33 828 int err;
b411b363 829
4920e37a
PR
830 rcu_read_lock();
831 nc = rcu_dereference(connection->net_conf);
832 if (!nc) {
833 rcu_read_unlock();
834 return -EIO;
835 }
836 sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
837 rcu_read_unlock();
838
bde89a9e 839 err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
9f5bdc33
AG
840 if (err != header_size) {
841 if (err >= 0)
842 err = -EIO;
843 return err;
844 }
bde89a9e 845 err = decode_header(connection, connection->data.rbuf, &pi);
9f5bdc33
AG
846 if (err)
847 return err;
848 return pi.cmd;
b411b363
PR
849}
850
851/**
852 * drbd_socket_okay() - Free the socket if its connection is not okay
b411b363
PR
853 * @sock: pointer to the pointer to the socket.
854 */
5d0b17f1 855static bool drbd_socket_okay(struct socket **sock)
b411b363
PR
856{
857 int rr;
858 char tb[4];
859
860 if (!*sock)
81e84650 861 return false;
b411b363 862
dbd9eea0 863 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
b411b363
PR
864
865 if (rr > 0 || rr == -EAGAIN) {
81e84650 866 return true;
b411b363
PR
867 } else {
868 sock_release(*sock);
869 *sock = NULL;
81e84650 870 return false;
b411b363
PR
871 }
872}
5d0b17f1
PR
873
874static bool connection_established(struct drbd_connection *connection,
875 struct socket **sock1,
876 struct socket **sock2)
877{
878 struct net_conf *nc;
879 int timeout;
880 bool ok;
881
882 if (!*sock1 || !*sock2)
883 return false;
884
885 rcu_read_lock();
886 nc = rcu_dereference(connection->net_conf);
887 timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
888 rcu_read_unlock();
889 schedule_timeout_interruptible(timeout);
890
891 ok = drbd_socket_okay(sock1);
892 ok = drbd_socket_okay(sock2) && ok;
893
894 return ok;
895}
896
2325eb66
PR
897/* Gets called if a connection is established, or if a new minor gets created
898 in a connection */
69a22773 899int drbd_connected(struct drbd_peer_device *peer_device)
907599e0 900{
69a22773 901 struct drbd_device *device = peer_device->device;
0829f5ed 902 int err;
907599e0 903
b30ab791
AG
904 atomic_set(&device->packet_seq, 0);
905 device->peer_seq = 0;
907599e0 906
69a22773
AG
907 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
908 &peer_device->connection->cstate_mutex :
b30ab791 909 &device->own_state_mutex;
8410da8f 910
69a22773 911 err = drbd_send_sync_param(peer_device);
0829f5ed 912 if (!err)
69a22773 913 err = drbd_send_sizes(peer_device, 0, 0);
0829f5ed 914 if (!err)
69a22773 915 err = drbd_send_uuids(peer_device);
0829f5ed 916 if (!err)
69a22773 917 err = drbd_send_current_state(peer_device);
b30ab791
AG
918 clear_bit(USE_DEGR_WFC_T, &device->flags);
919 clear_bit(RESIZE_PENDING, &device->flags);
920 atomic_set(&device->ap_in_flight, 0);
921 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
0829f5ed 922 return err;
907599e0 923}
b411b363
PR
924
925/*
926 * return values:
927 * 1 yes, we have a valid connection
928 * 0 oops, did not work out, please try again
929 * -1 peer talks different language,
930 * no point in trying again, please go standalone.
931 * -2 We do not have a network config...
932 */
bde89a9e 933static int conn_connect(struct drbd_connection *connection)
b411b363 934{
7da35862 935 struct drbd_socket sock, msock;
c06ece6b 936 struct drbd_peer_device *peer_device;
44ed167d 937 struct net_conf *nc;
5d0b17f1
PR
938 int vnr, timeout, h;
939 bool discard_my_data, ok;
197296ff 940 enum drbd_state_rv rv;
7a426fd8 941 struct accept_wait_data ad = {
bde89a9e 942 .connection = connection,
7a426fd8
PR
943 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
944 };
b411b363 945
bde89a9e
AG
946 clear_bit(DISCONNECT_SENT, &connection->flags);
947 if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
948 return -2;
949
7da35862 950 mutex_init(&sock.mutex);
bde89a9e
AG
951 sock.sbuf = connection->data.sbuf;
952 sock.rbuf = connection->data.rbuf;
7da35862
PR
953 sock.socket = NULL;
954 mutex_init(&msock.mutex);
bde89a9e
AG
955 msock.sbuf = connection->meta.sbuf;
956 msock.rbuf = connection->meta.rbuf;
7da35862
PR
957 msock.socket = NULL;
958
0916e0e3 959 /* Assume that the peer only understands protocol 80 until we know better. */
bde89a9e 960 connection->agreed_pro_version = 80;
b411b363 961
bde89a9e 962 if (prepare_listen_socket(connection, &ad))
7a426fd8 963 return 0;
b411b363
PR
964
965 do {
2bf89621 966 struct socket *s;
b411b363 967
bde89a9e 968 s = drbd_try_connect(connection);
b411b363 969 if (s) {
7da35862
PR
970 if (!sock.socket) {
971 sock.socket = s;
bde89a9e 972 send_first_packet(connection, &sock, P_INITIAL_DATA);
7da35862 973 } else if (!msock.socket) {
bde89a9e 974 clear_bit(RESOLVE_CONFLICTS, &connection->flags);
7da35862 975 msock.socket = s;
bde89a9e 976 send_first_packet(connection, &msock, P_INITIAL_META);
b411b363 977 } else {
1ec861eb 978 drbd_err(connection, "Logic error in conn_connect()\n");
b411b363
PR
979 goto out_release_sockets;
980 }
981 }
982
5d0b17f1
PR
983 if (connection_established(connection, &sock.socket, &msock.socket))
984 break;
b411b363
PR
985
986retry:
bde89a9e 987 s = drbd_wait_for_connect(connection, &ad);
b411b363 988 if (s) {
bde89a9e 989 int fp = receive_first_packet(connection, s);
7da35862
PR
990 drbd_socket_okay(&sock.socket);
991 drbd_socket_okay(&msock.socket);
92f14951 992 switch (fp) {
e5d6f33a 993 case P_INITIAL_DATA:
7da35862 994 if (sock.socket) {
1ec861eb 995 drbd_warn(connection, "initial packet S crossed\n");
7da35862 996 sock_release(sock.socket);
80c6eed4
PR
997 sock.socket = s;
998 goto randomize;
b411b363 999 }
7da35862 1000 sock.socket = s;
b411b363 1001 break;
e5d6f33a 1002 case P_INITIAL_META:
bde89a9e 1003 set_bit(RESOLVE_CONFLICTS, &connection->flags);
7da35862 1004 if (msock.socket) {
1ec861eb 1005 drbd_warn(connection, "initial packet M crossed\n");
7da35862 1006 sock_release(msock.socket);
80c6eed4
PR
1007 msock.socket = s;
1008 goto randomize;
b411b363 1009 }
7da35862 1010 msock.socket = s;
b411b363
PR
1011 break;
1012 default:
1ec861eb 1013 drbd_warn(connection, "Error receiving initial packet\n");
b411b363 1014 sock_release(s);
80c6eed4 1015randomize:
38b682b2 1016 if (prandom_u32() & 1)
b411b363
PR
1017 goto retry;
1018 }
1019 }
1020
bde89a9e 1021 if (connection->cstate <= C_DISCONNECTING)
b411b363
PR
1022 goto out_release_sockets;
1023 if (signal_pending(current)) {
1024 flush_signals(current);
1025 smp_rmb();
bde89a9e 1026 if (get_t_state(&connection->receiver) == EXITING)
b411b363
PR
1027 goto out_release_sockets;
1028 }
1029
5d0b17f1 1030 ok = connection_established(connection, &sock.socket, &msock.socket);
b666dbf8 1031 } while (!ok);
b411b363 1032
7a426fd8
PR
1033 if (ad.s_listen)
1034 sock_release(ad.s_listen);
b411b363 1035
98683650
PR
1036 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
1037 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 1038
7da35862
PR
1039 sock.socket->sk->sk_allocation = GFP_NOIO;
1040 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 1041
7da35862
PR
1042 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1043 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 1044
b411b363 1045 /* NOT YET ...
bde89a9e 1046 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
7da35862 1047 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 1048 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 1049 * which we set to 4x the configured ping_timeout. */
44ed167d 1050 rcu_read_lock();
bde89a9e 1051 nc = rcu_dereference(connection->net_conf);
44ed167d 1052
7da35862
PR
1053 sock.socket->sk->sk_sndtimeo =
1054 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 1055
7da35862 1056 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 1057 timeout = nc->timeout * HZ / 10;
08b165ba 1058 discard_my_data = nc->discard_my_data;
44ed167d 1059 rcu_read_unlock();
b411b363 1060
7da35862 1061 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1062
1063 /* we don't want delays.
25985edc 1064 * we use TCP_CORK where appropriate, though */
7da35862
PR
1065 drbd_tcp_nodelay(sock.socket);
1066 drbd_tcp_nodelay(msock.socket);
b411b363 1067
bde89a9e
AG
1068 connection->data.socket = sock.socket;
1069 connection->meta.socket = msock.socket;
1070 connection->last_received = jiffies;
b411b363 1071
bde89a9e 1072 h = drbd_do_features(connection);
b411b363
PR
1073 if (h <= 0)
1074 return h;
1075
bde89a9e 1076 if (connection->cram_hmac_tfm) {
b30ab791 1077 /* drbd_request_state(device, NS(conn, WFAuth)); */
bde89a9e 1078 switch (drbd_do_auth(connection)) {
b10d96cb 1079 case -1:
1ec861eb 1080 drbd_err(connection, "Authentication of peer failed\n");
b411b363 1081 return -1;
b10d96cb 1082 case 0:
1ec861eb 1083 drbd_err(connection, "Authentication of peer failed, trying again.\n");
b10d96cb 1084 return 0;
b411b363
PR
1085 }
1086 }
1087
bde89a9e
AG
1088 connection->data.socket->sk->sk_sndtimeo = timeout;
1089 connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1090
bde89a9e 1091 if (drbd_send_protocol(connection) == -EOPNOTSUPP)
7e2455c1 1092 return -1;
b411b363 1093
31007745
PR
1094 /* Prevent a race between resync-handshake and
1095 * being promoted to Primary.
1096 *
1097 * Grab and release the state mutex, so we know that any current
1098 * drbd_set_role() is finished, and any incoming drbd_set_role
1099 * will see the STATE_SENT flag, and wait for it to be cleared.
1100 */
1101 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1102 mutex_lock(peer_device->device->state_mutex);
1103
cde81d99
LE
1104 /* avoid a race with conn_request_state( C_DISCONNECTING ) */
1105 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 1106 set_bit(STATE_SENT, &connection->flags);
cde81d99 1107 spin_unlock_irq(&connection->resource->req_lock);
a1096a6e 1108
31007745
PR
1109 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1110 mutex_unlock(peer_device->device->state_mutex);
1111
c141ebda 1112 rcu_read_lock();
c06ece6b
AG
1113 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1114 struct drbd_device *device = peer_device->device;
b30ab791 1115 kref_get(&device->kref);
26ea8f92
AG
1116 rcu_read_unlock();
1117
08b165ba 1118 if (discard_my_data)
b30ab791 1119 set_bit(DISCARD_MY_DATA, &device->flags);
08b165ba 1120 else
b30ab791 1121 clear_bit(DISCARD_MY_DATA, &device->flags);
08b165ba 1122
69a22773 1123 drbd_connected(peer_device);
05a10ec7 1124 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
1125 rcu_read_lock();
1126 }
1127 rcu_read_unlock();
1128
bde89a9e
AG
1129 rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1130 if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
1131 clear_bit(STATE_SENT, &connection->flags);
1e86ac48 1132 return 0;
a1096a6e 1133 }
1e86ac48 1134
1c03e520 1135 drbd_thread_start(&connection->ack_receiver);
39e91a60
LE
1136 /* opencoded create_singlethread_workqueue(),
1137 * to be able to use format string arguments */
1138 connection->ack_sender =
1139 alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
668700b4
PR
1140 if (!connection->ack_sender) {
1141 drbd_err(connection, "Failed to create workqueue ack_sender\n");
1142 return 0;
1143 }
b411b363 1144
0500813f 1145 mutex_lock(&connection->resource->conf_update);
08b165ba
PR
1146 /* The discard_my_data flag is a single-shot modifier to the next
1147 * connection attempt, the handshake of which is now well underway.
1148 * No need for rcu style copying of the whole struct
1149 * just to clear a single value. */
bde89a9e 1150 connection->net_conf->discard_my_data = 0;
0500813f 1151 mutex_unlock(&connection->resource->conf_update);
08b165ba 1152
d3fcb490 1153 return h;
b411b363
PR
1154
1155out_release_sockets:
7a426fd8
PR
1156 if (ad.s_listen)
1157 sock_release(ad.s_listen);
7da35862
PR
1158 if (sock.socket)
1159 sock_release(sock.socket);
1160 if (msock.socket)
1161 sock_release(msock.socket);
b411b363
PR
1162 return -1;
1163}
1164
bde89a9e 1165static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
b411b363 1166{
bde89a9e 1167 unsigned int header_size = drbd_header_size(connection);
e658983a 1168
0c8e36d9
AG
1169 if (header_size == sizeof(struct p_header100) &&
1170 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1171 struct p_header100 *h = header;
1172 if (h->pad != 0) {
1ec861eb 1173 drbd_err(connection, "Header padding is not zero\n");
0c8e36d9
AG
1174 return -EINVAL;
1175 }
1176 pi->vnr = be16_to_cpu(h->volume);
1177 pi->cmd = be16_to_cpu(h->command);
1178 pi->size = be32_to_cpu(h->length);
1179 } else if (header_size == sizeof(struct p_header95) &&
1180 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
e658983a 1181 struct p_header95 *h = header;
e658983a 1182 pi->cmd = be16_to_cpu(h->command);
b55d84ba
AG
1183 pi->size = be32_to_cpu(h->length);
1184 pi->vnr = 0;
e658983a
AG
1185 } else if (header_size == sizeof(struct p_header80) &&
1186 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1187 struct p_header80 *h = header;
1188 pi->cmd = be16_to_cpu(h->command);
1189 pi->size = be16_to_cpu(h->length);
77351055 1190 pi->vnr = 0;
02918be2 1191 } else {
1ec861eb 1192 drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
e658983a 1193 be32_to_cpu(*(__be32 *)header),
bde89a9e 1194 connection->agreed_pro_version);
8172f3e9 1195 return -EINVAL;
b411b363 1196 }
e658983a 1197 pi->data = header + header_size;
8172f3e9 1198 return 0;
257d0af6 1199}
b411b363 1200
c51a0ef3
LE
1201static void drbd_unplug_all_devices(struct drbd_connection *connection)
1202{
1203 if (current->plug == &connection->receiver_plug) {
1204 blk_finish_plug(&connection->receiver_plug);
1205 blk_start_plug(&connection->receiver_plug);
1206 } /* else: maybe just schedule() ?? */
1207}
1208
bde89a9e 1209static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
257d0af6 1210{
bde89a9e 1211 void *buffer = connection->data.rbuf;
69bc7bc3 1212 int err;
257d0af6 1213
bde89a9e 1214 err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
a5c31904 1215 if (err)
69bc7bc3 1216 return err;
257d0af6 1217
bde89a9e
AG
1218 err = decode_header(connection, buffer, pi);
1219 connection->last_received = jiffies;
b411b363 1220
69bc7bc3 1221 return err;
b411b363
PR
1222}
1223
c51a0ef3
LE
1224static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
1225{
1226 void *buffer = connection->data.rbuf;
1227 unsigned int size = drbd_header_size(connection);
1228 int err;
1229
1230 err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
1231 if (err != size) {
1232 /* If we have nothing in the receive buffer now, to reduce
1233 * application latency, try to drain the backend queues as
1234 * quickly as possible, and let remote TCP know what we have
1235 * received so far. */
1236 if (err == -EAGAIN) {
1237 drbd_tcp_quickack(connection->data.socket);
1238 drbd_unplug_all_devices(connection);
1239 }
1240 if (err > 0) {
1241 buffer += err;
1242 size -= err;
1243 }
1244 err = drbd_recv_all_warn(connection, buffer, size);
1245 if (err)
1246 return err;
1247 }
1248
1249 err = decode_header(connection, connection->data.rbuf, pi);
1250 connection->last_received = jiffies;
1251
1252 return err;
1253}
f9ff0da5
LE
1254/* This is blkdev_issue_flush, but asynchronous.
1255 * We want to submit to all component volumes in parallel,
1256 * then wait for all completions.
1257 */
1258struct issue_flush_context {
1259 atomic_t pending;
1260 int error;
1261 struct completion done;
1262};
1263struct one_flush_context {
1264 struct drbd_device *device;
1265 struct issue_flush_context *ctx;
1266};
1267
1ffa7bfa 1268static void one_flush_endio(struct bio *bio)
b411b363 1269{
f9ff0da5
LE
1270 struct one_flush_context *octx = bio->bi_private;
1271 struct drbd_device *device = octx->device;
1272 struct issue_flush_context *ctx = octx->ctx;
1273
4e4cbee9
CH
1274 if (bio->bi_status) {
1275 ctx->error = blk_status_to_errno(bio->bi_status);
1276 drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
f9ff0da5
LE
1277 }
1278 kfree(octx);
1279 bio_put(bio);
1280
1281 clear_bit(FLUSH_PENDING, &device->flags);
1282 put_ldev(device);
1283 kref_put(&device->kref, drbd_destroy_device);
1284
1285 if (atomic_dec_and_test(&ctx->pending))
1286 complete(&ctx->done);
1287}
1288
1289static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
1290{
1291 struct bio *bio = bio_alloc(GFP_NOIO, 0);
1292 struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
1293 if (!bio || !octx) {
1294 drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
1295 /* FIXME: what else can I do now? disconnecting or detaching
1296 * really does not help to improve the state of the world, either.
1297 */
1298 kfree(octx);
1299 if (bio)
1300 bio_put(bio);
1301
1302 ctx->error = -ENOMEM;
1303 put_ldev(device);
1304 kref_put(&device->kref, drbd_destroy_device);
1305 return;
1306 }
4b0007c0 1307
f9ff0da5
LE
1308 octx->device = device;
1309 octx->ctx = ctx;
74d46992 1310 bio_set_dev(bio, device->ldev->backing_bdev);
f9ff0da5
LE
1311 bio->bi_private = octx;
1312 bio->bi_end_io = one_flush_endio;
70fd7614 1313 bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
f9ff0da5
LE
1314
1315 device->flush_jif = jiffies;
1316 set_bit(FLUSH_PENDING, &device->flags);
1317 atomic_inc(&ctx->pending);
1318 submit_bio(bio);
1319}
1320
1321static void drbd_flush(struct drbd_connection *connection)
1322{
f6ba8636 1323 if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
f9ff0da5
LE
1324 struct drbd_peer_device *peer_device;
1325 struct issue_flush_context ctx;
1326 int vnr;
1327
1328 atomic_set(&ctx.pending, 1);
1329 ctx.error = 0;
1330 init_completion(&ctx.done);
1331
615e087f 1332 rcu_read_lock();
c06ece6b
AG
1333 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1334 struct drbd_device *device = peer_device->device;
1335
b30ab791 1336 if (!get_ldev(device))
615e087f 1337 continue;
b30ab791 1338 kref_get(&device->kref);
615e087f
LE
1339 rcu_read_unlock();
1340
f9ff0da5 1341 submit_one_flush(device, &ctx);
b411b363 1342
615e087f 1343 rcu_read_lock();
b411b363 1344 }
615e087f 1345 rcu_read_unlock();
f9ff0da5
LE
1346
1347 /* Do we want to add a timeout,
1348 * if disk-timeout is set? */
1349 if (!atomic_dec_and_test(&ctx.pending))
1350 wait_for_completion(&ctx.done);
1351
1352 if (ctx.error) {
1353 /* would rather check on EOPNOTSUPP, but that is not reliable.
1354 * don't try again for ANY return value != 0
1355 * if (rv == -EOPNOTSUPP) */
1356 /* Any error is already reported by bio_endio callback. */
1357 drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
1358 }
b411b363 1359 }
b411b363
PR
1360}
1361
1362/**
1363 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
b30ab791 1364 * @device: DRBD device.
b411b363
PR
1365 * @epoch: Epoch object.
1366 * @ev: Epoch event.
1367 */
bde89a9e 1368static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
b411b363
PR
1369 struct drbd_epoch *epoch,
1370 enum epoch_event ev)
1371{
2451fc3b 1372 int epoch_size;
b411b363 1373 struct drbd_epoch *next_epoch;
b411b363
PR
1374 enum finish_epoch rv = FE_STILL_LIVE;
1375
bde89a9e 1376 spin_lock(&connection->epoch_lock);
b411b363
PR
1377 do {
1378 next_epoch = NULL;
b411b363
PR
1379
1380 epoch_size = atomic_read(&epoch->epoch_size);
1381
1382 switch (ev & ~EV_CLEANUP) {
1383 case EV_PUT:
1384 atomic_dec(&epoch->active);
1385 break;
1386 case EV_GOT_BARRIER_NR:
1387 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
b411b363
PR
1388 break;
1389 case EV_BECAME_LAST:
1390 /* nothing to do*/
1391 break;
1392 }
1393
b411b363
PR
1394 if (epoch_size != 0 &&
1395 atomic_read(&epoch->active) == 0 &&
80f9fd55 1396 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
b411b363 1397 if (!(ev & EV_CLEANUP)) {
bde89a9e
AG
1398 spin_unlock(&connection->epoch_lock);
1399 drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1400 spin_lock(&connection->epoch_lock);
b411b363 1401 }
9ed57dcb
LE
1402#if 0
1403 /* FIXME: dec unacked on connection, once we have
1404 * something to count pending connection packets in. */
80f9fd55 1405 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
bde89a9e 1406 dec_unacked(epoch->connection);
9ed57dcb 1407#endif
b411b363 1408
bde89a9e 1409 if (connection->current_epoch != epoch) {
b411b363
PR
1410 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1411 list_del(&epoch->list);
1412 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
bde89a9e 1413 connection->epochs--;
b411b363
PR
1414 kfree(epoch);
1415
1416 if (rv == FE_STILL_LIVE)
1417 rv = FE_DESTROYED;
1418 } else {
1419 epoch->flags = 0;
1420 atomic_set(&epoch->epoch_size, 0);
698f9315 1421 /* atomic_set(&epoch->active, 0); is already zero */
b411b363
PR
1422 if (rv == FE_STILL_LIVE)
1423 rv = FE_RECYCLED;
1424 }
1425 }
1426
1427 if (!next_epoch)
1428 break;
1429
1430 epoch = next_epoch;
1431 } while (1);
1432
bde89a9e 1433 spin_unlock(&connection->epoch_lock);
b411b363 1434
b411b363
PR
1435 return rv;
1436}
1437
8fe39aac
PR
1438static enum write_ordering_e
1439max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
1440{
1441 struct disk_conf *dc;
1442
1443 dc = rcu_dereference(bdev->disk_conf);
1444
f6ba8636
AG
1445 if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
1446 wo = WO_DRAIN_IO;
1447 if (wo == WO_DRAIN_IO && !dc->disk_drain)
1448 wo = WO_NONE;
8fe39aac
PR
1449
1450 return wo;
1451}
1452
b411b363
PR
1453/**
1454 * drbd_bump_write_ordering() - Fall back to an other write ordering method
bde89a9e 1455 * @connection: DRBD connection.
b411b363
PR
1456 * @wo: Write ordering method to try.
1457 */
8fe39aac
PR
1458void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1459 enum write_ordering_e wo)
b411b363 1460{
e9526580 1461 struct drbd_device *device;
b411b363 1462 enum write_ordering_e pwo;
4b0007c0 1463 int vnr;
b411b363 1464 static char *write_ordering_str[] = {
f6ba8636
AG
1465 [WO_NONE] = "none",
1466 [WO_DRAIN_IO] = "drain",
1467 [WO_BDEV_FLUSH] = "flush",
b411b363
PR
1468 };
1469
e9526580 1470 pwo = resource->write_ordering;
f6ba8636 1471 if (wo != WO_BDEV_FLUSH)
70df7092 1472 wo = min(pwo, wo);
daeda1cc 1473 rcu_read_lock();
e9526580 1474 idr_for_each_entry(&resource->devices, device, vnr) {
8fe39aac
PR
1475 if (get_ldev(device)) {
1476 wo = max_allowed_wo(device->ldev, wo);
1477 if (device->ldev == bdev)
1478 bdev = NULL;
1479 put_ldev(device);
1480 }
4b0007c0 1481 }
8fe39aac
PR
1482
1483 if (bdev)
1484 wo = max_allowed_wo(bdev, wo);
1485
70df7092
LE
1486 rcu_read_unlock();
1487
e9526580 1488 resource->write_ordering = wo;
f6ba8636 1489 if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
e9526580 1490 drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
b411b363
PR
1491}
1492
0dbed96a 1493static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
dd4f699d
LE
1494{
1495 struct block_device *bdev = device->ldev->backing_bdev;
dd4f699d 1496
0dbed96a
CH
1497 if (blkdev_issue_zeroout(bdev, peer_req->i.sector, peer_req->i.size >> 9,
1498 GFP_NOIO, 0))
dd4f699d 1499 peer_req->flags |= EE_WAS_ERROR;
0dbed96a 1500
dd4f699d
LE
1501 drbd_endio_write_sec_final(peer_req);
1502}
1503
9104d31a
LE
1504static void drbd_issue_peer_wsame(struct drbd_device *device,
1505 struct drbd_peer_request *peer_req)
1506{
1507 struct block_device *bdev = device->ldev->backing_bdev;
1508 sector_t s = peer_req->i.sector;
1509 sector_t nr = peer_req->i.size >> 9;
1510 if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
1511 peer_req->flags |= EE_WAS_ERROR;
1512 drbd_endio_write_sec_final(peer_req);
1513}
1514
1515
45bb912b 1516/**
fbe29dec 1517 * drbd_submit_peer_request()
b30ab791 1518 * @device: DRBD device.
db830c46 1519 * @peer_req: peer request
1eff9d32 1520 * @rw: flag field, see bio->bi_opf
10f6d992
LE
1521 *
1522 * May spread the pages to multiple bios,
1523 * depending on bio_add_page restrictions.
1524 *
1525 * Returns 0 if all bios have been submitted,
1526 * -ENOMEM if we could not allocate enough bios,
1527 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1528 * single page to an empty bio (which should never happen and likely indicates
1529 * that the lower level IO stack is in some way broken). This has been observed
1530 * on certain Xen deployments.
45bb912b
LE
1531 */
1532/* TODO allocate from our own bio_set. */
b30ab791 1533int drbd_submit_peer_request(struct drbd_device *device,
fbe29dec 1534 struct drbd_peer_request *peer_req,
bb3cc85e
MC
1535 const unsigned op, const unsigned op_flags,
1536 const int fault_type)
45bb912b
LE
1537{
1538 struct bio *bios = NULL;
1539 struct bio *bio;
db830c46
AG
1540 struct page *page = peer_req->pages;
1541 sector_t sector = peer_req->i.sector;
11f8b2b6 1542 unsigned data_size = peer_req->i.size;
45bb912b 1543 unsigned n_bios = 0;
11f8b2b6 1544 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
10f6d992 1545 int err = -ENOMEM;
45bb912b 1546
dd4f699d
LE
1547 /* TRIM/DISCARD: for now, always use the helper function
1548 * blkdev_issue_zeroout(..., discard=true).
1549 * It's synchronous, but it does the right thing wrt. bio splitting.
1550 * Correctness first, performance later. Next step is to code an
1551 * asynchronous variant of the same.
1552 */
9104d31a 1553 if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) {
a0fb3c47
LE
1554 /* wait for all pending IO completions, before we start
1555 * zeroing things out. */
5dd2ca19 1556 conn_wait_active_ee_empty(peer_req->peer_device->connection);
45d2933c
LE
1557 /* add it to the active list now,
1558 * so we can find it to present it in debugfs */
21ae5d7f
LE
1559 peer_req->submit_jif = jiffies;
1560 peer_req->flags |= EE_SUBMITTED;
700ca8c0
PR
1561
1562 /* If this was a resync request from receive_rs_deallocated(),
1563 * it is already on the sync_ee list */
1564 if (list_empty(&peer_req->w.list)) {
1565 spin_lock_irq(&device->resource->req_lock);
1566 list_add_tail(&peer_req->w.list, &device->active_ee);
1567 spin_unlock_irq(&device->resource->req_lock);
1568 }
1569
9104d31a
LE
1570 if (peer_req->flags & EE_IS_TRIM)
1571 drbd_issue_peer_discard(device, peer_req);
1572 else /* EE_WRITE_SAME */
1573 drbd_issue_peer_wsame(device, peer_req);
a0fb3c47
LE
1574 return 0;
1575 }
1576
45bb912b
LE
1577 /* In most cases, we will only need one bio. But in case the lower
1578 * level restrictions happen to be different at this offset on this
1579 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1580 * request in more than one bio.
1581 *
1582 * Plain bio_alloc is good enough here, this is no DRBD internally
1583 * generated bio, but a bio allocated on behalf of the peer.
1584 */
45bb912b
LE
1585next_bio:
1586 bio = bio_alloc(GFP_NOIO, nr_pages);
1587 if (!bio) {
a0fb3c47 1588 drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
45bb912b
LE
1589 goto fail;
1590 }
db830c46 1591 /* > peer_req->i.sector, unless this is the first bio */
4f024f37 1592 bio->bi_iter.bi_sector = sector;
74d46992 1593 bio_set_dev(bio, device->ldev->backing_bdev);
bb3cc85e 1594 bio_set_op_attrs(bio, op, op_flags);
db830c46 1595 bio->bi_private = peer_req;
fcefa62e 1596 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1597
1598 bio->bi_next = bios;
1599 bios = bio;
1600 ++n_bios;
1601
1602 page_chain_for_each(page) {
11f8b2b6 1603 unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
06efffda 1604 if (!bio_add_page(bio, page, len, 0))
45bb912b 1605 goto next_bio;
11f8b2b6 1606 data_size -= len;
45bb912b
LE
1607 sector += len >> 9;
1608 --nr_pages;
1609 }
11f8b2b6 1610 D_ASSERT(device, data_size == 0);
a0fb3c47 1611 D_ASSERT(device, page == NULL);
45bb912b 1612
db830c46 1613 atomic_set(&peer_req->pending_bios, n_bios);
21ae5d7f
LE
1614 /* for debugfs: update timestamp, mark as submitted */
1615 peer_req->submit_jif = jiffies;
1616 peer_req->flags |= EE_SUBMITTED;
45bb912b
LE
1617 do {
1618 bio = bios;
1619 bios = bios->bi_next;
1620 bio->bi_next = NULL;
1621
b30ab791 1622 drbd_generic_make_request(device, fault_type, bio);
45bb912b 1623 } while (bios);
45bb912b
LE
1624 return 0;
1625
1626fail:
1627 while (bios) {
1628 bio = bios;
1629 bios = bios->bi_next;
1630 bio_put(bio);
1631 }
10f6d992 1632 return err;
45bb912b
LE
1633}
1634
b30ab791 1635static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
db830c46 1636 struct drbd_peer_request *peer_req)
53840641 1637{
db830c46 1638 struct drbd_interval *i = &peer_req->i;
53840641 1639
b30ab791 1640 drbd_remove_interval(&device->write_requests, i);
53840641
AG
1641 drbd_clear_interval(i);
1642
6c852bec 1643 /* Wake up any processes waiting for this peer request to complete. */
53840641 1644 if (i->waiting)
b30ab791 1645 wake_up(&device->misc_wait);
53840641
AG
1646}
1647
bde89a9e 1648static void conn_wait_active_ee_empty(struct drbd_connection *connection)
77fede51 1649{
c06ece6b 1650 struct drbd_peer_device *peer_device;
77fede51
PR
1651 int vnr;
1652
1653 rcu_read_lock();
c06ece6b
AG
1654 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1655 struct drbd_device *device = peer_device->device;
1656
b30ab791 1657 kref_get(&device->kref);
77fede51 1658 rcu_read_unlock();
b30ab791 1659 drbd_wait_ee_list_empty(device, &device->active_ee);
05a10ec7 1660 kref_put(&device->kref, drbd_destroy_device);
77fede51
PR
1661 rcu_read_lock();
1662 }
1663 rcu_read_unlock();
1664}
1665
bde89a9e 1666static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
b411b363 1667{
2451fc3b 1668 int rv;
e658983a 1669 struct p_barrier *p = pi->data;
b411b363
PR
1670 struct drbd_epoch *epoch;
1671
9ed57dcb
LE
1672 /* FIXME these are unacked on connection,
1673 * not a specific (peer)device.
1674 */
bde89a9e
AG
1675 connection->current_epoch->barrier_nr = p->barrier;
1676 connection->current_epoch->connection = connection;
1677 rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1678
1679 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1680 * the activity log, which means it would not be resynced in case the
1681 * R_PRIMARY crashes now.
1682 * Therefore we must send the barrier_ack after the barrier request was
1683 * completed. */
e9526580 1684 switch (connection->resource->write_ordering) {
f6ba8636 1685 case WO_NONE:
b411b363 1686 if (rv == FE_RECYCLED)
82bc0194 1687 return 0;
2451fc3b
PR
1688
1689 /* receiver context, in the writeout path of the other node.
1690 * avoid potential distributed deadlock */
1691 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1692 if (epoch)
1693 break;
1694 else
1ec861eb 1695 drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
2451fc3b 1696 /* Fall through */
b411b363 1697
f6ba8636
AG
1698 case WO_BDEV_FLUSH:
1699 case WO_DRAIN_IO:
bde89a9e
AG
1700 conn_wait_active_ee_empty(connection);
1701 drbd_flush(connection);
2451fc3b 1702
bde89a9e 1703 if (atomic_read(&connection->current_epoch->epoch_size)) {
2451fc3b
PR
1704 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1705 if (epoch)
1706 break;
b411b363
PR
1707 }
1708
82bc0194 1709 return 0;
2451fc3b 1710 default:
e9526580
PR
1711 drbd_err(connection, "Strangeness in connection->write_ordering %d\n",
1712 connection->resource->write_ordering);
82bc0194 1713 return -EIO;
b411b363
PR
1714 }
1715
1716 epoch->flags = 0;
1717 atomic_set(&epoch->epoch_size, 0);
1718 atomic_set(&epoch->active, 0);
1719
bde89a9e
AG
1720 spin_lock(&connection->epoch_lock);
1721 if (atomic_read(&connection->current_epoch->epoch_size)) {
1722 list_add(&epoch->list, &connection->current_epoch->list);
1723 connection->current_epoch = epoch;
1724 connection->epochs++;
b411b363
PR
1725 } else {
1726 /* The current_epoch got recycled while we allocated this one... */
1727 kfree(epoch);
1728 }
bde89a9e 1729 spin_unlock(&connection->epoch_lock);
b411b363 1730
82bc0194 1731 return 0;
b411b363
PR
1732}
1733
9104d31a
LE
1734/* quick wrapper in case payload size != request_size (write same) */
1735static void drbd_csum_ee_size(struct crypto_ahash *h,
1736 struct drbd_peer_request *r, void *d,
1737 unsigned int payload_size)
1738{
1739 unsigned int tmp = r->i.size;
1740 r->i.size = payload_size;
1741 drbd_csum_ee(h, r, d);
1742 r->i.size = tmp;
1743}
1744
b411b363 1745/* used from receive_RSDataReply (recv_resync_read)
9104d31a
LE
1746 * and from receive_Data.
1747 * data_size: actual payload ("data in")
1748 * for normal writes that is bi_size.
1749 * for discards, that is zero.
1750 * for write same, it is logical_block_size.
1751 * both trim and write same have the bi_size ("data len to be affected")
1752 * as extra argument in the packet header.
1753 */
f6ffca9f 1754static struct drbd_peer_request *
69a22773 1755read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
a0fb3c47 1756 struct packet_info *pi) __must_hold(local)
b411b363 1757{
69a22773 1758 struct drbd_device *device = peer_device->device;
b30ab791 1759 const sector_t capacity = drbd_get_capacity(device->this_bdev);
db830c46 1760 struct drbd_peer_request *peer_req;
b411b363 1761 struct page *page;
11f8b2b6
AG
1762 int digest_size, err;
1763 unsigned int data_size = pi->size, ds;
69a22773
AG
1764 void *dig_in = peer_device->connection->int_dig_in;
1765 void *dig_vv = peer_device->connection->int_dig_vv;
6b4388ac 1766 unsigned long *data;
a0fb3c47 1767 struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
9104d31a 1768 struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
b411b363 1769
11f8b2b6 1770 digest_size = 0;
a0fb3c47 1771 if (!trim && peer_device->connection->peer_integrity_tfm) {
9534d671 1772 digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
9f5bdc33
AG
1773 /*
1774 * FIXME: Receive the incoming digest into the receive buffer
1775 * here, together with its struct p_data?
1776 */
11f8b2b6 1777 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
a5c31904 1778 if (err)
b411b363 1779 return NULL;
11f8b2b6 1780 data_size -= digest_size;
b411b363
PR
1781 }
1782
9104d31a
LE
1783 /* assume request_size == data_size, but special case trim and wsame. */
1784 ds = data_size;
a0fb3c47 1785 if (trim) {
9104d31a
LE
1786 if (!expect(data_size == 0))
1787 return NULL;
1788 ds = be32_to_cpu(trim->size);
1789 } else if (wsame) {
1790 if (data_size != queue_logical_block_size(device->rq_queue)) {
1791 drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
1792 data_size, queue_logical_block_size(device->rq_queue));
1793 return NULL;
1794 }
1795 if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
1796 drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
1797 data_size, bdev_logical_block_size(device->ldev->backing_bdev));
1798 return NULL;
1799 }
1800 ds = be32_to_cpu(wsame->size);
a0fb3c47
LE
1801 }
1802
9104d31a 1803 if (!expect(IS_ALIGNED(ds, 512)))
841ce241 1804 return NULL;
9104d31a
LE
1805 if (trim || wsame) {
1806 if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1807 return NULL;
1808 } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
841ce241 1809 return NULL;
b411b363 1810
6666032a
LE
1811 /* even though we trust out peer,
1812 * we sometimes have to double check. */
9104d31a 1813 if (sector + (ds>>9) > capacity) {
d0180171 1814 drbd_err(device, "request from peer beyond end of local disk: "
fdda6544 1815 "capacity: %llus < sector: %llus + size: %u\n",
6666032a 1816 (unsigned long long)capacity,
9104d31a 1817 (unsigned long long)sector, ds);
6666032a
LE
1818 return NULL;
1819 }
1820
b411b363
PR
1821 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1822 * "criss-cross" setup, that might cause write-out on some other DRBD,
1823 * which in turn might block on the other node at this very place. */
9104d31a 1824 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
db830c46 1825 if (!peer_req)
b411b363 1826 return NULL;
45bb912b 1827
21ae5d7f 1828 peer_req->flags |= EE_WRITE;
9104d31a
LE
1829 if (trim) {
1830 peer_req->flags |= EE_IS_TRIM;
81a3537a 1831 return peer_req;
9104d31a
LE
1832 }
1833 if (wsame)
1834 peer_req->flags |= EE_WRITE_SAME;
a73ff323 1835
9104d31a 1836 /* receive payload size bytes into page chain */
b411b363 1837 ds = data_size;
db830c46 1838 page = peer_req->pages;
45bb912b
LE
1839 page_chain_for_each(page) {
1840 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1841 data = kmap(page);
69a22773 1842 err = drbd_recv_all_warn(peer_device->connection, data, len);
b30ab791 1843 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
d0180171 1844 drbd_err(device, "Fault injection: Corrupting data on receive\n");
6b4388ac
PR
1845 data[0] = data[0] ^ (unsigned long)-1;
1846 }
b411b363 1847 kunmap(page);
a5c31904 1848 if (err) {
b30ab791 1849 drbd_free_peer_req(device, peer_req);
b411b363
PR
1850 return NULL;
1851 }
a5c31904 1852 ds -= len;
b411b363
PR
1853 }
1854
11f8b2b6 1855 if (digest_size) {
9104d31a 1856 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
11f8b2b6 1857 if (memcmp(dig_in, dig_vv, digest_size)) {
d0180171 1858 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
470be44a 1859 (unsigned long long)sector, data_size);
b30ab791 1860 drbd_free_peer_req(device, peer_req);
b411b363
PR
1861 return NULL;
1862 }
1863 }
11f8b2b6 1864 device->recv_cnt += data_size >> 9;
db830c46 1865 return peer_req;
b411b363
PR
1866}
1867
1868/* drbd_drain_block() just takes a data block
1869 * out of the socket input buffer, and discards it.
1870 */
69a22773 1871static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
b411b363
PR
1872{
1873 struct page *page;
a5c31904 1874 int err = 0;
b411b363
PR
1875 void *data;
1876
c3470cde 1877 if (!data_size)
fc5be839 1878 return 0;
c3470cde 1879
69a22773 1880 page = drbd_alloc_pages(peer_device, 1, 1);
b411b363
PR
1881
1882 data = kmap(page);
1883 while (data_size) {
fc5be839
AG
1884 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1885
69a22773 1886 err = drbd_recv_all_warn(peer_device->connection, data, len);
a5c31904 1887 if (err)
b411b363 1888 break;
a5c31904 1889 data_size -= len;
b411b363
PR
1890 }
1891 kunmap(page);
69a22773 1892 drbd_free_pages(peer_device->device, page, 0);
fc5be839 1893 return err;
b411b363
PR
1894}
1895
69a22773 1896static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
b411b363
PR
1897 sector_t sector, int data_size)
1898{
7988613b
KO
1899 struct bio_vec bvec;
1900 struct bvec_iter iter;
b411b363 1901 struct bio *bio;
11f8b2b6 1902 int digest_size, err, expect;
69a22773
AG
1903 void *dig_in = peer_device->connection->int_dig_in;
1904 void *dig_vv = peer_device->connection->int_dig_vv;
b411b363 1905
11f8b2b6 1906 digest_size = 0;
69a22773 1907 if (peer_device->connection->peer_integrity_tfm) {
9534d671 1908 digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
11f8b2b6 1909 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
a5c31904
AG
1910 if (err)
1911 return err;
11f8b2b6 1912 data_size -= digest_size;
b411b363
PR
1913 }
1914
b411b363
PR
1915 /* optimistically update recv_cnt. if receiving fails below,
1916 * we disconnect anyways, and counters will be reset. */
69a22773 1917 peer_device->device->recv_cnt += data_size>>9;
b411b363
PR
1918
1919 bio = req->master_bio;
69a22773 1920 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
b411b363 1921
7988613b
KO
1922 bio_for_each_segment(bvec, bio, iter) {
1923 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1924 expect = min_t(int, data_size, bvec.bv_len);
69a22773 1925 err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
7988613b 1926 kunmap(bvec.bv_page);
a5c31904
AG
1927 if (err)
1928 return err;
1929 data_size -= expect;
b411b363
PR
1930 }
1931
11f8b2b6 1932 if (digest_size) {
69a22773 1933 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
11f8b2b6 1934 if (memcmp(dig_in, dig_vv, digest_size)) {
69a22773 1935 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 1936 return -EINVAL;
b411b363
PR
1937 }
1938 }
1939
69a22773 1940 D_ASSERT(peer_device->device, data_size == 0);
28284cef 1941 return 0;
b411b363
PR
1942}
1943
a990be46 1944/*
668700b4 1945 * e_end_resync_block() is called in ack_sender context via
a990be46
AG
1946 * drbd_finish_peer_reqs().
1947 */
99920dc5 1948static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 1949{
8050e6d0 1950 struct drbd_peer_request *peer_req =
a8cd15ba
AG
1951 container_of(w, struct drbd_peer_request, w);
1952 struct drbd_peer_device *peer_device = peer_req->peer_device;
1953 struct drbd_device *device = peer_device->device;
db830c46 1954 sector_t sector = peer_req->i.sector;
99920dc5 1955 int err;
b411b363 1956
0b0ba1ef 1957 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
b411b363 1958
db830c46 1959 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b30ab791 1960 drbd_set_in_sync(device, sector, peer_req->i.size);
a8cd15ba 1961 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
b411b363
PR
1962 } else {
1963 /* Record failure to sync */
b30ab791 1964 drbd_rs_failed_io(device, sector, peer_req->i.size);
b411b363 1965
a8cd15ba 1966 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
b411b363 1967 }
b30ab791 1968 dec_unacked(device);
b411b363 1969
99920dc5 1970 return err;
b411b363
PR
1971}
1972
69a22773 1973static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
a0fb3c47 1974 struct packet_info *pi) __releases(local)
b411b363 1975{
69a22773 1976 struct drbd_device *device = peer_device->device;
db830c46 1977 struct drbd_peer_request *peer_req;
b411b363 1978
a0fb3c47 1979 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
db830c46 1980 if (!peer_req)
45bb912b 1981 goto fail;
b411b363 1982
b30ab791 1983 dec_rs_pending(device);
b411b363 1984
b30ab791 1985 inc_unacked(device);
b411b363
PR
1986 /* corresponding dec_unacked() in e_end_resync_block()
1987 * respective _drbd_clear_done_ee */
1988
a8cd15ba 1989 peer_req->w.cb = e_end_resync_block;
21ae5d7f 1990 peer_req->submit_jif = jiffies;
45bb912b 1991
0500813f 1992 spin_lock_irq(&device->resource->req_lock);
b9ed7080 1993 list_add_tail(&peer_req->w.list, &device->sync_ee);
0500813f 1994 spin_unlock_irq(&device->resource->req_lock);
b411b363 1995
a0fb3c47 1996 atomic_add(pi->size >> 9, &device->rs_sect_ev);
bb3cc85e
MC
1997 if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
1998 DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 1999 return 0;
b411b363 2000
10f6d992 2001 /* don't care for the reason here */
d0180171 2002 drbd_err(device, "submit failed, triggering re-connect\n");
0500813f 2003 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2004 list_del(&peer_req->w.list);
0500813f 2005 spin_unlock_irq(&device->resource->req_lock);
22cc37a9 2006
b30ab791 2007 drbd_free_peer_req(device, peer_req);
45bb912b 2008fail:
b30ab791 2009 put_ldev(device);
e1c1b0fc 2010 return -EIO;
b411b363
PR
2011}
2012
668eebc6 2013static struct drbd_request *
b30ab791 2014find_request(struct drbd_device *device, struct rb_root *root, u64 id,
bc9c5c41 2015 sector_t sector, bool missing_ok, const char *func)
51624585 2016{
51624585
AG
2017 struct drbd_request *req;
2018
bc9c5c41
AG
2019 /* Request object according to our peer */
2020 req = (struct drbd_request *)(unsigned long)id;
5e472264 2021 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 2022 return req;
c3afd8f5 2023 if (!missing_ok) {
d0180171 2024 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
2025 (unsigned long)id, (unsigned long long)sector);
2026 }
51624585 2027 return NULL;
b411b363
PR
2028}
2029
bde89a9e 2030static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2031{
9f4fe9ad 2032 struct drbd_peer_device *peer_device;
b30ab791 2033 struct drbd_device *device;
b411b363
PR
2034 struct drbd_request *req;
2035 sector_t sector;
82bc0194 2036 int err;
e658983a 2037 struct p_data *p = pi->data;
4a76b161 2038
9f4fe9ad
AG
2039 peer_device = conn_peer_device(connection, pi->vnr);
2040 if (!peer_device)
4a76b161 2041 return -EIO;
9f4fe9ad 2042 device = peer_device->device;
b411b363
PR
2043
2044 sector = be64_to_cpu(p->sector);
2045
0500813f 2046 spin_lock_irq(&device->resource->req_lock);
b30ab791 2047 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
0500813f 2048 spin_unlock_irq(&device->resource->req_lock);
c3afd8f5 2049 if (unlikely(!req))
82bc0194 2050 return -EIO;
b411b363 2051
24c4830c 2052 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
b411b363
PR
2053 * special casing it there for the various failure cases.
2054 * still no race with drbd_fail_pending_reads */
69a22773 2055 err = recv_dless_read(peer_device, req, sector, pi->size);
82bc0194 2056 if (!err)
8554df1c 2057 req_mod(req, DATA_RECEIVED);
b411b363
PR
2058 /* else: nothing. handled from drbd_disconnect...
2059 * I don't think we may complete this just yet
2060 * in case we are "on-disconnect: freeze" */
2061
82bc0194 2062 return err;
b411b363
PR
2063}
2064
bde89a9e 2065static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2066{
9f4fe9ad 2067 struct drbd_peer_device *peer_device;
b30ab791 2068 struct drbd_device *device;
b411b363 2069 sector_t sector;
82bc0194 2070 int err;
e658983a 2071 struct p_data *p = pi->data;
4a76b161 2072
9f4fe9ad
AG
2073 peer_device = conn_peer_device(connection, pi->vnr);
2074 if (!peer_device)
4a76b161 2075 return -EIO;
9f4fe9ad 2076 device = peer_device->device;
b411b363
PR
2077
2078 sector = be64_to_cpu(p->sector);
0b0ba1ef 2079 D_ASSERT(device, p->block_id == ID_SYNCER);
b411b363 2080
b30ab791 2081 if (get_ldev(device)) {
b411b363
PR
2082 /* data is submitted to disk within recv_resync_read.
2083 * corresponding put_ldev done below on error,
fcefa62e 2084 * or in drbd_peer_request_endio. */
a0fb3c47 2085 err = recv_resync_read(peer_device, sector, pi);
b411b363
PR
2086 } else {
2087 if (__ratelimit(&drbd_ratelimit_state))
d0180171 2088 drbd_err(device, "Can not write resync data to local disk.\n");
b411b363 2089
69a22773 2090 err = drbd_drain_block(peer_device, pi->size);
b411b363 2091
69a22773 2092 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
b411b363
PR
2093 }
2094
b30ab791 2095 atomic_add(pi->size >> 9, &device->rs_sect_in);
778f271d 2096
82bc0194 2097 return err;
b411b363
PR
2098}
2099
b30ab791 2100static void restart_conflicting_writes(struct drbd_device *device,
7be8da07 2101 sector_t sector, int size)
b411b363 2102{
7be8da07
AG
2103 struct drbd_interval *i;
2104 struct drbd_request *req;
2105
b30ab791 2106 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2107 if (!i->local)
2108 continue;
2109 req = container_of(i, struct drbd_request, i);
2110 if (req->rq_state & RQ_LOCAL_PENDING ||
2111 !(req->rq_state & RQ_POSTPONED))
2112 continue;
2312f0b3
LE
2113 /* as it is RQ_POSTPONED, this will cause it to
2114 * be queued on the retry workqueue. */
d4dabbe2 2115 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
2116 }
2117}
b411b363 2118
a990be46 2119/*
668700b4 2120 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
b411b363 2121 */
99920dc5 2122static int e_end_block(struct drbd_work *w, int cancel)
b411b363 2123{
8050e6d0 2124 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2125 container_of(w, struct drbd_peer_request, w);
2126 struct drbd_peer_device *peer_device = peer_req->peer_device;
2127 struct drbd_device *device = peer_device->device;
db830c46 2128 sector_t sector = peer_req->i.sector;
99920dc5 2129 int err = 0, pcmd;
b411b363 2130
303d1448 2131 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 2132 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b30ab791
AG
2133 pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2134 device->state.conn <= C_PAUSED_SYNC_T &&
db830c46 2135 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 2136 P_RS_WRITE_ACK : P_WRITE_ACK;
a8cd15ba 2137 err = drbd_send_ack(peer_device, pcmd, peer_req);
b411b363 2138 if (pcmd == P_RS_WRITE_ACK)
b30ab791 2139 drbd_set_in_sync(device, sector, peer_req->i.size);
b411b363 2140 } else {
a8cd15ba 2141 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
b411b363
PR
2142 /* we expect it to be marked out of sync anyways...
2143 * maybe assert this? */
2144 }
b30ab791 2145 dec_unacked(device);
b411b363 2146 }
08d0dabf 2147
b411b363
PR
2148 /* we delete from the conflict detection hash _after_ we sent out the
2149 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 2150 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
0500813f 2151 spin_lock_irq(&device->resource->req_lock);
0b0ba1ef 2152 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
b30ab791 2153 drbd_remove_epoch_entry_interval(device, peer_req);
7be8da07 2154 if (peer_req->flags & EE_RESTART_REQUESTS)
b30ab791 2155 restart_conflicting_writes(device, sector, peer_req->i.size);
0500813f 2156 spin_unlock_irq(&device->resource->req_lock);
bb3bfe96 2157 } else
0b0ba1ef 2158 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
b411b363 2159
5dd2ca19 2160 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 2161
99920dc5 2162 return err;
b411b363
PR
2163}
2164
a8cd15ba 2165static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 2166{
8050e6d0 2167 struct drbd_peer_request *peer_req =
a8cd15ba
AG
2168 container_of(w, struct drbd_peer_request, w);
2169 struct drbd_peer_device *peer_device = peer_req->peer_device;
99920dc5 2170 int err;
b411b363 2171
a8cd15ba
AG
2172 err = drbd_send_ack(peer_device, ack, peer_req);
2173 dec_unacked(peer_device->device);
b411b363 2174
99920dc5 2175 return err;
b411b363
PR
2176}
2177
d4dabbe2 2178static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 2179{
a8cd15ba 2180 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
2181}
2182
99920dc5 2183static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07 2184{
a8cd15ba
AG
2185 struct drbd_peer_request *peer_req =
2186 container_of(w, struct drbd_peer_request, w);
2187 struct drbd_connection *connection = peer_req->peer_device->connection;
7be8da07 2188
a8cd15ba 2189 return e_send_ack(w, connection->agreed_pro_version >= 100 ?
d4dabbe2 2190 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 2191}
b411b363 2192
3e394da1
AG
2193static bool seq_greater(u32 a, u32 b)
2194{
2195 /*
2196 * We assume 32-bit wrap-around here.
2197 * For 24-bit wrap-around, we would have to shift:
2198 * a <<= 8; b <<= 8;
2199 */
2200 return (s32)a - (s32)b > 0;
2201}
b411b363 2202
3e394da1
AG
2203static u32 seq_max(u32 a, u32 b)
2204{
2205 return seq_greater(a, b) ? a : b;
b411b363
PR
2206}
2207
69a22773 2208static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
3e394da1 2209{
69a22773 2210 struct drbd_device *device = peer_device->device;
3c13b680 2211 unsigned int newest_peer_seq;
3e394da1 2212
69a22773 2213 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
b30ab791
AG
2214 spin_lock(&device->peer_seq_lock);
2215 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2216 device->peer_seq = newest_peer_seq;
2217 spin_unlock(&device->peer_seq_lock);
2218 /* wake up only if we actually changed device->peer_seq */
3c13b680 2219 if (peer_seq == newest_peer_seq)
b30ab791 2220 wake_up(&device->seq_wait);
7be8da07 2221 }
b411b363
PR
2222}
2223
d93f6302 2224static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 2225{
d93f6302
LE
2226 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
2227}
b6a370ba 2228
d93f6302 2229/* maybe change sync_ee into interval trees as well? */
b30ab791 2230static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
d93f6302
LE
2231{
2232 struct drbd_peer_request *rs_req;
7e5fec31 2233 bool rv = false;
b6a370ba 2234
0500813f 2235 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2236 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
d93f6302
LE
2237 if (overlaps(peer_req->i.sector, peer_req->i.size,
2238 rs_req->i.sector, rs_req->i.size)) {
7e5fec31 2239 rv = true;
b6a370ba
PR
2240 break;
2241 }
2242 }
0500813f 2243 spin_unlock_irq(&device->resource->req_lock);
b6a370ba
PR
2244
2245 return rv;
2246}
2247
b411b363
PR
2248/* Called from receive_Data.
2249 * Synchronize packets on sock with packets on msock.
2250 *
2251 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
2252 * packet traveling on msock, they are still processed in the order they have
2253 * been sent.
2254 *
2255 * Note: we don't care for Ack packets overtaking P_DATA packets.
2256 *
b30ab791 2257 * In case packet_seq is larger than device->peer_seq number, there are
b411b363 2258 * outstanding packets on the msock. We wait for them to arrive.
b30ab791 2259 * In case we are the logically next packet, we update device->peer_seq
b411b363
PR
2260 * ourselves. Correctly handles 32bit wrap around.
2261 *
2262 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
2263 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
2264 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
2265 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
2266 *
2267 * returns 0 if we may process the packet,
2268 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
69a22773 2269static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
b411b363 2270{
69a22773 2271 struct drbd_device *device = peer_device->device;
b411b363 2272 DEFINE_WAIT(wait);
b411b363 2273 long timeout;
b874d231 2274 int ret = 0, tp;
7be8da07 2275
69a22773 2276 if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
7be8da07
AG
2277 return 0;
2278
b30ab791 2279 spin_lock(&device->peer_seq_lock);
b411b363 2280 for (;;) {
b30ab791
AG
2281 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2282 device->peer_seq = seq_max(device->peer_seq, peer_seq);
b411b363 2283 break;
7be8da07 2284 }
b874d231 2285
b411b363
PR
2286 if (signal_pending(current)) {
2287 ret = -ERESTARTSYS;
2288 break;
2289 }
b874d231
PR
2290
2291 rcu_read_lock();
5dd2ca19 2292 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
b874d231
PR
2293 rcu_read_unlock();
2294
2295 if (!tp)
2296 break;
2297
2298 /* Only need to wait if two_primaries is enabled */
b30ab791
AG
2299 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2300 spin_unlock(&device->peer_seq_lock);
44ed167d 2301 rcu_read_lock();
69a22773 2302 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
44ed167d 2303 rcu_read_unlock();
71b1c1eb 2304 timeout = schedule_timeout(timeout);
b30ab791 2305 spin_lock(&device->peer_seq_lock);
7be8da07 2306 if (!timeout) {
b411b363 2307 ret = -ETIMEDOUT;
d0180171 2308 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
2309 break;
2310 }
2311 }
b30ab791
AG
2312 spin_unlock(&device->peer_seq_lock);
2313 finish_wait(&device->seq_wait, &wait);
b411b363
PR
2314 return ret;
2315}
2316
688593c5
LE
2317/* see also bio_flags_to_wire()
2318 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2319 * flags and back. We may replicate to other kernel versions. */
bb3cc85e 2320static unsigned long wire_flags_to_bio_flags(u32 dpf)
76d2e7ec 2321{
688593c5
LE
2322 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2323 (dpf & DP_FUA ? REQ_FUA : 0) |
28a8f0d3 2324 (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
bb3cc85e
MC
2325}
2326
2327static unsigned long wire_flags_to_bio_op(u32 dpf)
2328{
2329 if (dpf & DP_DISCARD)
45c21793 2330 return REQ_OP_WRITE_ZEROES;
bb3cc85e
MC
2331 else
2332 return REQ_OP_WRITE;
76d2e7ec
PR
2333}
2334
b30ab791 2335static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
7be8da07
AG
2336 unsigned int size)
2337{
2338 struct drbd_interval *i;
2339
2340 repeat:
b30ab791 2341 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2342 struct drbd_request *req;
2343 struct bio_and_error m;
2344
2345 if (!i->local)
2346 continue;
2347 req = container_of(i, struct drbd_request, i);
2348 if (!(req->rq_state & RQ_POSTPONED))
2349 continue;
2350 req->rq_state &= ~RQ_POSTPONED;
2351 __req_mod(req, NEG_ACKED, &m);
0500813f 2352 spin_unlock_irq(&device->resource->req_lock);
7be8da07 2353 if (m.bio)
b30ab791 2354 complete_master_bio(device, &m);
0500813f 2355 spin_lock_irq(&device->resource->req_lock);
7be8da07
AG
2356 goto repeat;
2357 }
2358}
2359
b30ab791 2360static int handle_write_conflicts(struct drbd_device *device,
7be8da07
AG
2361 struct drbd_peer_request *peer_req)
2362{
e33b32de 2363 struct drbd_connection *connection = peer_req->peer_device->connection;
bde89a9e 2364 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
7be8da07
AG
2365 sector_t sector = peer_req->i.sector;
2366 const unsigned int size = peer_req->i.size;
2367 struct drbd_interval *i;
2368 bool equal;
2369 int err;
2370
2371 /*
2372 * Inserting the peer request into the write_requests tree will prevent
2373 * new conflicting local requests from being added.
2374 */
b30ab791 2375 drbd_insert_interval(&device->write_requests, &peer_req->i);
7be8da07
AG
2376
2377 repeat:
b30ab791 2378 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
7be8da07
AG
2379 if (i == &peer_req->i)
2380 continue;
08d0dabf
LE
2381 if (i->completed)
2382 continue;
7be8da07
AG
2383
2384 if (!i->local) {
2385 /*
2386 * Our peer has sent a conflicting remote request; this
2387 * should not happen in a two-node setup. Wait for the
2388 * earlier peer request to complete.
2389 */
b30ab791 2390 err = drbd_wait_misc(device, i);
7be8da07
AG
2391 if (err)
2392 goto out;
2393 goto repeat;
2394 }
2395
2396 equal = i->sector == sector && i->size == size;
2397 if (resolve_conflicts) {
2398 /*
2399 * If the peer request is fully contained within the
d4dabbe2
LE
2400 * overlapping request, it can be considered overwritten
2401 * and thus superseded; otherwise, it will be retried
2402 * once all overlapping requests have completed.
7be8da07 2403 */
d4dabbe2 2404 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2405 (i->size >> 9) >= sector + (size >> 9);
2406
2407 if (!equal)
d0180171 2408 drbd_alert(device, "Concurrent writes detected: "
7be8da07
AG
2409 "local=%llus +%u, remote=%llus +%u, "
2410 "assuming %s came first\n",
2411 (unsigned long long)i->sector, i->size,
2412 (unsigned long long)sector, size,
d4dabbe2 2413 superseded ? "local" : "remote");
7be8da07 2414
a8cd15ba 2415 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07 2416 e_send_retry_write;
a8cd15ba 2417 list_add_tail(&peer_req->w.list, &device->done_ee);
668700b4 2418 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
7be8da07
AG
2419
2420 err = -ENOENT;
2421 goto out;
2422 } else {
2423 struct drbd_request *req =
2424 container_of(i, struct drbd_request, i);
2425
2426 if (!equal)
d0180171 2427 drbd_alert(device, "Concurrent writes detected: "
7be8da07
AG
2428 "local=%llus +%u, remote=%llus +%u\n",
2429 (unsigned long long)i->sector, i->size,
2430 (unsigned long long)sector, size);
2431
2432 if (req->rq_state & RQ_LOCAL_PENDING ||
2433 !(req->rq_state & RQ_POSTPONED)) {
2434 /*
2435 * Wait for the node with the discard flag to
d4dabbe2
LE
2436 * decide if this request has been superseded
2437 * or needs to be retried.
2438 * Requests that have been superseded will
7be8da07
AG
2439 * disappear from the write_requests tree.
2440 *
2441 * In addition, wait for the conflicting
2442 * request to finish locally before submitting
2443 * the conflicting peer request.
2444 */
b30ab791 2445 err = drbd_wait_misc(device, &req->i);
7be8da07 2446 if (err) {
e33b32de 2447 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
b30ab791 2448 fail_postponed_requests(device, sector, size);
7be8da07
AG
2449 goto out;
2450 }
2451 goto repeat;
2452 }
2453 /*
2454 * Remember to restart the conflicting requests after
2455 * the new peer request has completed.
2456 */
2457 peer_req->flags |= EE_RESTART_REQUESTS;
2458 }
2459 }
2460 err = 0;
2461
2462 out:
2463 if (err)
b30ab791 2464 drbd_remove_epoch_entry_interval(device, peer_req);
7be8da07
AG
2465 return err;
2466}
2467
b411b363 2468/* mirrored write */
bde89a9e 2469static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2470{
9f4fe9ad 2471 struct drbd_peer_device *peer_device;
b30ab791 2472 struct drbd_device *device;
21ae5d7f 2473 struct net_conf *nc;
b411b363 2474 sector_t sector;
db830c46 2475 struct drbd_peer_request *peer_req;
e658983a 2476 struct p_data *p = pi->data;
7be8da07 2477 u32 peer_seq = be32_to_cpu(p->seq_num);
bb3cc85e 2478 int op, op_flags;
b411b363 2479 u32 dp_flags;
302bdeae 2480 int err, tp;
b411b363 2481
9f4fe9ad
AG
2482 peer_device = conn_peer_device(connection, pi->vnr);
2483 if (!peer_device)
4a76b161 2484 return -EIO;
9f4fe9ad 2485 device = peer_device->device;
b411b363 2486
b30ab791 2487 if (!get_ldev(device)) {
82bc0194
AG
2488 int err2;
2489
69a22773
AG
2490 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2491 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
bde89a9e 2492 atomic_inc(&connection->current_epoch->epoch_size);
69a22773 2493 err2 = drbd_drain_block(peer_device, pi->size);
82bc0194
AG
2494 if (!err)
2495 err = err2;
2496 return err;
b411b363
PR
2497 }
2498
fcefa62e
AG
2499 /*
2500 * Corresponding put_ldev done either below (on various errors), or in
2501 * drbd_peer_request_endio, if we successfully submit the data at the
2502 * end of this function.
2503 */
b411b363
PR
2504
2505 sector = be64_to_cpu(p->sector);
a0fb3c47 2506 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
db830c46 2507 if (!peer_req) {
b30ab791 2508 put_ldev(device);
82bc0194 2509 return -EIO;
b411b363
PR
2510 }
2511
a8cd15ba 2512 peer_req->w.cb = e_end_block;
21ae5d7f
LE
2513 peer_req->submit_jif = jiffies;
2514 peer_req->flags |= EE_APPLICATION;
b411b363 2515
688593c5 2516 dp_flags = be32_to_cpu(p->dp_flags);
bb3cc85e
MC
2517 op = wire_flags_to_bio_op(dp_flags);
2518 op_flags = wire_flags_to_bio_flags(dp_flags);
a0fb3c47 2519 if (pi->cmd == P_TRIM) {
a0fb3c47 2520 D_ASSERT(peer_device, peer_req->i.size > 0);
45c21793 2521 D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
a0fb3c47
LE
2522 D_ASSERT(peer_device, peer_req->pages == NULL);
2523 } else if (peer_req->pages == NULL) {
0b0ba1ef
AG
2524 D_ASSERT(device, peer_req->i.size == 0);
2525 D_ASSERT(device, dp_flags & DP_FLUSH);
a73ff323 2526 }
688593c5
LE
2527
2528 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2529 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2530
bde89a9e
AG
2531 spin_lock(&connection->epoch_lock);
2532 peer_req->epoch = connection->current_epoch;
db830c46
AG
2533 atomic_inc(&peer_req->epoch->epoch_size);
2534 atomic_inc(&peer_req->epoch->active);
bde89a9e 2535 spin_unlock(&connection->epoch_lock);
b411b363 2536
302bdeae 2537 rcu_read_lock();
21ae5d7f
LE
2538 nc = rcu_dereference(peer_device->connection->net_conf);
2539 tp = nc->two_primaries;
2540 if (peer_device->connection->agreed_pro_version < 100) {
2541 switch (nc->wire_protocol) {
2542 case DRBD_PROT_C:
2543 dp_flags |= DP_SEND_WRITE_ACK;
2544 break;
2545 case DRBD_PROT_B:
2546 dp_flags |= DP_SEND_RECEIVE_ACK;
2547 break;
2548 }
2549 }
302bdeae 2550 rcu_read_unlock();
21ae5d7f
LE
2551
2552 if (dp_flags & DP_SEND_WRITE_ACK) {
2553 peer_req->flags |= EE_SEND_WRITE_ACK;
2554 inc_unacked(device);
2555 /* corresponding dec_unacked() in e_end_block()
2556 * respective _drbd_clear_done_ee */
2557 }
2558
2559 if (dp_flags & DP_SEND_RECEIVE_ACK) {
2560 /* I really don't like it that the receiver thread
2561 * sends on the msock, but anyways */
5dd2ca19 2562 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
21ae5d7f
LE
2563 }
2564
302bdeae 2565 if (tp) {
21ae5d7f
LE
2566 /* two primaries implies protocol C */
2567 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
302bdeae 2568 peer_req->flags |= EE_IN_INTERVAL_TREE;
69a22773 2569 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
7be8da07 2570 if (err)
b411b363 2571 goto out_interrupted;
0500813f 2572 spin_lock_irq(&device->resource->req_lock);
b30ab791 2573 err = handle_write_conflicts(device, peer_req);
7be8da07 2574 if (err) {
0500813f 2575 spin_unlock_irq(&device->resource->req_lock);
7be8da07 2576 if (err == -ENOENT) {
b30ab791 2577 put_ldev(device);
82bc0194 2578 return 0;
b411b363 2579 }
7be8da07 2580 goto out_interrupted;
b411b363 2581 }
b874d231 2582 } else {
69a22773 2583 update_peer_seq(peer_device, peer_seq);
0500813f 2584 spin_lock_irq(&device->resource->req_lock);
b874d231 2585 }
9104d31a
LE
2586 /* TRIM and WRITE_SAME are processed synchronously,
2587 * we wait for all pending requests, respectively wait for
a0fb3c47
LE
2588 * active_ee to become empty in drbd_submit_peer_request();
2589 * better not add ourselves here. */
9104d31a 2590 if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0)
b9ed7080 2591 list_add_tail(&peer_req->w.list, &device->active_ee);
0500813f 2592 spin_unlock_irq(&device->resource->req_lock);
b411b363 2593
b30ab791
AG
2594 if (device->state.conn == C_SYNC_TARGET)
2595 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
b411b363 2596
b30ab791 2597 if (device->state.pdsk < D_INCONSISTENT) {
b411b363 2598 /* In case we have the only disk of the cluster, */
b30ab791 2599 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
db830c46 2600 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
4dd726f0 2601 drbd_al_begin_io(device, &peer_req->i);
21ae5d7f 2602 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
b411b363
PR
2603 }
2604
bb3cc85e
MC
2605 err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2606 DRBD_FAULT_DT_WR);
82bc0194
AG
2607 if (!err)
2608 return 0;
b411b363 2609
10f6d992 2610 /* don't care for the reason here */
d0180171 2611 drbd_err(device, "submit failed, triggering re-connect\n");
0500813f 2612 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2613 list_del(&peer_req->w.list);
b30ab791 2614 drbd_remove_epoch_entry_interval(device, peer_req);
0500813f 2615 spin_unlock_irq(&device->resource->req_lock);
21ae5d7f
LE
2616 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2617 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
b30ab791 2618 drbd_al_complete_io(device, &peer_req->i);
21ae5d7f 2619 }
22cc37a9 2620
b411b363 2621out_interrupted:
7e5fec31 2622 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
b30ab791
AG
2623 put_ldev(device);
2624 drbd_free_peer_req(device, peer_req);
82bc0194 2625 return err;
b411b363
PR
2626}
2627
0f0601f4
LE
2628/* We may throttle resync, if the lower device seems to be busy,
2629 * and current sync rate is above c_min_rate.
2630 *
2631 * To decide whether or not the lower device is busy, we use a scheme similar
2632 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2633 * (more than 64 sectors) of activity we cannot account for with our own resync
2634 * activity, it obviously is "busy".
2635 *
2636 * The current sync rate used here uses only the most recent two step marks,
2637 * to have a short time average so we can react faster.
2638 */
ad3fee79
LE
2639bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2640 bool throttle_if_app_is_waiting)
0f0601f4 2641{
e3555d85 2642 struct lc_element *tmp;
ad3fee79 2643 bool throttle = drbd_rs_c_min_rate_throttle(device);
daeda1cc 2644
ad3fee79
LE
2645 if (!throttle || throttle_if_app_is_waiting)
2646 return throttle;
0f0601f4 2647
b30ab791
AG
2648 spin_lock_irq(&device->al_lock);
2649 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
e3555d85
PR
2650 if (tmp) {
2651 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
e8299874
LE
2652 if (test_bit(BME_PRIORITY, &bm_ext->flags))
2653 throttle = false;
ad3fee79
LE
2654 /* Do not slow down if app IO is already waiting for this extent,
2655 * and our progress is necessary for application IO to complete. */
e3555d85 2656 }
b30ab791 2657 spin_unlock_irq(&device->al_lock);
e3555d85 2658
e8299874
LE
2659 return throttle;
2660}
2661
2662bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2663{
2664 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
2665 unsigned long db, dt, dbdt;
2666 unsigned int c_min_rate;
2667 int curr_events;
2668
2669 rcu_read_lock();
2670 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2671 rcu_read_unlock();
2672
2673 /* feature disabled? */
2674 if (c_min_rate == 0)
2675 return false;
2676
0f0601f4
LE
2677 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2678 (int)part_stat_read(&disk->part0, sectors[1]) -
b30ab791 2679 atomic_read(&device->rs_sect_ev);
ad3fee79
LE
2680
2681 if (atomic_read(&device->ap_actlog_cnt)
ff8bd88b 2682 || curr_events - device->rs_last_events > 64) {
0f0601f4
LE
2683 unsigned long rs_left;
2684 int i;
2685
b30ab791 2686 device->rs_last_events = curr_events;
0f0601f4
LE
2687
2688 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2689 * approx. */
b30ab791 2690 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2649f080 2691
b30ab791
AG
2692 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2693 rs_left = device->ov_left;
2649f080 2694 else
b30ab791 2695 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
0f0601f4 2696
b30ab791 2697 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
0f0601f4
LE
2698 if (!dt)
2699 dt++;
b30ab791 2700 db = device->rs_mark_left[i] - rs_left;
0f0601f4
LE
2701 dbdt = Bit2KB(db/dt);
2702
daeda1cc 2703 if (dbdt > c_min_rate)
e8299874 2704 return true;
0f0601f4 2705 }
e8299874 2706 return false;
0f0601f4
LE
2707}
2708
bde89a9e 2709static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
b411b363 2710{
9f4fe9ad 2711 struct drbd_peer_device *peer_device;
b30ab791 2712 struct drbd_device *device;
b411b363 2713 sector_t sector;
4a76b161 2714 sector_t capacity;
db830c46 2715 struct drbd_peer_request *peer_req;
b411b363 2716 struct digest_info *di = NULL;
b18b37be 2717 int size, verb;
b411b363 2718 unsigned int fault_type;
e658983a 2719 struct p_block_req *p = pi->data;
4a76b161 2720
9f4fe9ad
AG
2721 peer_device = conn_peer_device(connection, pi->vnr);
2722 if (!peer_device)
4a76b161 2723 return -EIO;
9f4fe9ad 2724 device = peer_device->device;
b30ab791 2725 capacity = drbd_get_capacity(device->this_bdev);
b411b363
PR
2726
2727 sector = be64_to_cpu(p->sector);
2728 size = be32_to_cpu(p->blksize);
2729
c670a398 2730 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
d0180171 2731 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
b411b363 2732 (unsigned long long)sector, size);
82bc0194 2733 return -EINVAL;
b411b363
PR
2734 }
2735 if (sector + (size>>9) > capacity) {
d0180171 2736 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
b411b363 2737 (unsigned long long)sector, size);
82bc0194 2738 return -EINVAL;
b411b363
PR
2739 }
2740
b30ab791 2741 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
b18b37be 2742 verb = 1;
e2857216 2743 switch (pi->cmd) {
b18b37be 2744 case P_DATA_REQUEST:
69a22773 2745 drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
b18b37be 2746 break;
700ca8c0 2747 case P_RS_THIN_REQ:
b18b37be
PR
2748 case P_RS_DATA_REQUEST:
2749 case P_CSUM_RS_REQUEST:
2750 case P_OV_REQUEST:
69a22773 2751 drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
b18b37be
PR
2752 break;
2753 case P_OV_REPLY:
2754 verb = 0;
b30ab791 2755 dec_rs_pending(device);
69a22773 2756 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
b18b37be
PR
2757 break;
2758 default:
49ba9b1b 2759 BUG();
b18b37be
PR
2760 }
2761 if (verb && __ratelimit(&drbd_ratelimit_state))
d0180171 2762 drbd_err(device, "Can not satisfy peer's read request, "
b411b363 2763 "no local data.\n");
b18b37be 2764
a821cc4a 2765 /* drain possibly payload */
69a22773 2766 return drbd_drain_block(peer_device, pi->size);
b411b363
PR
2767 }
2768
2769 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2770 * "criss-cross" setup, that might cause write-out on some other DRBD,
2771 * which in turn might block on the other node at this very place. */
a0fb3c47 2772 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
9104d31a 2773 size, GFP_NOIO);
db830c46 2774 if (!peer_req) {
b30ab791 2775 put_ldev(device);
82bc0194 2776 return -ENOMEM;
b411b363
PR
2777 }
2778
e2857216 2779 switch (pi->cmd) {
b411b363 2780 case P_DATA_REQUEST:
a8cd15ba 2781 peer_req->w.cb = w_e_end_data_req;
b411b363 2782 fault_type = DRBD_FAULT_DT_RD;
80a40e43 2783 /* application IO, don't drbd_rs_begin_io */
21ae5d7f 2784 peer_req->flags |= EE_APPLICATION;
80a40e43
LE
2785 goto submit;
2786
700ca8c0
PR
2787 case P_RS_THIN_REQ:
2788 /* If at some point in the future we have a smart way to
2789 find out if this data block is completely deallocated,
2790 then we would do something smarter here than reading
2791 the block... */
2792 peer_req->flags |= EE_RS_THIN_REQ;
d769a992 2793 /* fall through */
b411b363 2794 case P_RS_DATA_REQUEST:
a8cd15ba 2795 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2796 fault_type = DRBD_FAULT_RS_RD;
5f9915bb 2797 /* used in the sector offset progress display */
b30ab791 2798 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2799 break;
2800
2801 case P_OV_REPLY:
2802 case P_CSUM_RS_REQUEST:
2803 fault_type = DRBD_FAULT_RS_RD;
e2857216 2804 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2805 if (!di)
2806 goto out_free_e;
2807
e2857216 2808 di->digest_size = pi->size;
b411b363
PR
2809 di->digest = (((char *)di)+sizeof(struct digest_info));
2810
db830c46
AG
2811 peer_req->digest = di;
2812 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2813
9f4fe9ad 2814 if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
b411b363
PR
2815 goto out_free_e;
2816
e2857216 2817 if (pi->cmd == P_CSUM_RS_REQUEST) {
9f4fe9ad 2818 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
a8cd15ba 2819 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb 2820 /* used in the sector offset progress display */
b30ab791 2821 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
aaaba345
LE
2822 /* remember to report stats in drbd_resync_finished */
2823 device->use_csums = true;
e2857216 2824 } else if (pi->cmd == P_OV_REPLY) {
2649f080 2825 /* track progress, we may need to throttle */
b30ab791 2826 atomic_add(size >> 9, &device->rs_sect_in);
a8cd15ba 2827 peer_req->w.cb = w_e_end_ov_reply;
b30ab791 2828 dec_rs_pending(device);
0f0601f4
LE
2829 /* drbd_rs_begin_io done when we sent this request,
2830 * but accounting still needs to be done. */
2831 goto submit_for_resync;
b411b363
PR
2832 }
2833 break;
2834
2835 case P_OV_REQUEST:
b30ab791 2836 if (device->ov_start_sector == ~(sector_t)0 &&
9f4fe9ad 2837 peer_device->connection->agreed_pro_version >= 90) {
de228bba
LE
2838 unsigned long now = jiffies;
2839 int i;
b30ab791
AG
2840 device->ov_start_sector = sector;
2841 device->ov_position = sector;
2842 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2843 device->rs_total = device->ov_left;
de228bba 2844 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
b30ab791
AG
2845 device->rs_mark_left[i] = device->ov_left;
2846 device->rs_mark_time[i] = now;
de228bba 2847 }
d0180171 2848 drbd_info(device, "Online Verify start sector: %llu\n",
b411b363
PR
2849 (unsigned long long)sector);
2850 }
a8cd15ba 2851 peer_req->w.cb = w_e_end_ov_req;
b411b363 2852 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2853 break;
2854
b411b363 2855 default:
49ba9b1b 2856 BUG();
b411b363
PR
2857 }
2858
0f0601f4
LE
2859 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2860 * wrt the receiver, but it is not as straightforward as it may seem.
2861 * Various places in the resync start and stop logic assume resync
2862 * requests are processed in order, requeuing this on the worker thread
2863 * introduces a bunch of new code for synchronization between threads.
2864 *
2865 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2866 * "forever", throttling after drbd_rs_begin_io will lock that extent
2867 * for application writes for the same time. For now, just throttle
2868 * here, where the rest of the code expects the receiver to sleep for
2869 * a while, anyways.
2870 */
2871
2872 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2873 * this defers syncer requests for some time, before letting at least
2874 * on request through. The resync controller on the receiving side
2875 * will adapt to the incoming rate accordingly.
2876 *
2877 * We cannot throttle here if remote is Primary/SyncTarget:
2878 * we would also throttle its application reads.
2879 * In that case, throttling is done on the SyncTarget only.
2880 */
c5a2c150
LE
2881
2882 /* Even though this may be a resync request, we do add to "read_ee";
2883 * "sync_ee" is only used for resync WRITEs.
2884 * Add to list early, so debugfs can find this request
2885 * even if we have to sleep below. */
2886 spin_lock_irq(&device->resource->req_lock);
2887 list_add_tail(&peer_req->w.list, &device->read_ee);
2888 spin_unlock_irq(&device->resource->req_lock);
2889
944410e9 2890 update_receiver_timing_details(connection, drbd_rs_should_slow_down);
ad3fee79
LE
2891 if (device->state.peer != R_PRIMARY
2892 && drbd_rs_should_slow_down(device, sector, false))
e3555d85 2893 schedule_timeout_uninterruptible(HZ/10);
944410e9 2894 update_receiver_timing_details(connection, drbd_rs_begin_io);
b30ab791 2895 if (drbd_rs_begin_io(device, sector))
80a40e43 2896 goto out_free_e;
b411b363 2897
0f0601f4 2898submit_for_resync:
b30ab791 2899 atomic_add(size >> 9, &device->rs_sect_ev);
0f0601f4 2900
80a40e43 2901submit:
944410e9 2902 update_receiver_timing_details(connection, drbd_submit_peer_request);
b30ab791 2903 inc_unacked(device);
bb3cc85e
MC
2904 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
2905 fault_type) == 0)
82bc0194 2906 return 0;
b411b363 2907
10f6d992 2908 /* don't care for the reason here */
d0180171 2909 drbd_err(device, "submit failed, triggering re-connect\n");
c5a2c150
LE
2910
2911out_free_e:
0500813f 2912 spin_lock_irq(&device->resource->req_lock);
a8cd15ba 2913 list_del(&peer_req->w.list);
0500813f 2914 spin_unlock_irq(&device->resource->req_lock);
22cc37a9
LE
2915 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2916
b30ab791
AG
2917 put_ldev(device);
2918 drbd_free_peer_req(device, peer_req);
82bc0194 2919 return -EIO;
b411b363
PR
2920}
2921
69a22773
AG
2922/**
2923 * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
2924 */
2925static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 2926{
69a22773 2927 struct drbd_device *device = peer_device->device;
b411b363
PR
2928 int self, peer, rv = -100;
2929 unsigned long ch_self, ch_peer;
44ed167d 2930 enum drbd_after_sb_p after_sb_0p;
b411b363 2931
b30ab791
AG
2932 self = device->ldev->md.uuid[UI_BITMAP] & 1;
2933 peer = device->p_uuid[UI_BITMAP] & 1;
b411b363 2934
b30ab791
AG
2935 ch_peer = device->p_uuid[UI_SIZE];
2936 ch_self = device->comm_bm_set;
b411b363 2937
44ed167d 2938 rcu_read_lock();
69a22773 2939 after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
44ed167d
PR
2940 rcu_read_unlock();
2941 switch (after_sb_0p) {
b411b363
PR
2942 case ASB_CONSENSUS:
2943 case ASB_DISCARD_SECONDARY:
2944 case ASB_CALL_HELPER:
44ed167d 2945 case ASB_VIOLENTLY:
d0180171 2946 drbd_err(device, "Configuration error.\n");
b411b363
PR
2947 break;
2948 case ASB_DISCONNECT:
2949 break;
2950 case ASB_DISCARD_YOUNGER_PRI:
2951 if (self == 0 && peer == 1) {
2952 rv = -1;
2953 break;
2954 }
2955 if (self == 1 && peer == 0) {
2956 rv = 1;
2957 break;
2958 }
2959 /* Else fall through to one of the other strategies... */
2960 case ASB_DISCARD_OLDER_PRI:
2961 if (self == 0 && peer == 1) {
2962 rv = 1;
2963 break;
2964 }
2965 if (self == 1 && peer == 0) {
2966 rv = -1;
2967 break;
2968 }
2969 /* Else fall through to one of the other strategies... */
d0180171 2970 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
b411b363 2971 "Using discard-least-changes instead\n");
d769a992 2972 /* fall through */
b411b363
PR
2973 case ASB_DISCARD_ZERO_CHG:
2974 if (ch_peer == 0 && ch_self == 0) {
69a22773 2975 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
b411b363
PR
2976 ? -1 : 1;
2977 break;
2978 } else {
2979 if (ch_peer == 0) { rv = 1; break; }
2980 if (ch_self == 0) { rv = -1; break; }
2981 }
44ed167d 2982 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363 2983 break;
d769a992 2984 /* else: fall through */
b411b363
PR
2985 case ASB_DISCARD_LEAST_CHG:
2986 if (ch_self < ch_peer)
2987 rv = -1;
2988 else if (ch_self > ch_peer)
2989 rv = 1;
2990 else /* ( ch_self == ch_peer ) */
2991 /* Well, then use something else. */
69a22773 2992 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
b411b363
PR
2993 ? -1 : 1;
2994 break;
2995 case ASB_DISCARD_LOCAL:
2996 rv = -1;
2997 break;
2998 case ASB_DISCARD_REMOTE:
2999 rv = 1;
3000 }
3001
3002 return rv;
3003}
3004
69a22773
AG
3005/**
3006 * drbd_asb_recover_1p - Recover after split-brain with one remaining primary
3007 */
3008static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3009{
69a22773 3010 struct drbd_device *device = peer_device->device;
6184ea21 3011 int hg, rv = -100;
44ed167d 3012 enum drbd_after_sb_p after_sb_1p;
b411b363 3013
44ed167d 3014 rcu_read_lock();
69a22773 3015 after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
44ed167d
PR
3016 rcu_read_unlock();
3017 switch (after_sb_1p) {
b411b363
PR
3018 case ASB_DISCARD_YOUNGER_PRI:
3019 case ASB_DISCARD_OLDER_PRI:
3020 case ASB_DISCARD_LEAST_CHG:
3021 case ASB_DISCARD_LOCAL:
3022 case ASB_DISCARD_REMOTE:
44ed167d 3023 case ASB_DISCARD_ZERO_CHG:
d0180171 3024 drbd_err(device, "Configuration error.\n");
b411b363
PR
3025 break;
3026 case ASB_DISCONNECT:
3027 break;
3028 case ASB_CONSENSUS:
69a22773 3029 hg = drbd_asb_recover_0p(peer_device);
b30ab791 3030 if (hg == -1 && device->state.role == R_SECONDARY)
b411b363 3031 rv = hg;
b30ab791 3032 if (hg == 1 && device->state.role == R_PRIMARY)
b411b363
PR
3033 rv = hg;
3034 break;
3035 case ASB_VIOLENTLY:
69a22773 3036 rv = drbd_asb_recover_0p(peer_device);
b411b363
PR
3037 break;
3038 case ASB_DISCARD_SECONDARY:
b30ab791 3039 return device->state.role == R_PRIMARY ? 1 : -1;
b411b363 3040 case ASB_CALL_HELPER:
69a22773 3041 hg = drbd_asb_recover_0p(peer_device);
b30ab791 3042 if (hg == -1 && device->state.role == R_PRIMARY) {
bb437946
AG
3043 enum drbd_state_rv rv2;
3044
b411b363
PR
3045 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3046 * we might be here in C_WF_REPORT_PARAMS which is transient.
3047 * we do not need to wait for the after state change work either. */
b30ab791 3048 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
bb437946 3049 if (rv2 != SS_SUCCESS) {
b30ab791 3050 drbd_khelper(device, "pri-lost-after-sb");
b411b363 3051 } else {
d0180171 3052 drbd_warn(device, "Successfully gave up primary role.\n");
b411b363
PR
3053 rv = hg;
3054 }
3055 } else
3056 rv = hg;
3057 }
3058
3059 return rv;
3060}
3061
69a22773
AG
3062/**
3063 * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
3064 */
3065static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
b411b363 3066{
69a22773 3067 struct drbd_device *device = peer_device->device;
6184ea21 3068 int hg, rv = -100;
44ed167d 3069 enum drbd_after_sb_p after_sb_2p;
b411b363 3070
44ed167d 3071 rcu_read_lock();
69a22773 3072 after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
44ed167d
PR
3073 rcu_read_unlock();
3074 switch (after_sb_2p) {
b411b363
PR
3075 case ASB_DISCARD_YOUNGER_PRI:
3076 case ASB_DISCARD_OLDER_PRI:
3077 case ASB_DISCARD_LEAST_CHG:
3078 case ASB_DISCARD_LOCAL:
3079 case ASB_DISCARD_REMOTE:
3080 case ASB_CONSENSUS:
3081 case ASB_DISCARD_SECONDARY:
44ed167d 3082 case ASB_DISCARD_ZERO_CHG:
d0180171 3083 drbd_err(device, "Configuration error.\n");
b411b363
PR
3084 break;
3085 case ASB_VIOLENTLY:
69a22773 3086 rv = drbd_asb_recover_0p(peer_device);
b411b363
PR
3087 break;
3088 case ASB_DISCONNECT:
3089 break;
3090 case ASB_CALL_HELPER:
69a22773 3091 hg = drbd_asb_recover_0p(peer_device);
b411b363 3092 if (hg == -1) {
bb437946
AG
3093 enum drbd_state_rv rv2;
3094
b411b363
PR
3095 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3096 * we might be here in C_WF_REPORT_PARAMS which is transient.
3097 * we do not need to wait for the after state change work either. */
b30ab791 3098 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
bb437946 3099 if (rv2 != SS_SUCCESS) {
b30ab791 3100 drbd_khelper(device, "pri-lost-after-sb");
b411b363 3101 } else {
d0180171 3102 drbd_warn(device, "Successfully gave up primary role.\n");
b411b363
PR
3103 rv = hg;
3104 }
3105 } else
3106 rv = hg;
3107 }
3108
3109 return rv;
3110}
3111
b30ab791 3112static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
b411b363
PR
3113 u64 bits, u64 flags)
3114{
3115 if (!uuid) {
d0180171 3116 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
b411b363
PR
3117 return;
3118 }
d0180171 3119 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
b411b363
PR
3120 text,
3121 (unsigned long long)uuid[UI_CURRENT],
3122 (unsigned long long)uuid[UI_BITMAP],
3123 (unsigned long long)uuid[UI_HISTORY_START],
3124 (unsigned long long)uuid[UI_HISTORY_END],
3125 (unsigned long long)bits,
3126 (unsigned long long)flags);
3127}
3128
3129/*
3130 100 after split brain try auto recover
3131 2 C_SYNC_SOURCE set BitMap
3132 1 C_SYNC_SOURCE use BitMap
3133 0 no Sync
3134 -1 C_SYNC_TARGET use BitMap
3135 -2 C_SYNC_TARGET set BitMap
3136 -100 after split brain, disconnect
3137-1000 unrelated data
4a23f264
PR
3138-1091 requires proto 91
3139-1096 requires proto 96
b411b363 3140 */
f2d3d75b
LE
3141
3142static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
b411b363 3143{
44a4d551
LE
3144 struct drbd_peer_device *const peer_device = first_peer_device(device);
3145 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
b411b363
PR
3146 u64 self, peer;
3147 int i, j;
3148
b30ab791
AG
3149 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3150 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3151
3152 *rule_nr = 10;
3153 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
3154 return 0;
3155
3156 *rule_nr = 20;
3157 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
3158 peer != UUID_JUST_CREATED)
3159 return -2;
3160
3161 *rule_nr = 30;
3162 if (self != UUID_JUST_CREATED &&
3163 (peer == UUID_JUST_CREATED || peer == (u64)0))
3164 return 2;
3165
3166 if (self == peer) {
3167 int rct, dc; /* roles at crash time */
3168
b30ab791 3169 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
b411b363 3170
44a4d551 3171 if (connection->agreed_pro_version < 91)
4a23f264 3172 return -1091;
b411b363 3173
b30ab791
AG
3174 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3175 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
d0180171 3176 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
b30ab791
AG
3177 drbd_uuid_move_history(device);
3178 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3179 device->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3180
b30ab791
AG
3181 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3182 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3183 *rule_nr = 34;
3184 } else {
d0180171 3185 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
b411b363
PR
3186 *rule_nr = 36;
3187 }
3188
3189 return 1;
3190 }
3191
b30ab791 3192 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
b411b363 3193
44a4d551 3194 if (connection->agreed_pro_version < 91)
4a23f264 3195 return -1091;
b411b363 3196
b30ab791
AG
3197 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3198 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
d0180171 3199 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
b411b363 3200
b30ab791
AG
3201 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3202 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3203 device->p_uuid[UI_BITMAP] = 0UL;
b411b363 3204
b30ab791 3205 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363
PR
3206 *rule_nr = 35;
3207 } else {
d0180171 3208 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
b411b363
PR
3209 *rule_nr = 37;
3210 }
3211
3212 return -1;
3213 }
3214
3215 /* Common power [off|failure] */
b30ab791
AG
3216 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3217 (device->p_uuid[UI_FLAGS] & 2);
b411b363
PR
3218 /* lowest bit is set when we were primary,
3219 * next bit (weight 2) is set when peer was primary */
3220 *rule_nr = 40;
3221
f2d3d75b
LE
3222 /* Neither has the "crashed primary" flag set,
3223 * only a replication link hickup. */
3224 if (rct == 0)
3225 return 0;
3226
3227 /* Current UUID equal and no bitmap uuid; does not necessarily
3228 * mean this was a "simultaneous hard crash", maybe IO was
3229 * frozen, so no UUID-bump happened.
3230 * This is a protocol change, overload DRBD_FF_WSAME as flag
3231 * for "new-enough" peer DRBD version. */
3232 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3233 *rule_nr = 41;
3234 if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3235 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3236 return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3237 }
3238 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3239 /* At least one has the "crashed primary" bit set,
3240 * both are primary now, but neither has rotated its UUIDs?
3241 * "Can not happen." */
3242 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3243 return -100;
3244 }
3245 if (device->state.role == R_PRIMARY)
3246 return 1;
3247 return -1;
3248 }
3249
3250 /* Both are secondary.
3251 * Really looks like recovery from simultaneous hard crash.
3252 * Check which had been primary before, and arbitrate. */
b411b363 3253 switch (rct) {
f2d3d75b 3254 case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
b411b363
PR
3255 case 1: /* self_pri && !peer_pri */ return 1;
3256 case 2: /* !self_pri && peer_pri */ return -1;
3257 case 3: /* self_pri && peer_pri */
44a4d551 3258 dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
b411b363
PR
3259 return dc ? -1 : 1;
3260 }
3261 }
3262
3263 *rule_nr = 50;
b30ab791 3264 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3265 if (self == peer)
3266 return -1;
3267
3268 *rule_nr = 51;
b30ab791 3269 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3270 if (self == peer) {
44a4d551 3271 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3272 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3273 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3274 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
3275 /* The last P_SYNC_UUID did not get though. Undo the last start of
3276 resync as sync source modifications of the peer's UUIDs. */
3277
44a4d551 3278 if (connection->agreed_pro_version < 91)
4a23f264 3279 return -1091;
b411b363 3280
b30ab791
AG
3281 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3282 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
4a23f264 3283
d0180171 3284 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
b30ab791 3285 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
4a23f264 3286
b411b363
PR
3287 return -1;
3288 }
3289 }
3290
3291 *rule_nr = 60;
b30ab791 3292 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
b411b363 3293 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3294 peer = device->p_uuid[i] & ~((u64)1);
b411b363
PR
3295 if (self == peer)
3296 return -2;
3297 }
3298
3299 *rule_nr = 70;
b30ab791
AG
3300 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3301 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
3302 if (self == peer)
3303 return 1;
3304
3305 *rule_nr = 71;
b30ab791 3306 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
b411b363 3307 if (self == peer) {
44a4d551 3308 if (connection->agreed_pro_version < 96 ?
b30ab791
AG
3309 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3310 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3311 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
3312 /* The last P_SYNC_UUID did not get though. Undo the last start of
3313 resync as sync source modifications of our UUIDs. */
3314
44a4d551 3315 if (connection->agreed_pro_version < 91)
4a23f264 3316 return -1091;
b411b363 3317
b30ab791
AG
3318 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3319 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 3320
d0180171 3321 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
b30ab791
AG
3322 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3323 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
b411b363
PR
3324
3325 return 1;
3326 }
3327 }
3328
3329
3330 *rule_nr = 80;
b30ab791 3331 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363 3332 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3333 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363
PR
3334 if (self == peer)
3335 return 2;
3336 }
3337
3338 *rule_nr = 90;
b30ab791
AG
3339 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3340 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
b411b363
PR
3341 if (self == peer && self != ((u64)0))
3342 return 100;
3343
3344 *rule_nr = 100;
3345 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
b30ab791 3346 self = device->ldev->md.uuid[i] & ~((u64)1);
b411b363 3347 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
b30ab791 3348 peer = device->p_uuid[j] & ~((u64)1);
b411b363
PR
3349 if (self == peer)
3350 return -100;
3351 }
3352 }
3353
3354 return -1000;
3355}
3356
3357/* drbd_sync_handshake() returns the new conn state on success, or
3358 CONN_MASK (-1) on failure.
3359 */
69a22773
AG
3360static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3361 enum drbd_role peer_role,
b411b363
PR
3362 enum drbd_disk_state peer_disk) __must_hold(local)
3363{
69a22773 3364 struct drbd_device *device = peer_device->device;
b411b363
PR
3365 enum drbd_conns rv = C_MASK;
3366 enum drbd_disk_state mydisk;
44ed167d 3367 struct net_conf *nc;
6dff2902 3368 int hg, rule_nr, rr_conflict, tentative;
b411b363 3369
b30ab791 3370 mydisk = device->state.disk;
b411b363 3371 if (mydisk == D_NEGOTIATING)
b30ab791 3372 mydisk = device->new_state_tmp.disk;
b411b363 3373
d0180171 3374 drbd_info(device, "drbd_sync_handshake:\n");
9f2247bb 3375
b30ab791
AG
3376 spin_lock_irq(&device->ldev->md.uuid_lock);
3377 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3378 drbd_uuid_dump(device, "peer", device->p_uuid,
3379 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
b411b363 3380
f2d3d75b 3381 hg = drbd_uuid_compare(device, peer_role, &rule_nr);
b30ab791 3382 spin_unlock_irq(&device->ldev->md.uuid_lock);
b411b363 3383
d0180171 3384 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
b411b363
PR
3385
3386 if (hg == -1000) {
d0180171 3387 drbd_alert(device, "Unrelated data, aborting!\n");
b411b363
PR
3388 return C_MASK;
3389 }
f2d3d75b
LE
3390 if (hg < -0x10000) {
3391 int proto, fflags;
3392 hg = -hg;
3393 proto = hg & 0xff;
3394 fflags = (hg >> 8) & 0xff;
3395 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3396 proto, fflags);
3397 return C_MASK;
3398 }
4a23f264 3399 if (hg < -1000) {
d0180171 3400 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
3401 return C_MASK;
3402 }
3403
3404 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3405 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
3406 int f = (hg == -100) || abs(hg) == 2;
3407 hg = mydisk > D_INCONSISTENT ? 1 : -1;
3408 if (f)
3409 hg = hg*2;
d0180171 3410 drbd_info(device, "Becoming sync %s due to disk states.\n",
b411b363
PR
3411 hg > 0 ? "source" : "target");
3412 }
3413
3a11a487 3414 if (abs(hg) == 100)
b30ab791 3415 drbd_khelper(device, "initial-split-brain");
3a11a487 3416
44ed167d 3417 rcu_read_lock();
69a22773 3418 nc = rcu_dereference(peer_device->connection->net_conf);
44ed167d
PR
3419
3420 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
b30ab791 3421 int pcount = (device->state.role == R_PRIMARY)
b411b363
PR
3422 + (peer_role == R_PRIMARY);
3423 int forced = (hg == -100);
3424
3425 switch (pcount) {
3426 case 0:
69a22773 3427 hg = drbd_asb_recover_0p(peer_device);
b411b363
PR
3428 break;
3429 case 1:
69a22773 3430 hg = drbd_asb_recover_1p(peer_device);
b411b363
PR
3431 break;
3432 case 2:
69a22773 3433 hg = drbd_asb_recover_2p(peer_device);
b411b363
PR
3434 break;
3435 }
3436 if (abs(hg) < 100) {
d0180171 3437 drbd_warn(device, "Split-Brain detected, %d primaries, "
b411b363
PR
3438 "automatically solved. Sync from %s node\n",
3439 pcount, (hg < 0) ? "peer" : "this");
3440 if (forced) {
d0180171 3441 drbd_warn(device, "Doing a full sync, since"
b411b363
PR
3442 " UUIDs where ambiguous.\n");
3443 hg = hg*2;
3444 }
3445 }
3446 }
3447
3448 if (hg == -100) {
b30ab791 3449 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
b411b363 3450 hg = -1;
b30ab791 3451 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
b411b363
PR
3452 hg = 1;
3453
3454 if (abs(hg) < 100)
d0180171 3455 drbd_warn(device, "Split-Brain detected, manually solved. "
b411b363
PR
3456 "Sync from %s node\n",
3457 (hg < 0) ? "peer" : "this");
3458 }
44ed167d 3459 rr_conflict = nc->rr_conflict;
6dff2902 3460 tentative = nc->tentative;
44ed167d 3461 rcu_read_unlock();
b411b363
PR
3462
3463 if (hg == -100) {
580b9767
LE
3464 /* FIXME this log message is not correct if we end up here
3465 * after an attempted attach on a diskless node.
3466 * We just refuse to attach -- well, we drop the "connection"
3467 * to that disk, in a way... */
d0180171 3468 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
b30ab791 3469 drbd_khelper(device, "split-brain");
b411b363
PR
3470 return C_MASK;
3471 }
3472
3473 if (hg > 0 && mydisk <= D_INCONSISTENT) {
d0180171 3474 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
b411b363
PR
3475 return C_MASK;
3476 }
3477
3478 if (hg < 0 && /* by intention we do not use mydisk here. */
b30ab791 3479 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
44ed167d 3480 switch (rr_conflict) {
b411b363 3481 case ASB_CALL_HELPER:
b30ab791 3482 drbd_khelper(device, "pri-lost");
b411b363
PR
3483 /* fall through */
3484 case ASB_DISCONNECT:
d0180171 3485 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
b411b363
PR
3486 return C_MASK;
3487 case ASB_VIOLENTLY:
d0180171 3488 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
b411b363
PR
3489 "assumption\n");
3490 }
3491 }
3492
69a22773 3493 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
cf14c2e9 3494 if (hg == 0)
d0180171 3495 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
cf14c2e9 3496 else
d0180171 3497 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
cf14c2e9
PR
3498 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3499 abs(hg) >= 2 ? "full" : "bit-map based");
3500 return C_MASK;
3501 }
3502
b411b363 3503 if (abs(hg) >= 2) {
d0180171 3504 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
b30ab791 3505 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
20ceb2b2 3506 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3507 return C_MASK;
3508 }
3509
3510 if (hg > 0) { /* become sync source. */
3511 rv = C_WF_BITMAP_S;
3512 } else if (hg < 0) { /* become sync target */
3513 rv = C_WF_BITMAP_T;
3514 } else {
3515 rv = C_CONNECTED;
b30ab791 3516 if (drbd_bm_total_weight(device)) {
d0180171 3517 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
b30ab791 3518 drbd_bm_total_weight(device));
b411b363
PR
3519 }
3520 }
3521
3522 return rv;
3523}
3524
f179d76d 3525static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3526{
3527 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3528 if (peer == ASB_DISCARD_REMOTE)
3529 return ASB_DISCARD_LOCAL;
b411b363
PR
3530
3531 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3532 if (peer == ASB_DISCARD_LOCAL)
3533 return ASB_DISCARD_REMOTE;
b411b363
PR
3534
3535 /* everything else is valid if they are equal on both sides. */
f179d76d 3536 return peer;
b411b363
PR
3537}
3538
bde89a9e 3539static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3540{
e658983a 3541 struct p_protocol *p = pi->data;
036b17ea
PR
3542 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3543 int p_proto, p_discard_my_data, p_two_primaries, cf;
3544 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3545 char integrity_alg[SHARED_SECRET_MAX] = "";
9534d671 3546 struct crypto_ahash *peer_integrity_tfm = NULL;
7aca6c75 3547 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3548
b411b363
PR
3549 p_proto = be32_to_cpu(p->protocol);
3550 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3551 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3552 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3553 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3554 cf = be32_to_cpu(p->conn_flags);
6139f60d 3555 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3556
bde89a9e 3557 if (connection->agreed_pro_version >= 87) {
86db0618 3558 int err;
cf14c2e9 3559
88104ca4 3560 if (pi->size > sizeof(integrity_alg))
86db0618 3561 return -EIO;
bde89a9e 3562 err = drbd_recv_all(connection, integrity_alg, pi->size);
86db0618
AG
3563 if (err)
3564 return err;
036b17ea 3565 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3566 }
3567
7d4c782c 3568 if (pi->cmd != P_PROTOCOL_UPDATE) {
bde89a9e 3569 clear_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3570
fbc12f45 3571 if (cf & CF_DRY_RUN)
bde89a9e 3572 set_bit(CONN_DRY_RUN, &connection->flags);
b411b363 3573
fbc12f45 3574 rcu_read_lock();
bde89a9e 3575 nc = rcu_dereference(connection->net_conf);
b411b363 3576
fbc12f45 3577 if (p_proto != nc->wire_protocol) {
1ec861eb 3578 drbd_err(connection, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3579 goto disconnect_rcu_unlock;
3580 }
b411b363 3581
fbc12f45 3582 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
1ec861eb 3583 drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3584 goto disconnect_rcu_unlock;
3585 }
b411b363 3586
fbc12f45 3587 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
1ec861eb 3588 drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3589 goto disconnect_rcu_unlock;
3590 }
b411b363 3591
fbc12f45 3592 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
1ec861eb 3593 drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3594 goto disconnect_rcu_unlock;
3595 }
b411b363 3596
fbc12f45 3597 if (p_discard_my_data && nc->discard_my_data) {
1ec861eb 3598 drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3599 goto disconnect_rcu_unlock;
3600 }
b411b363 3601
fbc12f45 3602 if (p_two_primaries != nc->two_primaries) {
1ec861eb 3603 drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3604 goto disconnect_rcu_unlock;
3605 }
b411b363 3606
fbc12f45 3607 if (strcmp(integrity_alg, nc->integrity_alg)) {
1ec861eb 3608 drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3609 goto disconnect_rcu_unlock;
3610 }
b411b363 3611
fbc12f45 3612 rcu_read_unlock();
b411b363
PR
3613 }
3614
7d4c782c
AG
3615 if (integrity_alg[0]) {
3616 int hash_size;
3617
3618 /*
3619 * We can only change the peer data integrity algorithm
3620 * here. Changing our own data integrity algorithm
3621 * requires that we send a P_PROTOCOL_UPDATE packet at
3622 * the same time; otherwise, the peer has no way to
3623 * tell between which packets the algorithm should
3624 * change.
3625 */
b411b363 3626
9534d671 3627 peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
1b57e663
LE
3628 if (IS_ERR(peer_integrity_tfm)) {
3629 peer_integrity_tfm = NULL;
1ec861eb 3630 drbd_err(connection, "peer data-integrity-alg %s not supported\n",
7d4c782c
AG
3631 integrity_alg);
3632 goto disconnect;
3633 }
b411b363 3634
9534d671 3635 hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
7d4c782c
AG
3636 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3637 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3638 if (!(int_dig_in && int_dig_vv)) {
1ec861eb 3639 drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3640 goto disconnect;
3641 }
b411b363
PR
3642 }
3643
7d4c782c
AG
3644 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3645 if (!new_net_conf) {
1ec861eb 3646 drbd_err(connection, "Allocation of new net_conf failed\n");
7d4c782c
AG
3647 goto disconnect;
3648 }
3649
bde89a9e 3650 mutex_lock(&connection->data.mutex);
0500813f 3651 mutex_lock(&connection->resource->conf_update);
bde89a9e 3652 old_net_conf = connection->net_conf;
7d4c782c
AG
3653 *new_net_conf = *old_net_conf;
3654
3655 new_net_conf->wire_protocol = p_proto;
3656 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3657 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3658 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3659 new_net_conf->two_primaries = p_two_primaries;
3660
bde89a9e 3661 rcu_assign_pointer(connection->net_conf, new_net_conf);
0500813f 3662 mutex_unlock(&connection->resource->conf_update);
bde89a9e 3663 mutex_unlock(&connection->data.mutex);
7d4c782c 3664
9534d671 3665 crypto_free_ahash(connection->peer_integrity_tfm);
bde89a9e
AG
3666 kfree(connection->int_dig_in);
3667 kfree(connection->int_dig_vv);
3668 connection->peer_integrity_tfm = peer_integrity_tfm;
3669 connection->int_dig_in = int_dig_in;
3670 connection->int_dig_vv = int_dig_vv;
7d4c782c
AG
3671
3672 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
1ec861eb 3673 drbd_info(connection, "peer data-integrity-alg: %s\n",
7d4c782c
AG
3674 integrity_alg[0] ? integrity_alg : "(none)");
3675
3676 synchronize_rcu();
3677 kfree(old_net_conf);
82bc0194 3678 return 0;
b411b363 3679
44ed167d
PR
3680disconnect_rcu_unlock:
3681 rcu_read_unlock();
b411b363 3682disconnect:
9534d671 3683 crypto_free_ahash(peer_integrity_tfm);
036b17ea
PR
3684 kfree(int_dig_in);
3685 kfree(int_dig_vv);
bde89a9e 3686 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3687 return -EIO;
b411b363
PR
3688}
3689
3690/* helper function
3691 * input: alg name, feature name
3692 * return: NULL (alg name was "")
3693 * ERR_PTR(error) if something goes wrong
3694 * or the crypto hash ptr, if it worked out ok. */
9534d671 3695static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
b411b363
PR
3696 const char *alg, const char *name)
3697{
9534d671 3698 struct crypto_ahash *tfm;
b411b363
PR
3699
3700 if (!alg[0])
3701 return NULL;
3702
9534d671 3703 tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
b411b363 3704 if (IS_ERR(tfm)) {
d0180171 3705 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
b411b363
PR
3706 alg, name, PTR_ERR(tfm));
3707 return tfm;
3708 }
b411b363
PR
3709 return tfm;
3710}
3711
bde89a9e 3712static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3713{
bde89a9e 3714 void *buffer = connection->data.rbuf;
4a76b161
AG
3715 int size = pi->size;
3716
3717 while (size) {
3718 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
bde89a9e 3719 s = drbd_recv(connection, buffer, s);
4a76b161
AG
3720 if (s <= 0) {
3721 if (s < 0)
3722 return s;
3723 break;
3724 }
3725 size -= s;
3726 }
3727 if (size)
3728 return -EIO;
3729 return 0;
3730}
3731
3732/*
3733 * config_unknown_volume - device configuration command for unknown volume
3734 *
3735 * When a device is added to an existing connection, the node on which the
3736 * device is added first will send configuration commands to its peer but the
3737 * peer will not know about the device yet. It will warn and ignore these
3738 * commands. Once the device is added on the second node, the second node will
3739 * send the same device configuration commands, but in the other direction.
3740 *
3741 * (We can also end up here if drbd is misconfigured.)
3742 */
bde89a9e 3743static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
4a76b161 3744{
1ec861eb 3745 drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
2fcb8f30 3746 cmdname(pi->cmd), pi->vnr);
bde89a9e 3747 return ignore_remaining_packet(connection, pi);
4a76b161
AG
3748}
3749
bde89a9e 3750static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3751{
9f4fe9ad 3752 struct drbd_peer_device *peer_device;
b30ab791 3753 struct drbd_device *device;
e658983a 3754 struct p_rs_param_95 *p;
b411b363 3755 unsigned int header_size, data_size, exp_max_sz;
9534d671
HX
3756 struct crypto_ahash *verify_tfm = NULL;
3757 struct crypto_ahash *csums_tfm = NULL;
2ec91e0e 3758 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3759 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
bde89a9e 3760 const int apv = connection->agreed_pro_version;
813472ce 3761 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
778f271d 3762 int fifo_size = 0;
82bc0194 3763 int err;
b411b363 3764
9f4fe9ad
AG
3765 peer_device = conn_peer_device(connection, pi->vnr);
3766 if (!peer_device)
bde89a9e 3767 return config_unknown_volume(connection, pi);
9f4fe9ad 3768 device = peer_device->device;
b411b363
PR
3769
3770 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3771 : apv == 88 ? sizeof(struct p_rs_param)
3772 + SHARED_SECRET_MAX
8e26f9cc
PR
3773 : apv <= 94 ? sizeof(struct p_rs_param_89)
3774 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3775
e2857216 3776 if (pi->size > exp_max_sz) {
d0180171 3777 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3778 pi->size, exp_max_sz);
82bc0194 3779 return -EIO;
b411b363
PR
3780 }
3781
3782 if (apv <= 88) {
e658983a 3783 header_size = sizeof(struct p_rs_param);
e2857216 3784 data_size = pi->size - header_size;
8e26f9cc 3785 } else if (apv <= 94) {
e658983a 3786 header_size = sizeof(struct p_rs_param_89);
e2857216 3787 data_size = pi->size - header_size;
0b0ba1ef 3788 D_ASSERT(device, data_size == 0);
8e26f9cc 3789 } else {
e658983a 3790 header_size = sizeof(struct p_rs_param_95);
e2857216 3791 data_size = pi->size - header_size;
0b0ba1ef 3792 D_ASSERT(device, data_size == 0);
b411b363
PR
3793 }
3794
3795 /* initialize verify_alg and csums_alg */
e658983a 3796 p = pi->data;
b411b363
PR
3797 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3798
9f4fe9ad 3799 err = drbd_recv_all(peer_device->connection, p, header_size);
82bc0194
AG
3800 if (err)
3801 return err;
b411b363 3802
0500813f 3803 mutex_lock(&connection->resource->conf_update);
9f4fe9ad 3804 old_net_conf = peer_device->connection->net_conf;
b30ab791 3805 if (get_ldev(device)) {
813472ce
PR
3806 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3807 if (!new_disk_conf) {
b30ab791 3808 put_ldev(device);
0500813f 3809 mutex_unlock(&connection->resource->conf_update);
d0180171 3810 drbd_err(device, "Allocation of new disk_conf failed\n");
813472ce
PR
3811 return -ENOMEM;
3812 }
daeda1cc 3813
b30ab791 3814 old_disk_conf = device->ldev->disk_conf;
813472ce 3815 *new_disk_conf = *old_disk_conf;
b411b363 3816
6394b935 3817 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3818 }
b411b363
PR
3819
3820 if (apv >= 88) {
3821 if (apv == 88) {
5de73827 3822 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
d0180171 3823 drbd_err(device, "verify-alg of wrong size, "
5de73827
PR
3824 "peer wants %u, accepting only up to %u byte\n",
3825 data_size, SHARED_SECRET_MAX);
813472ce
PR
3826 err = -EIO;
3827 goto reconnect;
b411b363
PR
3828 }
3829
9f4fe9ad 3830 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
813472ce
PR
3831 if (err)
3832 goto reconnect;
b411b363
PR
3833 /* we expect NUL terminated string */
3834 /* but just in case someone tries to be evil */
0b0ba1ef 3835 D_ASSERT(device, p->verify_alg[data_size-1] == 0);
b411b363
PR
3836 p->verify_alg[data_size-1] = 0;
3837
3838 } else /* apv >= 89 */ {
3839 /* we still expect NUL terminated strings */
3840 /* but just in case someone tries to be evil */
0b0ba1ef
AG
3841 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3842 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
b411b363
PR
3843 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3844 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3845 }
3846
2ec91e0e 3847 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b30ab791 3848 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3849 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3850 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3851 goto disconnect;
3852 }
b30ab791 3853 verify_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3854 p->verify_alg, "verify-alg");
3855 if (IS_ERR(verify_tfm)) {
3856 verify_tfm = NULL;
3857 goto disconnect;
3858 }
3859 }
3860
2ec91e0e 3861 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b30ab791 3862 if (device->state.conn == C_WF_REPORT_PARAMS) {
d0180171 3863 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3864 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3865 goto disconnect;
3866 }
b30ab791 3867 csums_tfm = drbd_crypto_alloc_digest_safe(device,
b411b363
PR
3868 p->csums_alg, "csums-alg");
3869 if (IS_ERR(csums_tfm)) {
3870 csums_tfm = NULL;
3871 goto disconnect;
3872 }
3873 }
3874
813472ce 3875 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3876 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3877 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3878 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3879 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3880
daeda1cc 3881 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
b30ab791 3882 if (fifo_size != device->rs_plan_s->size) {
813472ce
PR
3883 new_plan = fifo_alloc(fifo_size);
3884 if (!new_plan) {
d0180171 3885 drbd_err(device, "kmalloc of fifo_buffer failed");
b30ab791 3886 put_ldev(device);
778f271d
PR
3887 goto disconnect;
3888 }
3889 }
8e26f9cc 3890 }
b411b363 3891
91fd4dad 3892 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
3893 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3894 if (!new_net_conf) {
d0180171 3895 drbd_err(device, "Allocation of new net_conf failed\n");
91fd4dad
PR
3896 goto disconnect;
3897 }
3898
2ec91e0e 3899 *new_net_conf = *old_net_conf;
91fd4dad
PR
3900
3901 if (verify_tfm) {
2ec91e0e
PR
3902 strcpy(new_net_conf->verify_alg, p->verify_alg);
3903 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
9534d671 3904 crypto_free_ahash(peer_device->connection->verify_tfm);
9f4fe9ad 3905 peer_device->connection->verify_tfm = verify_tfm;
d0180171 3906 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
91fd4dad
PR
3907 }
3908 if (csums_tfm) {
2ec91e0e
PR
3909 strcpy(new_net_conf->csums_alg, p->csums_alg);
3910 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
9534d671 3911 crypto_free_ahash(peer_device->connection->csums_tfm);
9f4fe9ad 3912 peer_device->connection->csums_tfm = csums_tfm;
d0180171 3913 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
91fd4dad 3914 }
bde89a9e 3915 rcu_assign_pointer(connection->net_conf, new_net_conf);
778f271d 3916 }
b411b363
PR
3917 }
3918
813472ce 3919 if (new_disk_conf) {
b30ab791
AG
3920 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3921 put_ldev(device);
813472ce
PR
3922 }
3923
3924 if (new_plan) {
b30ab791
AG
3925 old_plan = device->rs_plan_s;
3926 rcu_assign_pointer(device->rs_plan_s, new_plan);
b411b363 3927 }
daeda1cc 3928
0500813f 3929 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
3930 synchronize_rcu();
3931 if (new_net_conf)
3932 kfree(old_net_conf);
3933 kfree(old_disk_conf);
813472ce 3934 kfree(old_plan);
daeda1cc 3935
82bc0194 3936 return 0;
b411b363 3937
813472ce
PR
3938reconnect:
3939 if (new_disk_conf) {
b30ab791 3940 put_ldev(device);
813472ce
PR
3941 kfree(new_disk_conf);
3942 }
0500813f 3943 mutex_unlock(&connection->resource->conf_update);
813472ce
PR
3944 return -EIO;
3945
b411b363 3946disconnect:
813472ce
PR
3947 kfree(new_plan);
3948 if (new_disk_conf) {
b30ab791 3949 put_ldev(device);
813472ce
PR
3950 kfree(new_disk_conf);
3951 }
0500813f 3952 mutex_unlock(&connection->resource->conf_update);
b411b363
PR
3953 /* just for completeness: actually not needed,
3954 * as this is not reached if csums_tfm was ok. */
9534d671 3955 crypto_free_ahash(csums_tfm);
b411b363 3956 /* but free the verify_tfm again, if csums_tfm did not work out */
9534d671 3957 crypto_free_ahash(verify_tfm);
9f4fe9ad 3958 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3959 return -EIO;
b411b363
PR
3960}
3961
b411b363 3962/* warn if the arguments differ by more than 12.5% */
b30ab791 3963static void warn_if_differ_considerably(struct drbd_device *device,
b411b363
PR
3964 const char *s, sector_t a, sector_t b)
3965{
3966 sector_t d;
3967 if (a == 0 || b == 0)
3968 return;
3969 d = (a > b) ? (a - b) : (b - a);
3970 if (d > (a>>3) || d > (b>>3))
d0180171 3971 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
b411b363
PR
3972 (unsigned long long)a, (unsigned long long)b);
3973}
3974
bde89a9e 3975static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
b411b363 3976{
9f4fe9ad 3977 struct drbd_peer_device *peer_device;
b30ab791 3978 struct drbd_device *device;
e658983a 3979 struct p_sizes *p = pi->data;
9104d31a 3980 struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
e96c9633 3981 enum determine_dev_size dd = DS_UNCHANGED;
6a8d68b1 3982 sector_t p_size, p_usize, p_csize, my_usize;
b411b363 3983 int ldsc = 0; /* local disk size changed */
e89b591c 3984 enum dds_flags ddsf;
b411b363 3985
9f4fe9ad
AG
3986 peer_device = conn_peer_device(connection, pi->vnr);
3987 if (!peer_device)
bde89a9e 3988 return config_unknown_volume(connection, pi);
9f4fe9ad 3989 device = peer_device->device;
4a76b161 3990
b411b363
PR
3991 p_size = be64_to_cpu(p->d_size);
3992 p_usize = be64_to_cpu(p->u_size);
6a8d68b1 3993 p_csize = be64_to_cpu(p->c_size);
b411b363 3994
b411b363
PR
3995 /* just store the peer's disk size for now.
3996 * we still need to figure out whether we accept that. */
b30ab791 3997 device->p_size = p_size;
b411b363 3998
b30ab791 3999 if (get_ldev(device)) {
60bac040 4000 sector_t new_size, cur_size;
daeda1cc 4001 rcu_read_lock();
b30ab791 4002 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
daeda1cc
PR
4003 rcu_read_unlock();
4004
b30ab791
AG
4005 warn_if_differ_considerably(device, "lower level device sizes",
4006 p_size, drbd_get_max_capacity(device->ldev));
4007 warn_if_differ_considerably(device, "user requested size",
daeda1cc 4008 p_usize, my_usize);
b411b363
PR
4009
4010 /* if this is the first connect, or an otherwise expected
4011 * param exchange, choose the minimum */
b30ab791 4012 if (device->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 4013 p_usize = min_not_zero(my_usize, p_usize);
b411b363
PR
4014
4015 /* Never shrink a device with usable data during connect.
4016 But allow online shrinking if we are connected. */
60bac040
LE
4017 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
4018 cur_size = drbd_get_capacity(device->this_bdev);
4019 if (new_size < cur_size &&
b30ab791
AG
4020 device->state.disk >= D_OUTDATED &&
4021 device->state.conn < C_CONNECTED) {
60bac040
LE
4022 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4023 (unsigned long long)new_size, (unsigned long long)cur_size);
9f4fe9ad 4024 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
b30ab791 4025 put_ldev(device);
82bc0194 4026 return -EIO;
b411b363 4027 }
daeda1cc
PR
4028
4029 if (my_usize != p_usize) {
4030 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
4031
4032 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
4033 if (!new_disk_conf) {
d0180171 4034 drbd_err(device, "Allocation of new disk_conf failed\n");
b30ab791 4035 put_ldev(device);
daeda1cc
PR
4036 return -ENOMEM;
4037 }
4038
0500813f 4039 mutex_lock(&connection->resource->conf_update);
b30ab791 4040 old_disk_conf = device->ldev->disk_conf;
daeda1cc
PR
4041 *new_disk_conf = *old_disk_conf;
4042 new_disk_conf->disk_size = p_usize;
4043
b30ab791 4044 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
0500813f 4045 mutex_unlock(&connection->resource->conf_update);
daeda1cc
PR
4046 synchronize_rcu();
4047 kfree(old_disk_conf);
4048
d0180171 4049 drbd_info(device, "Peer sets u_size to %lu sectors\n",
daeda1cc 4050 (unsigned long)my_usize);
b411b363 4051 }
daeda1cc 4052
b30ab791 4053 put_ldev(device);
b411b363 4054 }
b411b363 4055
20c68fde 4056 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
dd4f699d 4057 /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
20c68fde 4058 In case we cleared the QUEUE_FLAG_DISCARD from our queue in
dd4f699d 4059 drbd_reconsider_queue_parameters(), we can be sure that after
20c68fde
LE
4060 drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
4061
e89b591c 4062 ddsf = be16_to_cpu(p->dds_flags);
b30ab791 4063 if (get_ldev(device)) {
9104d31a 4064 drbd_reconsider_queue_parameters(device, device->ldev, o);
b30ab791
AG
4065 dd = drbd_determine_dev_size(device, ddsf, NULL);
4066 put_ldev(device);
e96c9633 4067 if (dd == DS_ERROR)
82bc0194 4068 return -EIO;
b30ab791 4069 drbd_md_sync(device);
b411b363 4070 } else {
6a8d68b1
LE
4071 /*
4072 * I am diskless, need to accept the peer's *current* size.
4073 * I must NOT accept the peers backing disk size,
4074 * it may have been larger than mine all along...
4075 *
4076 * At this point, the peer knows more about my disk, or at
4077 * least about what we last agreed upon, than myself.
4078 * So if his c_size is less than his d_size, the most likely
4079 * reason is that *my* d_size was smaller last time we checked.
4080 *
4081 * However, if he sends a zero current size,
4082 * take his (user-capped or) backing disk size anyways.
4083 */
9104d31a 4084 drbd_reconsider_queue_parameters(device, NULL, o);
6a8d68b1 4085 drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
b411b363
PR
4086 }
4087
b30ab791
AG
4088 if (get_ldev(device)) {
4089 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4090 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
b411b363
PR
4091 ldsc = 1;
4092 }
4093
b30ab791 4094 put_ldev(device);
b411b363
PR
4095 }
4096
b30ab791 4097 if (device->state.conn > C_WF_REPORT_PARAMS) {
b411b363 4098 if (be64_to_cpu(p->c_size) !=
b30ab791 4099 drbd_get_capacity(device->this_bdev) || ldsc) {
b411b363
PR
4100 /* we have different sizes, probably peer
4101 * needs to know my new size... */
69a22773 4102 drbd_send_sizes(peer_device, 0, ddsf);
b411b363 4103 }
b30ab791
AG
4104 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4105 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4106 if (device->state.pdsk >= D_INCONSISTENT &&
4107 device->state.disk >= D_INCONSISTENT) {
e89b591c 4108 if (ddsf & DDSF_NO_RESYNC)
d0180171 4109 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
e89b591c 4110 else
b30ab791 4111 resync_after_online_grow(device);
e89b591c 4112 } else
b30ab791 4113 set_bit(RESYNC_AFTER_NEG, &device->flags);
b411b363
PR
4114 }
4115 }
4116
82bc0194 4117 return 0;
b411b363
PR
4118}
4119
bde89a9e 4120static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4121{
9f4fe9ad 4122 struct drbd_peer_device *peer_device;
b30ab791 4123 struct drbd_device *device;
e658983a 4124 struct p_uuids *p = pi->data;
b411b363 4125 u64 *p_uuid;
62b0da3a 4126 int i, updated_uuids = 0;
b411b363 4127
9f4fe9ad
AG
4128 peer_device = conn_peer_device(connection, pi->vnr);
4129 if (!peer_device)
bde89a9e 4130 return config_unknown_volume(connection, pi);
9f4fe9ad 4131 device = peer_device->device;
4a76b161 4132
365cf663 4133 p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
063eacf8 4134 if (!p_uuid) {
d0180171 4135 drbd_err(device, "kmalloc of p_uuid failed\n");
063eacf8
JW
4136 return false;
4137 }
b411b363
PR
4138
4139 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
4140 p_uuid[i] = be64_to_cpu(p->uuid[i]);
4141
b30ab791
AG
4142 kfree(device->p_uuid);
4143 device->p_uuid = p_uuid;
b411b363 4144
b30ab791
AG
4145 if (device->state.conn < C_CONNECTED &&
4146 device->state.disk < D_INCONSISTENT &&
4147 device->state.role == R_PRIMARY &&
4148 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
d0180171 4149 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
b30ab791 4150 (unsigned long long)device->ed_uuid);
9f4fe9ad 4151 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4152 return -EIO;
b411b363
PR
4153 }
4154
b30ab791 4155 if (get_ldev(device)) {
b411b363 4156 int skip_initial_sync =
b30ab791 4157 device->state.conn == C_CONNECTED &&
9f4fe9ad 4158 peer_device->connection->agreed_pro_version >= 90 &&
b30ab791 4159 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
b411b363
PR
4160 (p_uuid[UI_FLAGS] & 8);
4161 if (skip_initial_sync) {
d0180171 4162 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
b30ab791 4163 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
20ceb2b2
LE
4164 "clear_n_write from receive_uuids",
4165 BM_LOCKED_TEST_ALLOWED);
b30ab791
AG
4166 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4167 _drbd_uuid_set(device, UI_BITMAP, 0);
4168 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
b411b363 4169 CS_VERBOSE, NULL);
b30ab791 4170 drbd_md_sync(device);
62b0da3a 4171 updated_uuids = 1;
b411b363 4172 }
b30ab791
AG
4173 put_ldev(device);
4174 } else if (device->state.disk < D_INCONSISTENT &&
4175 device->state.role == R_PRIMARY) {
18a50fa2
PR
4176 /* I am a diskless primary, the peer just created a new current UUID
4177 for me. */
b30ab791 4178 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
b411b363
PR
4179 }
4180
4181 /* Before we test for the disk state, we should wait until an eventually
4182 ongoing cluster wide state change is finished. That is important if
4183 we are primary and are detaching from our disk. We need to see the
4184 new disk state... */
b30ab791
AG
4185 mutex_lock(device->state_mutex);
4186 mutex_unlock(device->state_mutex);
4187 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4188 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
62b0da3a
LE
4189
4190 if (updated_uuids)
b30ab791 4191 drbd_print_uuids(device, "receiver updated UUIDs to");
b411b363 4192
82bc0194 4193 return 0;
b411b363
PR
4194}
4195
4196/**
4197 * convert_state() - Converts the peer's view of the cluster state to our point of view
4198 * @ps: The state as seen by the peer.
4199 */
4200static union drbd_state convert_state(union drbd_state ps)
4201{
4202 union drbd_state ms;
4203
4204 static enum drbd_conns c_tab[] = {
369bea63 4205 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
4206 [C_CONNECTED] = C_CONNECTED,
4207
4208 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
4209 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
4210 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
4211 [C_VERIFY_S] = C_VERIFY_T,
4212 [C_MASK] = C_MASK,
4213 };
4214
4215 ms.i = ps.i;
4216
4217 ms.conn = c_tab[ps.conn];
4218 ms.peer = ps.role;
4219 ms.role = ps.peer;
4220 ms.pdsk = ps.disk;
4221 ms.disk = ps.pdsk;
4222 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
4223
4224 return ms;
4225}
4226
bde89a9e 4227static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4228{
9f4fe9ad 4229 struct drbd_peer_device *peer_device;
b30ab791 4230 struct drbd_device *device;
e658983a 4231 struct p_req_state *p = pi->data;
b411b363 4232 union drbd_state mask, val;
bf885f8a 4233 enum drbd_state_rv rv;
b411b363 4234
9f4fe9ad
AG
4235 peer_device = conn_peer_device(connection, pi->vnr);
4236 if (!peer_device)
4a76b161 4237 return -EIO;
9f4fe9ad 4238 device = peer_device->device;
4a76b161 4239
b411b363
PR
4240 mask.i = be32_to_cpu(p->mask);
4241 val.i = be32_to_cpu(p->val);
4242
9f4fe9ad 4243 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
b30ab791 4244 mutex_is_locked(device->state_mutex)) {
69a22773 4245 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
82bc0194 4246 return 0;
b411b363
PR
4247 }
4248
4249 mask = convert_state(mask);
4250 val = convert_state(val);
4251
b30ab791 4252 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
69a22773 4253 drbd_send_sr_reply(peer_device, rv);
b411b363 4254
b30ab791 4255 drbd_md_sync(device);
b411b363 4256
82bc0194 4257 return 0;
b411b363
PR
4258}
4259
bde89a9e 4260static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4261{
e658983a 4262 struct p_req_state *p = pi->data;
b411b363 4263 union drbd_state mask, val;
bf885f8a 4264 enum drbd_state_rv rv;
b411b363 4265
b411b363
PR
4266 mask.i = be32_to_cpu(p->mask);
4267 val.i = be32_to_cpu(p->val);
4268
bde89a9e
AG
4269 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
4270 mutex_is_locked(&connection->cstate_mutex)) {
4271 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
82bc0194 4272 return 0;
b411b363
PR
4273 }
4274
4275 mask = convert_state(mask);
4276 val = convert_state(val);
4277
bde89a9e
AG
4278 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
4279 conn_send_sr_reply(connection, rv);
b411b363 4280
82bc0194 4281 return 0;
b411b363
PR
4282}
4283
bde89a9e 4284static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4285{
9f4fe9ad 4286 struct drbd_peer_device *peer_device;
b30ab791 4287 struct drbd_device *device;
e658983a 4288 struct p_state *p = pi->data;
4ac4aada 4289 union drbd_state os, ns, peer_state;
b411b363 4290 enum drbd_disk_state real_peer_disk;
65d922c3 4291 enum chg_state_flags cs_flags;
b411b363
PR
4292 int rv;
4293
9f4fe9ad
AG
4294 peer_device = conn_peer_device(connection, pi->vnr);
4295 if (!peer_device)
bde89a9e 4296 return config_unknown_volume(connection, pi);
9f4fe9ad 4297 device = peer_device->device;
4a76b161 4298
b411b363
PR
4299 peer_state.i = be32_to_cpu(p->state);
4300
4301 real_peer_disk = peer_state.disk;
4302 if (peer_state.disk == D_NEGOTIATING) {
b30ab791 4303 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
d0180171 4304 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
b411b363
PR
4305 }
4306
0500813f 4307 spin_lock_irq(&device->resource->req_lock);
b411b363 4308 retry:
b30ab791 4309 os = ns = drbd_read_state(device);
0500813f 4310 spin_unlock_irq(&device->resource->req_lock);
b411b363 4311
668700b4 4312 /* If some other part of the code (ack_receiver thread, timeout)
545752d5
LE
4313 * already decided to close the connection again,
4314 * we must not "re-establish" it here. */
4315 if (os.conn <= C_TEAR_DOWN)
58ffa580 4316 return -ECONNRESET;
545752d5 4317
40424e4a
LE
4318 /* If this is the "end of sync" confirmation, usually the peer disk
4319 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
4320 * set) resync started in PausedSyncT, or if the timing of pause-/
4321 * unpause-sync events has been "just right", the peer disk may
4322 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
4323 */
4324 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
4325 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
4326 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
4327 /* If we are (becoming) SyncSource, but peer is still in sync
4328 * preparation, ignore its uptodate-ness to avoid flapping, it
4329 * will change to inconsistent once the peer reaches active
4330 * syncing states.
4331 * It may have changed syncer-paused flags, however, so we
4332 * cannot ignore this completely. */
4333 if (peer_state.conn > C_CONNECTED &&
4334 peer_state.conn < C_SYNC_SOURCE)
4335 real_peer_disk = D_INCONSISTENT;
4336
4337 /* if peer_state changes to connected at the same time,
4338 * it explicitly notifies us that it finished resync.
4339 * Maybe we should finish it up, too? */
4340 else if (os.conn >= C_SYNC_SOURCE &&
4341 peer_state.conn == C_CONNECTED) {
b30ab791
AG
4342 if (drbd_bm_total_weight(device) <= device->rs_failed)
4343 drbd_resync_finished(device);
82bc0194 4344 return 0;
e9ef7bb6
LE
4345 }
4346 }
4347
02b91b55
LE
4348 /* explicit verify finished notification, stop sector reached. */
4349 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
4350 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
b30ab791
AG
4351 ov_out_of_sync_print(device);
4352 drbd_resync_finished(device);
58ffa580 4353 return 0;
02b91b55
LE
4354 }
4355
e9ef7bb6
LE
4356 /* peer says his disk is inconsistent, while we think it is uptodate,
4357 * and this happens while the peer still thinks we have a sync going on,
4358 * but we think we are already done with the sync.
4359 * We ignore this to avoid flapping pdsk.
4360 * This should not happen, if the peer is a recent version of drbd. */
4361 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
4362 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
4363 real_peer_disk = D_UP_TO_DATE;
4364
4ac4aada
LE
4365 if (ns.conn == C_WF_REPORT_PARAMS)
4366 ns.conn = C_CONNECTED;
b411b363 4367
67531718
PR
4368 if (peer_state.conn == C_AHEAD)
4369 ns.conn = C_BEHIND;
4370
b30ab791
AG
4371 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4372 get_ldev_if_state(device, D_NEGOTIATING)) {
b411b363
PR
4373 int cr; /* consider resync */
4374
4375 /* if we established a new connection */
4ac4aada 4376 cr = (os.conn < C_CONNECTED);
b411b363
PR
4377 /* if we had an established connection
4378 * and one of the nodes newly attaches a disk */
4ac4aada 4379 cr |= (os.conn == C_CONNECTED &&
b411b363 4380 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 4381 os.disk == D_NEGOTIATING));
b411b363
PR
4382 /* if we have both been inconsistent, and the peer has been
4383 * forced to be UpToDate with --overwrite-data */
b30ab791 4384 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4385 /* if we had been plain connected, and the admin requested to
4386 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 4387 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
4388 (peer_state.conn >= C_STARTING_SYNC_S &&
4389 peer_state.conn <= C_WF_BITMAP_T));
4390
4391 if (cr)
69a22773 4392 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
b411b363 4393
b30ab791 4394 put_ldev(device);
4ac4aada
LE
4395 if (ns.conn == C_MASK) {
4396 ns.conn = C_CONNECTED;
b30ab791
AG
4397 if (device->state.disk == D_NEGOTIATING) {
4398 drbd_force_state(device, NS(disk, D_FAILED));
b411b363 4399 } else if (peer_state.disk == D_NEGOTIATING) {
d0180171 4400 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
b411b363 4401 peer_state.disk = D_DISKLESS;
580b9767 4402 real_peer_disk = D_DISKLESS;
b411b363 4403 } else {
9f4fe9ad 4404 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
82bc0194 4405 return -EIO;
0b0ba1ef 4406 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
9f4fe9ad 4407 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4408 return -EIO;
b411b363
PR
4409 }
4410 }
4411 }
4412
0500813f 4413 spin_lock_irq(&device->resource->req_lock);
b30ab791 4414 if (os.i != drbd_read_state(device).i)
b411b363 4415 goto retry;
b30ab791 4416 clear_bit(CONSIDER_RESYNC, &device->flags);
b411b363
PR
4417 ns.peer = peer_state.role;
4418 ns.pdsk = real_peer_disk;
4419 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 4420 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b30ab791 4421 ns.disk = device->new_state_tmp.disk;
4ac4aada 4422 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
b30ab791
AG
4423 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4424 test_bit(NEW_CUR_UUID, &device->flags)) {
8554df1c 4425 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 4426 for temporal network outages! */
0500813f 4427 spin_unlock_irq(&device->resource->req_lock);
d0180171 4428 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
9f4fe9ad 4429 tl_clear(peer_device->connection);
b30ab791
AG
4430 drbd_uuid_new_current(device);
4431 clear_bit(NEW_CUR_UUID, &device->flags);
9f4fe9ad 4432 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 4433 return -EIO;
481c6f50 4434 }
b30ab791
AG
4435 rv = _drbd_set_state(device, ns, cs_flags, NULL);
4436 ns = drbd_read_state(device);
0500813f 4437 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
4438
4439 if (rv < SS_SUCCESS) {
9f4fe9ad 4440 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 4441 return -EIO;
b411b363
PR
4442 }
4443
4ac4aada
LE
4444 if (os.conn > C_WF_REPORT_PARAMS) {
4445 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
4446 peer_state.disk != D_NEGOTIATING ) {
4447 /* we want resync, peer has not yet decided to sync... */
4448 /* Nowadays only used when forcing a node into primary role and
4449 setting its disk to UpToDate with that */
69a22773
AG
4450 drbd_send_uuids(peer_device);
4451 drbd_send_current_state(peer_device);
b411b363
PR
4452 }
4453 }
4454
b30ab791 4455 clear_bit(DISCARD_MY_DATA, &device->flags);
b411b363 4456
b30ab791 4457 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
b411b363 4458
82bc0194 4459 return 0;
b411b363
PR
4460}
4461
bde89a9e 4462static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4463{
9f4fe9ad 4464 struct drbd_peer_device *peer_device;
b30ab791 4465 struct drbd_device *device;
e658983a 4466 struct p_rs_uuid *p = pi->data;
4a76b161 4467
9f4fe9ad
AG
4468 peer_device = conn_peer_device(connection, pi->vnr);
4469 if (!peer_device)
4a76b161 4470 return -EIO;
9f4fe9ad 4471 device = peer_device->device;
b411b363 4472
b30ab791
AG
4473 wait_event(device->misc_wait,
4474 device->state.conn == C_WF_SYNC_UUID ||
4475 device->state.conn == C_BEHIND ||
4476 device->state.conn < C_CONNECTED ||
4477 device->state.disk < D_NEGOTIATING);
b411b363 4478
0b0ba1ef 4479 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
b411b363 4480
b411b363
PR
4481 /* Here the _drbd_uuid_ functions are right, current should
4482 _not_ be rotated into the history */
b30ab791
AG
4483 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4484 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4485 _drbd_uuid_set(device, UI_BITMAP, 0UL);
b411b363 4486
b30ab791
AG
4487 drbd_print_uuids(device, "updated sync uuid");
4488 drbd_start_resync(device, C_SYNC_TARGET);
b411b363 4489
b30ab791 4490 put_ldev(device);
b411b363 4491 } else
d0180171 4492 drbd_err(device, "Ignoring SyncUUID packet!\n");
b411b363 4493
82bc0194 4494 return 0;
b411b363
PR
4495}
4496
2c46407d
AG
4497/**
4498 * receive_bitmap_plain
4499 *
4500 * Return 0 when done, 1 when another iteration is needed, and a negative error
4501 * code upon failure.
4502 */
4503static int
69a22773 4504receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
e658983a 4505 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4506{
50d0b1ad 4507 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
69a22773 4508 drbd_header_size(peer_device->connection);
e658983a 4509 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4510 c->bm_words - c->word_offset);
e658983a 4511 unsigned int want = num_words * sizeof(*p);
2c46407d 4512 int err;
b411b363 4513
50d0b1ad 4514 if (want != size) {
69a22773 4515 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4516 return -EIO;
b411b363
PR
4517 }
4518 if (want == 0)
2c46407d 4519 return 0;
69a22773 4520 err = drbd_recv_all(peer_device->connection, p, want);
82bc0194 4521 if (err)
2c46407d 4522 return err;
b411b363 4523
69a22773 4524 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
b411b363
PR
4525
4526 c->word_offset += num_words;
4527 c->bit_offset = c->word_offset * BITS_PER_LONG;
4528 if (c->bit_offset > c->bm_bits)
4529 c->bit_offset = c->bm_bits;
4530
2c46407d 4531 return 1;
b411b363
PR
4532}
4533
a02d1240
AG
4534static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4535{
4536 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4537}
4538
4539static int dcbp_get_start(struct p_compressed_bm *p)
4540{
4541 return (p->encoding & 0x80) != 0;
4542}
4543
4544static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4545{
4546 return (p->encoding >> 4) & 0x7;
4547}
4548
2c46407d
AG
4549/**
4550 * recv_bm_rle_bits
4551 *
4552 * Return 0 when done, 1 when another iteration is needed, and a negative error
4553 * code upon failure.
4554 */
4555static int
69a22773 4556recv_bm_rle_bits(struct drbd_peer_device *peer_device,
b411b363 4557 struct p_compressed_bm *p,
c6d25cfe
PR
4558 struct bm_xfer_ctx *c,
4559 unsigned int len)
b411b363
PR
4560{
4561 struct bitstream bs;
4562 u64 look_ahead;
4563 u64 rl;
4564 u64 tmp;
4565 unsigned long s = c->bit_offset;
4566 unsigned long e;
a02d1240 4567 int toggle = dcbp_get_start(p);
b411b363
PR
4568 int have;
4569 int bits;
4570
a02d1240 4571 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4572
4573 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4574 if (bits < 0)
2c46407d 4575 return -EIO;
b411b363
PR
4576
4577 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4578 bits = vli_decode_bits(&rl, look_ahead);
4579 if (bits <= 0)
2c46407d 4580 return -EIO;
b411b363
PR
4581
4582 if (toggle) {
4583 e = s + rl -1;
4584 if (e >= c->bm_bits) {
69a22773 4585 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4586 return -EIO;
b411b363 4587 }
69a22773 4588 _drbd_bm_set_bits(peer_device->device, s, e);
b411b363
PR
4589 }
4590
4591 if (have < bits) {
69a22773 4592 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
b411b363
PR
4593 have, bits, look_ahead,
4594 (unsigned int)(bs.cur.b - p->code),
4595 (unsigned int)bs.buf_len);
2c46407d 4596 return -EIO;
b411b363 4597 }
d2da5b0c
LE
4598 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4599 if (likely(bits < 64))
4600 look_ahead >>= bits;
4601 else
4602 look_ahead = 0;
b411b363
PR
4603 have -= bits;
4604
4605 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4606 if (bits < 0)
2c46407d 4607 return -EIO;
b411b363
PR
4608 look_ahead |= tmp << have;
4609 have += bits;
4610 }
4611
4612 c->bit_offset = s;
4613 bm_xfer_ctx_bit_to_word_offset(c);
4614
2c46407d 4615 return (s != c->bm_bits);
b411b363
PR
4616}
4617
2c46407d
AG
4618/**
4619 * decode_bitmap_c
4620 *
4621 * Return 0 when done, 1 when another iteration is needed, and a negative error
4622 * code upon failure.
4623 */
4624static int
69a22773 4625decode_bitmap_c(struct drbd_peer_device *peer_device,
b411b363 4626 struct p_compressed_bm *p,
c6d25cfe
PR
4627 struct bm_xfer_ctx *c,
4628 unsigned int len)
b411b363 4629{
a02d1240 4630 if (dcbp_get_code(p) == RLE_VLI_Bits)
69a22773 4631 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
b411b363
PR
4632
4633 /* other variants had been implemented for evaluation,
4634 * but have been dropped as this one turned out to be "best"
4635 * during all our tests. */
4636
69a22773
AG
4637 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4638 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4639 return -EIO;
b411b363
PR
4640}
4641
b30ab791 4642void INFO_bm_xfer_stats(struct drbd_device *device,
b411b363
PR
4643 const char *direction, struct bm_xfer_ctx *c)
4644{
4645 /* what would it take to transfer it "plaintext" */
a6b32bc3 4646 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
50d0b1ad
AG
4647 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4648 unsigned int plain =
4649 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4650 c->bm_words * sizeof(unsigned long);
4651 unsigned int total = c->bytes[0] + c->bytes[1];
4652 unsigned int r;
b411b363
PR
4653
4654 /* total can not be zero. but just in case: */
4655 if (total == 0)
4656 return;
4657
4658 /* don't report if not compressed */
4659 if (total >= plain)
4660 return;
4661
4662 /* total < plain. check for overflow, still */
4663 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4664 : (1000 * total / plain);
4665
4666 if (r > 1000)
4667 r = 1000;
4668
4669 r = 1000 - r;
d0180171 4670 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
b411b363
PR
4671 "total %u; compression: %u.%u%%\n",
4672 direction,
4673 c->bytes[1], c->packets[1],
4674 c->bytes[0], c->packets[0],
4675 total, r/10, r % 10);
4676}
4677
4678/* Since we are processing the bitfield from lower addresses to higher,
4679 it does not matter if the process it in 32 bit chunks or 64 bit
4680 chunks as long as it is little endian. (Understand it as byte stream,
4681 beginning with the lowest byte...) If we would use big endian
4682 we would need to process it from the highest address to the lowest,
4683 in order to be agnostic to the 32 vs 64 bits issue.
4684
4685 returns 0 on failure, 1 if we successfully received it. */
bde89a9e 4686static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4687{
9f4fe9ad 4688 struct drbd_peer_device *peer_device;
b30ab791 4689 struct drbd_device *device;
b411b363 4690 struct bm_xfer_ctx c;
2c46407d 4691 int err;
4a76b161 4692
9f4fe9ad
AG
4693 peer_device = conn_peer_device(connection, pi->vnr);
4694 if (!peer_device)
4a76b161 4695 return -EIO;
9f4fe9ad 4696 device = peer_device->device;
b411b363 4697
b30ab791 4698 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
20ceb2b2
LE
4699 /* you are supposed to send additional out-of-sync information
4700 * if you actually set bits during this phase */
b411b363 4701
b411b363 4702 c = (struct bm_xfer_ctx) {
b30ab791
AG
4703 .bm_bits = drbd_bm_bits(device),
4704 .bm_words = drbd_bm_words(device),
b411b363
PR
4705 };
4706
2c46407d 4707 for(;;) {
e658983a 4708 if (pi->cmd == P_BITMAP)
69a22773 4709 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
e658983a 4710 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4711 /* MAYBE: sanity check that we speak proto >= 90,
4712 * and the feature is enabled! */
e658983a 4713 struct p_compressed_bm *p = pi->data;
b411b363 4714
bde89a9e 4715 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
d0180171 4716 drbd_err(device, "ReportCBitmap packet too large\n");
82bc0194 4717 err = -EIO;
b411b363
PR
4718 goto out;
4719 }
e658983a 4720 if (pi->size <= sizeof(*p)) {
d0180171 4721 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4722 err = -EIO;
78fcbdae 4723 goto out;
b411b363 4724 }
9f4fe9ad 4725 err = drbd_recv_all(peer_device->connection, p, pi->size);
e658983a
AG
4726 if (err)
4727 goto out;
69a22773 4728 err = decode_bitmap_c(peer_device, p, &c, pi->size);
b411b363 4729 } else {
d0180171 4730 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
82bc0194 4731 err = -EIO;
b411b363
PR
4732 goto out;
4733 }
4734
e2857216 4735 c.packets[pi->cmd == P_BITMAP]++;
bde89a9e 4736 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
b411b363 4737
2c46407d
AG
4738 if (err <= 0) {
4739 if (err < 0)
4740 goto out;
b411b363 4741 break;
2c46407d 4742 }
9f4fe9ad 4743 err = drbd_recv_header(peer_device->connection, pi);
82bc0194 4744 if (err)
b411b363 4745 goto out;
2c46407d 4746 }
b411b363 4747
b30ab791 4748 INFO_bm_xfer_stats(device, "receive", &c);
b411b363 4749
b30ab791 4750 if (device->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4751 enum drbd_state_rv rv;
4752
b30ab791 4753 err = drbd_send_bitmap(device);
82bc0194 4754 if (err)
b411b363
PR
4755 goto out;
4756 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
b30ab791 4757 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
0b0ba1ef 4758 D_ASSERT(device, rv == SS_SUCCESS);
b30ab791 4759 } else if (device->state.conn != C_WF_BITMAP_S) {
b411b363
PR
4760 /* admin may have requested C_DISCONNECTING,
4761 * other threads may have noticed network errors */
d0180171 4762 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
b30ab791 4763 drbd_conn_str(device->state.conn));
b411b363 4764 }
82bc0194 4765 err = 0;
b411b363 4766
b411b363 4767 out:
b30ab791
AG
4768 drbd_bm_unlock(device);
4769 if (!err && device->state.conn == C_WF_BITMAP_S)
4770 drbd_start_resync(device, C_SYNC_SOURCE);
82bc0194 4771 return err;
b411b363
PR
4772}
4773
bde89a9e 4774static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
b411b363 4775{
1ec861eb 4776 drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4777 pi->cmd, pi->size);
b411b363 4778
bde89a9e 4779 return ignore_remaining_packet(connection, pi);
b411b363
PR
4780}
4781
bde89a9e 4782static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 4783{
e7f52dfb
LE
4784 /* Make sure we've acked all the TCP data associated
4785 * with the data requests being unplugged */
bde89a9e 4786 drbd_tcp_quickack(connection->data.socket);
0ced55a3 4787
82bc0194 4788 return 0;
0ced55a3
PR
4789}
4790
bde89a9e 4791static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
73a01a18 4792{
9f4fe9ad 4793 struct drbd_peer_device *peer_device;
b30ab791 4794 struct drbd_device *device;
e658983a 4795 struct p_block_desc *p = pi->data;
4a76b161 4796
9f4fe9ad
AG
4797 peer_device = conn_peer_device(connection, pi->vnr);
4798 if (!peer_device)
4a76b161 4799 return -EIO;
9f4fe9ad 4800 device = peer_device->device;
73a01a18 4801
b30ab791 4802 switch (device->state.conn) {
f735e363
LE
4803 case C_WF_SYNC_UUID:
4804 case C_WF_BITMAP_T:
4805 case C_BEHIND:
4806 break;
4807 default:
d0180171 4808 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
b30ab791 4809 drbd_conn_str(device->state.conn));
f735e363
LE
4810 }
4811
b30ab791 4812 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
73a01a18 4813
82bc0194 4814 return 0;
73a01a18
PR
4815}
4816
700ca8c0
PR
4817static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
4818{
4819 struct drbd_peer_device *peer_device;
4820 struct p_block_desc *p = pi->data;
4821 struct drbd_device *device;
4822 sector_t sector;
4823 int size, err = 0;
4824
4825 peer_device = conn_peer_device(connection, pi->vnr);
4826 if (!peer_device)
4827 return -EIO;
4828 device = peer_device->device;
4829
4830 sector = be64_to_cpu(p->sector);
4831 size = be32_to_cpu(p->blksize);
4832
4833 dec_rs_pending(device);
4834
4835 if (get_ldev(device)) {
4836 struct drbd_peer_request *peer_req;
45c21793 4837 const int op = REQ_OP_WRITE_ZEROES;
700ca8c0
PR
4838
4839 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
9104d31a 4840 size, 0, GFP_NOIO);
700ca8c0
PR
4841 if (!peer_req) {
4842 put_ldev(device);
4843 return -ENOMEM;
4844 }
4845
4846 peer_req->w.cb = e_end_resync_block;
4847 peer_req->submit_jif = jiffies;
4848 peer_req->flags |= EE_IS_TRIM;
4849
4850 spin_lock_irq(&device->resource->req_lock);
4851 list_add_tail(&peer_req->w.list, &device->sync_ee);
4852 spin_unlock_irq(&device->resource->req_lock);
4853
4854 atomic_add(pi->size >> 9, &device->rs_sect_ev);
4855 err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
4856
4857 if (err) {
4858 spin_lock_irq(&device->resource->req_lock);
4859 list_del(&peer_req->w.list);
4860 spin_unlock_irq(&device->resource->req_lock);
4861
4862 drbd_free_peer_req(device, peer_req);
4863 put_ldev(device);
4864 err = 0;
4865 goto fail;
4866 }
4867
4868 inc_unacked(device);
4869
4870 /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
4871 as well as drbd_rs_complete_io() */
4872 } else {
4873 fail:
4874 drbd_rs_complete_io(device, sector);
4875 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
4876 }
4877
4878 atomic_add(size >> 9, &device->rs_sect_in);
4879
4880 return err;
4881}
4882
02918be2
PR
4883struct data_cmd {
4884 int expect_payload;
9104d31a 4885 unsigned int pkt_size;
bde89a9e 4886 int (*fn)(struct drbd_connection *, struct packet_info *);
02918be2
PR
4887};
4888
4889static struct data_cmd drbd_cmd_handler[] = {
4890 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4891 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4892 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4893 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
4894 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4895 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4896 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
4897 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4898 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
4899 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4900 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
4901 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4902 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4903 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4904 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4905 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4906 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4907 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4908 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4909 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
700ca8c0 4910 [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
02918be2 4911 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 4912 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 4913 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 4914 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
a0fb3c47 4915 [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
700ca8c0 4916 [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
9104d31a 4917 [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data },
b411b363
PR
4918};
4919
bde89a9e 4920static void drbdd(struct drbd_connection *connection)
b411b363 4921{
77351055 4922 struct packet_info pi;
02918be2 4923 size_t shs; /* sub header size */
82bc0194 4924 int err;
b411b363 4925
bde89a9e 4926 while (get_t_state(&connection->receiver) == RUNNING) {
9104d31a 4927 struct data_cmd const *cmd;
b411b363 4928
bde89a9e 4929 drbd_thread_current_set_cpu(&connection->receiver);
c51a0ef3
LE
4930 update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
4931 if (drbd_recv_header_maybe_unplug(connection, &pi))
02918be2 4932 goto err_out;
b411b363 4933
deebe195 4934 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 4935 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
1ec861eb 4936 drbd_err(connection, "Unexpected data packet %s (0x%04x)",
2fcb8f30 4937 cmdname(pi.cmd), pi.cmd);
02918be2 4938 goto err_out;
0b33a916 4939 }
b411b363 4940
e658983a 4941 shs = cmd->pkt_size;
9104d31a
LE
4942 if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
4943 shs += sizeof(struct o_qlim);
e658983a 4944 if (pi.size > shs && !cmd->expect_payload) {
1ec861eb 4945 drbd_err(connection, "No payload expected %s l:%d\n",
2fcb8f30 4946 cmdname(pi.cmd), pi.size);
02918be2 4947 goto err_out;
b411b363 4948 }
9104d31a
LE
4949 if (pi.size < shs) {
4950 drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
4951 cmdname(pi.cmd), (int)shs, pi.size);
4952 goto err_out;
4953 }
b411b363 4954
c13f7e1a 4955 if (shs) {
944410e9 4956 update_receiver_timing_details(connection, drbd_recv_all_warn);
bde89a9e 4957 err = drbd_recv_all_warn(connection, pi.data, shs);
a5c31904 4958 if (err)
c13f7e1a 4959 goto err_out;
e2857216 4960 pi.size -= shs;
c13f7e1a
LE
4961 }
4962
944410e9 4963 update_receiver_timing_details(connection, cmd->fn);
bde89a9e 4964 err = cmd->fn(connection, &pi);
4a76b161 4965 if (err) {
1ec861eb 4966 drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
9f5bdc33 4967 cmdname(pi.cmd), err, pi.size);
02918be2 4968 goto err_out;
b411b363
PR
4969 }
4970 }
82bc0194 4971 return;
b411b363 4972
82bc0194 4973 err_out:
bde89a9e 4974 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
4975}
4976
bde89a9e 4977static void conn_disconnect(struct drbd_connection *connection)
b411b363 4978{
c06ece6b 4979 struct drbd_peer_device *peer_device;
bbeb641c 4980 enum drbd_conns oc;
376694a0 4981 int vnr;
b411b363 4982
bde89a9e 4983 if (connection->cstate == C_STANDALONE)
b411b363 4984 return;
b411b363 4985
545752d5
LE
4986 /* We are about to start the cleanup after connection loss.
4987 * Make sure drbd_make_request knows about that.
4988 * Usually we should be in some network failure state already,
4989 * but just in case we are not, we fix it up here.
4990 */
bde89a9e 4991 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 4992
668700b4 4993 /* ack_receiver does not clean up anything. it must not interfere, either */
1c03e520 4994 drbd_thread_stop(&connection->ack_receiver);
668700b4
PR
4995 if (connection->ack_sender) {
4996 destroy_workqueue(connection->ack_sender);
4997 connection->ack_sender = NULL;
4998 }
bde89a9e 4999 drbd_free_sock(connection);
360cc740 5000
c141ebda 5001 rcu_read_lock();
c06ece6b
AG
5002 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5003 struct drbd_device *device = peer_device->device;
b30ab791 5004 kref_get(&device->kref);
c141ebda 5005 rcu_read_unlock();
69a22773 5006 drbd_disconnected(peer_device);
c06ece6b 5007 kref_put(&device->kref, drbd_destroy_device);
c141ebda
PR
5008 rcu_read_lock();
5009 }
5010 rcu_read_unlock();
5011
bde89a9e 5012 if (!list_empty(&connection->current_epoch->list))
1ec861eb 5013 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
12038a3a 5014 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
bde89a9e
AG
5015 atomic_set(&connection->current_epoch->epoch_size, 0);
5016 connection->send.seen_any_write_yet = false;
12038a3a 5017
1ec861eb 5018 drbd_info(connection, "Connection closed\n");
360cc740 5019
bde89a9e
AG
5020 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
5021 conn_try_outdate_peer_async(connection);
cb703454 5022
0500813f 5023 spin_lock_irq(&connection->resource->req_lock);
bde89a9e 5024 oc = connection->cstate;
bbeb641c 5025 if (oc >= C_UNCONNECTED)
bde89a9e 5026 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 5027
0500813f 5028 spin_unlock_irq(&connection->resource->req_lock);
360cc740 5029
f3dfa40a 5030 if (oc == C_DISCONNECTING)
bde89a9e 5031 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
5032}
5033
69a22773 5034static int drbd_disconnected(struct drbd_peer_device *peer_device)
360cc740 5035{
69a22773 5036 struct drbd_device *device = peer_device->device;
360cc740 5037 unsigned int i;
b411b363 5038
85719573 5039 /* wait for current activity to cease. */
0500813f 5040 spin_lock_irq(&device->resource->req_lock);
b30ab791
AG
5041 _drbd_wait_ee_list_empty(device, &device->active_ee);
5042 _drbd_wait_ee_list_empty(device, &device->sync_ee);
5043 _drbd_wait_ee_list_empty(device, &device->read_ee);
0500813f 5044 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5045
5046 /* We do not have data structures that would allow us to
5047 * get the rs_pending_cnt down to 0 again.
5048 * * On C_SYNC_TARGET we do not have any data structures describing
5049 * the pending RSDataRequest's we have sent.
5050 * * On C_SYNC_SOURCE there is no data structure that tracks
5051 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
5052 * And no, it is not the sum of the reference counts in the
5053 * resync_LRU. The resync_LRU tracks the whole operation including
5054 * the disk-IO, while the rs_pending_cnt only tracks the blocks
5055 * on the fly. */
b30ab791
AG
5056 drbd_rs_cancel_all(device);
5057 device->rs_total = 0;
5058 device->rs_failed = 0;
5059 atomic_set(&device->rs_pending_cnt, 0);
5060 wake_up(&device->misc_wait);
b411b363 5061
b30ab791 5062 del_timer_sync(&device->resync_timer);
2bccef39 5063 resync_timer_fn(&device->resync_timer);
b411b363 5064
b411b363
PR
5065 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
5066 * w_make_resync_request etc. which may still be on the worker queue
5067 * to be "canceled" */
b5043c5e 5068 drbd_flush_workqueue(&peer_device->connection->sender_work);
b411b363 5069
b30ab791 5070 drbd_finish_peer_reqs(device);
b411b363 5071
d10b4ea3
PR
5072 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
5073 might have issued a work again. The one before drbd_finish_peer_reqs() is
5074 necessary to reclain net_ee in drbd_finish_peer_reqs(). */
b5043c5e 5075 drbd_flush_workqueue(&peer_device->connection->sender_work);
d10b4ea3 5076
08332d73
LE
5077 /* need to do it again, drbd_finish_peer_reqs() may have populated it
5078 * again via drbd_try_clear_on_disk_bm(). */
b30ab791 5079 drbd_rs_cancel_all(device);
b411b363 5080
b30ab791
AG
5081 kfree(device->p_uuid);
5082 device->p_uuid = NULL;
b411b363 5083
b30ab791 5084 if (!drbd_suspended(device))
69a22773 5085 tl_clear(peer_device->connection);
b411b363 5086
b30ab791 5087 drbd_md_sync(device);
b411b363 5088
be115b69
LE
5089 if (get_ldev(device)) {
5090 drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5091 "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
5092 put_ldev(device);
5093 }
20ceb2b2 5094
b411b363
PR
5095 /* tcp_close and release of sendpage pages can be deferred. I don't
5096 * want to use SO_LINGER, because apparently it can be deferred for
5097 * more than 20 seconds (longest time I checked).
5098 *
5099 * Actually we don't care for exactly when the network stack does its
5100 * put_page(), but release our reference on these pages right here.
5101 */
b30ab791 5102 i = drbd_free_peer_reqs(device, &device->net_ee);
b411b363 5103 if (i)
d0180171 5104 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
b30ab791 5105 i = atomic_read(&device->pp_in_use_by_net);
435f0740 5106 if (i)
d0180171 5107 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
b30ab791 5108 i = atomic_read(&device->pp_in_use);
b411b363 5109 if (i)
d0180171 5110 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
b411b363 5111
0b0ba1ef
AG
5112 D_ASSERT(device, list_empty(&device->read_ee));
5113 D_ASSERT(device, list_empty(&device->active_ee));
5114 D_ASSERT(device, list_empty(&device->sync_ee));
5115 D_ASSERT(device, list_empty(&device->done_ee));
b411b363 5116
360cc740 5117 return 0;
b411b363
PR
5118}
5119
5120/*
5121 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
5122 * we can agree on is stored in agreed_pro_version.
5123 *
5124 * feature flags and the reserved array should be enough room for future
5125 * enhancements of the handshake protocol, and possible plugins...
5126 *
5127 * for now, they are expected to be zero, but ignored.
5128 */
bde89a9e 5129static int drbd_send_features(struct drbd_connection *connection)
b411b363 5130{
9f5bdc33
AG
5131 struct drbd_socket *sock;
5132 struct p_connection_features *p;
b411b363 5133
bde89a9e
AG
5134 sock = &connection->data;
5135 p = conn_prepare_command(connection, sock);
9f5bdc33 5136 if (!p)
e8d17b01 5137 return -EIO;
b411b363
PR
5138 memset(p, 0, sizeof(*p));
5139 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
5140 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
20c68fde 5141 p->feature_flags = cpu_to_be32(PRO_FEATURES);
bde89a9e 5142 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
5143}
5144
5145/*
5146 * return values:
5147 * 1 yes, we have a valid connection
5148 * 0 oops, did not work out, please try again
5149 * -1 peer talks different language,
5150 * no point in trying again, please go standalone.
5151 */
bde89a9e 5152static int drbd_do_features(struct drbd_connection *connection)
b411b363 5153{
bde89a9e 5154 /* ASSERT current == connection->receiver ... */
e658983a
AG
5155 struct p_connection_features *p;
5156 const int expect = sizeof(struct p_connection_features);
77351055 5157 struct packet_info pi;
a5c31904 5158 int err;
b411b363 5159
bde89a9e 5160 err = drbd_send_features(connection);
e8d17b01 5161 if (err)
b411b363
PR
5162 return 0;
5163
bde89a9e 5164 err = drbd_recv_header(connection, &pi);
69bc7bc3 5165 if (err)
b411b363
PR
5166 return 0;
5167
6038178e 5168 if (pi.cmd != P_CONNECTION_FEATURES) {
1ec861eb 5169 drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 5170 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5171 return -1;
5172 }
5173
77351055 5174 if (pi.size != expect) {
1ec861eb 5175 drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 5176 expect, pi.size);
b411b363
PR
5177 return -1;
5178 }
5179
e658983a 5180 p = pi.data;
bde89a9e 5181 err = drbd_recv_all_warn(connection, p, expect);
a5c31904 5182 if (err)
b411b363 5183 return 0;
b411b363 5184
b411b363
PR
5185 p->protocol_min = be32_to_cpu(p->protocol_min);
5186 p->protocol_max = be32_to_cpu(p->protocol_max);
5187 if (p->protocol_max == 0)
5188 p->protocol_max = p->protocol_min;
5189
5190 if (PRO_VERSION_MAX < p->protocol_min ||
5191 PRO_VERSION_MIN > p->protocol_max)
5192 goto incompat;
5193
bde89a9e 5194 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
20c68fde 5195 connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
b411b363 5196
1ec861eb 5197 drbd_info(connection, "Handshake successful: "
bde89a9e 5198 "Agreed network protocol version %d\n", connection->agreed_pro_version);
b411b363 5199
9104d31a
LE
5200 drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n",
5201 connection->agreed_features,
5202 connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
5203 connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
5204 connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" :
5205 connection->agreed_features ? "" : " none");
92d94ae6 5206
b411b363
PR
5207 return 1;
5208
5209 incompat:
1ec861eb 5210 drbd_err(connection, "incompatible DRBD dialects: "
b411b363
PR
5211 "I support %d-%d, peer supports %d-%d\n",
5212 PRO_VERSION_MIN, PRO_VERSION_MAX,
5213 p->protocol_min, p->protocol_max);
5214 return -1;
5215}
5216
5217#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
bde89a9e 5218static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5219{
1ec861eb
AG
5220 drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
5221 drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 5222 return -1;
b411b363
PR
5223}
5224#else
5225#define CHALLENGE_LEN 64
b10d96cb
JT
5226
5227/* Return value:
5228 1 - auth succeeded,
5229 0 - failed, try again (network error),
5230 -1 - auth failed, don't try again.
5231*/
5232
bde89a9e 5233static int drbd_do_auth(struct drbd_connection *connection)
b411b363 5234{
9f5bdc33 5235 struct drbd_socket *sock;
b411b363 5236 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
b411b363
PR
5237 char *response = NULL;
5238 char *right_response = NULL;
5239 char *peers_ch = NULL;
44ed167d
PR
5240 unsigned int key_len;
5241 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363 5242 unsigned int resp_size;
9534d671 5243 SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
77351055 5244 struct packet_info pi;
44ed167d 5245 struct net_conf *nc;
69bc7bc3 5246 int err, rv;
b411b363 5247
9f5bdc33 5248 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 5249
44ed167d 5250 rcu_read_lock();
bde89a9e 5251 nc = rcu_dereference(connection->net_conf);
44ed167d
PR
5252 key_len = strlen(nc->shared_secret);
5253 memcpy(secret, nc->shared_secret, key_len);
5254 rcu_read_unlock();
5255
9534d671
HX
5256 desc->tfm = connection->cram_hmac_tfm;
5257 desc->flags = 0;
b411b363 5258
9534d671 5259 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 5260 if (rv) {
9534d671 5261 drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
b10d96cb 5262 rv = -1;
b411b363
PR
5263 goto fail;
5264 }
5265
5266 get_random_bytes(my_challenge, CHALLENGE_LEN);
5267
bde89a9e
AG
5268 sock = &connection->data;
5269 if (!conn_prepare_command(connection, sock)) {
9f5bdc33
AG
5270 rv = 0;
5271 goto fail;
5272 }
bde89a9e 5273 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 5274 my_challenge, CHALLENGE_LEN);
b411b363
PR
5275 if (!rv)
5276 goto fail;
5277
bde89a9e 5278 err = drbd_recv_header(connection, &pi);
69bc7bc3
AG
5279 if (err) {
5280 rv = 0;
b411b363 5281 goto fail;
69bc7bc3 5282 }
b411b363 5283
77351055 5284 if (pi.cmd != P_AUTH_CHALLENGE) {
1ec861eb 5285 drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 5286 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5287 rv = 0;
5288 goto fail;
5289 }
5290
77351055 5291 if (pi.size > CHALLENGE_LEN * 2) {
1ec861eb 5292 drbd_err(connection, "expected AuthChallenge payload too big.\n");
b10d96cb 5293 rv = -1;
b411b363
PR
5294 goto fail;
5295 }
5296
67cca286
PR
5297 if (pi.size < CHALLENGE_LEN) {
5298 drbd_err(connection, "AuthChallenge payload too small.\n");
5299 rv = -1;
5300 goto fail;
5301 }
5302
77351055 5303 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 5304 if (peers_ch == NULL) {
1ec861eb 5305 drbd_err(connection, "kmalloc of peers_ch failed\n");
b10d96cb 5306 rv = -1;
b411b363
PR
5307 goto fail;
5308 }
5309
bde89a9e 5310 err = drbd_recv_all_warn(connection, peers_ch, pi.size);
a5c31904 5311 if (err) {
b411b363
PR
5312 rv = 0;
5313 goto fail;
5314 }
5315
67cca286
PR
5316 if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
5317 drbd_err(connection, "Peer presented the same challenge!\n");
5318 rv = -1;
5319 goto fail;
5320 }
5321
9534d671 5322 resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
b411b363
PR
5323 response = kmalloc(resp_size, GFP_NOIO);
5324 if (response == NULL) {
1ec861eb 5325 drbd_err(connection, "kmalloc of response failed\n");
b10d96cb 5326 rv = -1;
b411b363
PR
5327 goto fail;
5328 }
5329
9534d671 5330 rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
b411b363 5331 if (rv) {
1ec861eb 5332 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5333 rv = -1;
b411b363
PR
5334 goto fail;
5335 }
5336
bde89a9e 5337 if (!conn_prepare_command(connection, sock)) {
9f5bdc33 5338 rv = 0;
b411b363 5339 goto fail;
9f5bdc33 5340 }
bde89a9e 5341 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 5342 response, resp_size);
b411b363
PR
5343 if (!rv)
5344 goto fail;
5345
bde89a9e 5346 err = drbd_recv_header(connection, &pi);
69bc7bc3 5347 if (err) {
b411b363
PR
5348 rv = 0;
5349 goto fail;
5350 }
5351
77351055 5352 if (pi.cmd != P_AUTH_RESPONSE) {
1ec861eb 5353 drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 5354 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5355 rv = 0;
5356 goto fail;
5357 }
5358
77351055 5359 if (pi.size != resp_size) {
1ec861eb 5360 drbd_err(connection, "expected AuthResponse payload of wrong size\n");
b411b363
PR
5361 rv = 0;
5362 goto fail;
5363 }
b411b363 5364
bde89a9e 5365 err = drbd_recv_all_warn(connection, response , resp_size);
a5c31904 5366 if (err) {
b411b363
PR
5367 rv = 0;
5368 goto fail;
5369 }
5370
5371 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 5372 if (right_response == NULL) {
1ec861eb 5373 drbd_err(connection, "kmalloc of right_response failed\n");
b10d96cb 5374 rv = -1;
b411b363
PR
5375 goto fail;
5376 }
5377
9534d671
HX
5378 rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
5379 right_response);
b411b363 5380 if (rv) {
1ec861eb 5381 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 5382 rv = -1;
b411b363
PR
5383 goto fail;
5384 }
5385
5386 rv = !memcmp(response, right_response, resp_size);
5387
5388 if (rv)
1ec861eb 5389 drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
44ed167d 5390 resp_size);
b10d96cb
JT
5391 else
5392 rv = -1;
b411b363
PR
5393
5394 fail:
5395 kfree(peers_ch);
5396 kfree(response);
5397 kfree(right_response);
9534d671 5398 shash_desc_zero(desc);
b411b363
PR
5399
5400 return rv;
5401}
5402#endif
5403
8fe60551 5404int drbd_receiver(struct drbd_thread *thi)
b411b363 5405{
bde89a9e 5406 struct drbd_connection *connection = thi->connection;
b411b363
PR
5407 int h;
5408
1ec861eb 5409 drbd_info(connection, "receiver (re)started\n");
b411b363
PR
5410
5411 do {
bde89a9e 5412 h = conn_connect(connection);
b411b363 5413 if (h == 0) {
bde89a9e 5414 conn_disconnect(connection);
20ee6390 5415 schedule_timeout_interruptible(HZ);
b411b363
PR
5416 }
5417 if (h == -1) {
1ec861eb 5418 drbd_warn(connection, "Discarding network configuration.\n");
bde89a9e 5419 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
5420 }
5421 } while (h == 0);
5422
c51a0ef3
LE
5423 if (h > 0) {
5424 blk_start_plug(&connection->receiver_plug);
bde89a9e 5425 drbdd(connection);
c51a0ef3
LE
5426 blk_finish_plug(&connection->receiver_plug);
5427 }
b411b363 5428
bde89a9e 5429 conn_disconnect(connection);
b411b363 5430
1ec861eb 5431 drbd_info(connection, "receiver terminated\n");
b411b363
PR
5432 return 0;
5433}
5434
5435/* ********* acknowledge sender ******** */
5436
bde89a9e 5437static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5438{
e658983a 5439 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
5440 int retcode = be32_to_cpu(p->retcode);
5441
5442 if (retcode >= SS_SUCCESS) {
bde89a9e 5443 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
e4f78ede 5444 } else {
bde89a9e 5445 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
1ec861eb 5446 drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
e4f78ede
PR
5447 drbd_set_st_err_str(retcode), retcode);
5448 }
bde89a9e 5449 wake_up(&connection->ping_wait);
e4f78ede 5450
2735a594 5451 return 0;
e4f78ede 5452}
b411b363 5453
bde89a9e 5454static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5455{
9f4fe9ad 5456 struct drbd_peer_device *peer_device;
b30ab791 5457 struct drbd_device *device;
e658983a 5458 struct p_req_state_reply *p = pi->data;
b411b363
PR
5459 int retcode = be32_to_cpu(p->retcode);
5460
9f4fe9ad
AG
5461 peer_device = conn_peer_device(connection, pi->vnr);
5462 if (!peer_device)
2735a594 5463 return -EIO;
9f4fe9ad 5464 device = peer_device->device;
1952e916 5465
bde89a9e 5466 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
0b0ba1ef 5467 D_ASSERT(device, connection->agreed_pro_version < 100);
bde89a9e 5468 return got_conn_RqSReply(connection, pi);
4d0fc3fd
PR
5469 }
5470
b411b363 5471 if (retcode >= SS_SUCCESS) {
b30ab791 5472 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
b411b363 5473 } else {
b30ab791 5474 set_bit(CL_ST_CHG_FAIL, &device->flags);
d0180171 5475 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 5476 drbd_set_st_err_str(retcode), retcode);
b411b363 5477 }
b30ab791 5478 wake_up(&device->state_wait);
b411b363 5479
2735a594 5480 return 0;
b411b363
PR
5481}
5482
bde89a9e 5483static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5484{
bde89a9e 5485 return drbd_send_ping_ack(connection);
b411b363
PR
5486
5487}
5488
bde89a9e 5489static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363
PR
5490{
5491 /* restore idle timeout */
bde89a9e
AG
5492 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
5493 if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
5494 wake_up(&connection->ping_wait);
b411b363 5495
2735a594 5496 return 0;
b411b363
PR
5497}
5498
bde89a9e 5499static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5500{
9f4fe9ad 5501 struct drbd_peer_device *peer_device;
b30ab791 5502 struct drbd_device *device;
e658983a 5503 struct p_block_ack *p = pi->data;
b411b363
PR
5504 sector_t sector = be64_to_cpu(p->sector);
5505 int blksize = be32_to_cpu(p->blksize);
5506
9f4fe9ad
AG
5507 peer_device = conn_peer_device(connection, pi->vnr);
5508 if (!peer_device)
2735a594 5509 return -EIO;
9f4fe9ad 5510 device = peer_device->device;
1952e916 5511
9f4fe9ad 5512 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
b411b363 5513
69a22773 5514 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5515
b30ab791
AG
5516 if (get_ldev(device)) {
5517 drbd_rs_complete_io(device, sector);
5518 drbd_set_in_sync(device, sector, blksize);
1d53f09e 5519 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
b30ab791
AG
5520 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5521 put_ldev(device);
1d53f09e 5522 }
b30ab791
AG
5523 dec_rs_pending(device);
5524 atomic_add(blksize >> 9, &device->rs_sect_in);
b411b363 5525
2735a594 5526 return 0;
b411b363
PR
5527}
5528
bc9c5c41 5529static int
b30ab791 5530validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
bc9c5c41
AG
5531 struct rb_root *root, const char *func,
5532 enum drbd_req_event what, bool missing_ok)
b411b363
PR
5533{
5534 struct drbd_request *req;
5535 struct bio_and_error m;
5536
0500813f 5537 spin_lock_irq(&device->resource->req_lock);
b30ab791 5538 req = find_request(device, root, id, sector, missing_ok, func);
b411b363 5539 if (unlikely(!req)) {
0500813f 5540 spin_unlock_irq(&device->resource->req_lock);
85997675 5541 return -EIO;
b411b363
PR
5542 }
5543 __req_mod(req, what, &m);
0500813f 5544 spin_unlock_irq(&device->resource->req_lock);
b411b363
PR
5545
5546 if (m.bio)
b30ab791 5547 complete_master_bio(device, &m);
85997675 5548 return 0;
b411b363
PR
5549}
5550
bde89a9e 5551static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5552{
9f4fe9ad 5553 struct drbd_peer_device *peer_device;
b30ab791 5554 struct drbd_device *device;
e658983a 5555 struct p_block_ack *p = pi->data;
b411b363
PR
5556 sector_t sector = be64_to_cpu(p->sector);
5557 int blksize = be32_to_cpu(p->blksize);
5558 enum drbd_req_event what;
5559
9f4fe9ad
AG
5560 peer_device = conn_peer_device(connection, pi->vnr);
5561 if (!peer_device)
2735a594 5562 return -EIO;
9f4fe9ad 5563 device = peer_device->device;
1952e916 5564
69a22773 5565 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5566
579b57ed 5567 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5568 drbd_set_in_sync(device, sector, blksize);
5569 dec_rs_pending(device);
2735a594 5570 return 0;
b411b363 5571 }
e05e1e59 5572 switch (pi->cmd) {
b411b363 5573 case P_RS_WRITE_ACK:
8554df1c 5574 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
5575 break;
5576 case P_WRITE_ACK:
8554df1c 5577 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5578 break;
5579 case P_RECV_ACK:
8554df1c 5580 what = RECV_ACKED_BY_PEER;
b411b363 5581 break;
d4dabbe2
LE
5582 case P_SUPERSEDED:
5583 what = CONFLICT_RESOLVED;
b411b363 5584 break;
7be8da07 5585 case P_RETRY_WRITE:
7be8da07 5586 what = POSTPONE_WRITE;
b411b363
PR
5587 break;
5588 default:
2735a594 5589 BUG();
b411b363
PR
5590 }
5591
b30ab791
AG
5592 return validate_req_change_req_state(device, p->block_id, sector,
5593 &device->write_requests, __func__,
2735a594 5594 what, false);
b411b363
PR
5595}
5596
bde89a9e 5597static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5598{
9f4fe9ad 5599 struct drbd_peer_device *peer_device;
b30ab791 5600 struct drbd_device *device;
e658983a 5601 struct p_block_ack *p = pi->data;
b411b363 5602 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5603 int size = be32_to_cpu(p->blksize);
85997675 5604 int err;
b411b363 5605
9f4fe9ad
AG
5606 peer_device = conn_peer_device(connection, pi->vnr);
5607 if (!peer_device)
2735a594 5608 return -EIO;
9f4fe9ad 5609 device = peer_device->device;
b411b363 5610
69a22773 5611 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5612
579b57ed 5613 if (p->block_id == ID_SYNCER) {
b30ab791
AG
5614 dec_rs_pending(device);
5615 drbd_rs_failed_io(device, sector, size);
2735a594 5616 return 0;
b411b363 5617 }
2deb8336 5618
b30ab791
AG
5619 err = validate_req_change_req_state(device, p->block_id, sector,
5620 &device->write_requests, __func__,
303d1448 5621 NEG_ACKED, true);
85997675 5622 if (err) {
c3afd8f5
AG
5623 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5624 The master bio might already be completed, therefore the
5625 request is no longer in the collision hash. */
5626 /* In Protocol B we might already have got a P_RECV_ACK
5627 but then get a P_NEG_ACK afterwards. */
b30ab791 5628 drbd_set_out_of_sync(device, sector, size);
2deb8336 5629 }
2735a594 5630 return 0;
b411b363
PR
5631}
5632
bde89a9e 5633static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5634{
9f4fe9ad 5635 struct drbd_peer_device *peer_device;
b30ab791 5636 struct drbd_device *device;
e658983a 5637 struct p_block_ack *p = pi->data;
b411b363
PR
5638 sector_t sector = be64_to_cpu(p->sector);
5639
9f4fe9ad
AG
5640 peer_device = conn_peer_device(connection, pi->vnr);
5641 if (!peer_device)
2735a594 5642 return -EIO;
9f4fe9ad 5643 device = peer_device->device;
1952e916 5644
69a22773 5645 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
7be8da07 5646
d0180171 5647 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5648 (unsigned long long)sector, be32_to_cpu(p->blksize));
5649
b30ab791
AG
5650 return validate_req_change_req_state(device, p->block_id, sector,
5651 &device->read_requests, __func__,
2735a594 5652 NEG_ACKED, false);
b411b363
PR
5653}
5654
bde89a9e 5655static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5656{
9f4fe9ad 5657 struct drbd_peer_device *peer_device;
b30ab791 5658 struct drbd_device *device;
b411b363
PR
5659 sector_t sector;
5660 int size;
e658983a 5661 struct p_block_ack *p = pi->data;
1952e916 5662
9f4fe9ad
AG
5663 peer_device = conn_peer_device(connection, pi->vnr);
5664 if (!peer_device)
2735a594 5665 return -EIO;
9f4fe9ad 5666 device = peer_device->device;
b411b363
PR
5667
5668 sector = be64_to_cpu(p->sector);
5669 size = be32_to_cpu(p->blksize);
b411b363 5670
69a22773 5671 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363 5672
b30ab791 5673 dec_rs_pending(device);
b411b363 5674
b30ab791
AG
5675 if (get_ldev_if_state(device, D_FAILED)) {
5676 drbd_rs_complete_io(device, sector);
e05e1e59 5677 switch (pi->cmd) {
d612d309 5678 case P_NEG_RS_DREPLY:
b30ab791 5679 drbd_rs_failed_io(device, sector, size);
d612d309
PR
5680 case P_RS_CANCEL:
5681 break;
5682 default:
2735a594 5683 BUG();
d612d309 5684 }
b30ab791 5685 put_ldev(device);
b411b363
PR
5686 }
5687
2735a594 5688 return 0;
b411b363
PR
5689}
5690
bde89a9e 5691static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5692{
e658983a 5693 struct p_barrier_ack *p = pi->data;
c06ece6b 5694 struct drbd_peer_device *peer_device;
9ed57dcb 5695 int vnr;
1952e916 5696
bde89a9e 5697 tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
b411b363 5698
9ed57dcb 5699 rcu_read_lock();
c06ece6b
AG
5700 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5701 struct drbd_device *device = peer_device->device;
5702
b30ab791
AG
5703 if (device->state.conn == C_AHEAD &&
5704 atomic_read(&device->ap_in_flight) == 0 &&
5705 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5706 device->start_resync_timer.expires = jiffies + HZ;
5707 add_timer(&device->start_resync_timer);
9ed57dcb 5708 }
c4752ef1 5709 }
9ed57dcb 5710 rcu_read_unlock();
c4752ef1 5711
2735a594 5712 return 0;
b411b363
PR
5713}
5714
bde89a9e 5715static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
b411b363 5716{
9f4fe9ad 5717 struct drbd_peer_device *peer_device;
b30ab791 5718 struct drbd_device *device;
e658983a 5719 struct p_block_ack *p = pi->data;
84b8c06b 5720 struct drbd_device_work *dw;
b411b363
PR
5721 sector_t sector;
5722 int size;
5723
9f4fe9ad
AG
5724 peer_device = conn_peer_device(connection, pi->vnr);
5725 if (!peer_device)
2735a594 5726 return -EIO;
9f4fe9ad 5727 device = peer_device->device;
1952e916 5728
b411b363
PR
5729 sector = be64_to_cpu(p->sector);
5730 size = be32_to_cpu(p->blksize);
5731
69a22773 5732 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
b411b363
PR
5733
5734 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
b30ab791 5735 drbd_ov_out_of_sync_found(device, sector, size);
b411b363 5736 else
b30ab791 5737 ov_out_of_sync_print(device);
b411b363 5738
b30ab791 5739 if (!get_ldev(device))
2735a594 5740 return 0;
1d53f09e 5741
b30ab791
AG
5742 drbd_rs_complete_io(device, sector);
5743 dec_rs_pending(device);
b411b363 5744
b30ab791 5745 --device->ov_left;
ea5442af
LE
5746
5747 /* let's advance progress step marks only for every other megabyte */
b30ab791
AG
5748 if ((device->ov_left & 0x200) == 0x200)
5749 drbd_advance_rs_marks(device, device->ov_left);
ea5442af 5750
b30ab791 5751 if (device->ov_left == 0) {
84b8c06b
AG
5752 dw = kmalloc(sizeof(*dw), GFP_NOIO);
5753 if (dw) {
5754 dw->w.cb = w_ov_finished;
5755 dw->device = device;
5756 drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
b411b363 5757 } else {
84b8c06b 5758 drbd_err(device, "kmalloc(dw) failed.");
b30ab791
AG
5759 ov_out_of_sync_print(device);
5760 drbd_resync_finished(device);
b411b363
PR
5761 }
5762 }
b30ab791 5763 put_ldev(device);
2735a594 5764 return 0;
b411b363
PR
5765}
5766
bde89a9e 5767static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
0ced55a3 5768{
2735a594 5769 return 0;
b411b363
PR
5770}
5771
668700b4
PR
5772struct meta_sock_cmd {
5773 size_t pkt_size;
5774 int (*fn)(struct drbd_connection *connection, struct packet_info *);
5775};
5776
5777static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
0ced55a3 5778{
668700b4
PR
5779 long t;
5780 struct net_conf *nc;
32862ec7 5781
668700b4
PR
5782 rcu_read_lock();
5783 nc = rcu_dereference(connection->net_conf);
5784 t = ping_timeout ? nc->ping_timeo : nc->ping_int;
5785 rcu_read_unlock();
c141ebda 5786
668700b4
PR
5787 t *= HZ;
5788 if (ping_timeout)
5789 t /= 10;
082a3439 5790
668700b4
PR
5791 connection->meta.socket->sk->sk_rcvtimeo = t;
5792}
32862ec7 5793
668700b4
PR
5794static void set_ping_timeout(struct drbd_connection *connection)
5795{
5796 set_rcvtimeo(connection, 1);
0ced55a3
PR
5797}
5798
668700b4
PR
5799static void set_idle_timeout(struct drbd_connection *connection)
5800{
5801 set_rcvtimeo(connection, 0);
5802}
b411b363 5803
668700b4 5804static struct meta_sock_cmd ack_receiver_tbl[] = {
e658983a
AG
5805 [P_PING] = { 0, got_Ping },
5806 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5807 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5808 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5809 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5810 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5811 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5812 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5813 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5814 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5815 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5816 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5817 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5818 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5819 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5820 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5821 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5822};
b411b363 5823
1c03e520 5824int drbd_ack_receiver(struct drbd_thread *thi)
b411b363 5825{
bde89a9e 5826 struct drbd_connection *connection = thi->connection;
668700b4 5827 struct meta_sock_cmd *cmd = NULL;
77351055 5828 struct packet_info pi;
668700b4 5829 unsigned long pre_recv_jif;
257d0af6 5830 int rv;
bde89a9e 5831 void *buf = connection->meta.rbuf;
b411b363 5832 int received = 0;
bde89a9e 5833 unsigned int header_size = drbd_header_size(connection);
52b061a4 5834 int expect = header_size;
44ed167d 5835 bool ping_timeout_active = false;
3990e04d 5836 struct sched_param param = { .sched_priority = 2 };
b411b363 5837
3990e04d
PR
5838 rv = sched_setscheduler(current, SCHED_RR, &param);
5839 if (rv < 0)
668700b4 5840 drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv);
b411b363 5841
e77a0a5c 5842 while (get_t_state(thi) == RUNNING) {
80822284 5843 drbd_thread_current_set_cpu(thi);
b411b363 5844
668700b4 5845 conn_reclaim_net_peer_reqs(connection);
44ed167d 5846
bde89a9e
AG
5847 if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5848 if (drbd_send_ping(connection)) {
1ec861eb 5849 drbd_err(connection, "drbd_send_ping has failed\n");
b411b363 5850 goto reconnect;
841ce241 5851 }
668700b4 5852 set_ping_timeout(connection);
44ed167d 5853 ping_timeout_active = true;
b411b363
PR
5854 }
5855
668700b4 5856 pre_recv_jif = jiffies;
bde89a9e 5857 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
b411b363
PR
5858
5859 /* Note:
5860 * -EINTR (on meta) we got a signal
5861 * -EAGAIN (on meta) rcvtimeo expired
5862 * -ECONNRESET other side closed the connection
5863 * -ERESTARTSYS (on data) we got a signal
5864 * rv < 0 other than above: unexpected error!
5865 * rv == expected: full header or command
5866 * rv < expected: "woken" by signal during receive
5867 * rv == 0 : "connection shut down by peer"
5868 */
5869 if (likely(rv > 0)) {
5870 received += rv;
5871 buf += rv;
5872 } else if (rv == 0) {
bde89a9e 5873 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
b66623e3
PR
5874 long t;
5875 rcu_read_lock();
bde89a9e 5876 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
b66623e3
PR
5877 rcu_read_unlock();
5878
bde89a9e
AG
5879 t = wait_event_timeout(connection->ping_wait,
5880 connection->cstate < C_WF_REPORT_PARAMS,
b66623e3 5881 t);
599377ac
PR
5882 if (t)
5883 break;
5884 }
1ec861eb 5885 drbd_err(connection, "meta connection shut down by peer.\n");
b411b363
PR
5886 goto reconnect;
5887 } else if (rv == -EAGAIN) {
cb6518cb
LE
5888 /* If the data socket received something meanwhile,
5889 * that is good enough: peer is still alive. */
668700b4 5890 if (time_after(connection->last_received, pre_recv_jif))
cb6518cb 5891 continue;
f36af18c 5892 if (ping_timeout_active) {
1ec861eb 5893 drbd_err(connection, "PingAck did not arrive in time.\n");
b411b363
PR
5894 goto reconnect;
5895 }
bde89a9e 5896 set_bit(SEND_PING, &connection->flags);
b411b363
PR
5897 continue;
5898 } else if (rv == -EINTR) {
668700b4
PR
5899 /* maybe drbd_thread_stop(): the while condition will notice.
5900 * maybe woken for send_ping: we'll send a ping above,
5901 * and change the rcvtimeo */
5902 flush_signals(current);
b411b363
PR
5903 continue;
5904 } else {
1ec861eb 5905 drbd_err(connection, "sock_recvmsg returned %d\n", rv);
b411b363
PR
5906 goto reconnect;
5907 }
5908
5909 if (received == expect && cmd == NULL) {
bde89a9e 5910 if (decode_header(connection, connection->meta.rbuf, &pi))
b411b363 5911 goto reconnect;
668700b4
PR
5912 cmd = &ack_receiver_tbl[pi.cmd];
5913 if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
1ec861eb 5914 drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
2fcb8f30 5915 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5916 goto disconnect;
5917 }
e658983a 5918 expect = header_size + cmd->pkt_size;
52b061a4 5919 if (pi.size != expect - header_size) {
1ec861eb 5920 drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 5921 pi.cmd, pi.size);
b411b363 5922 goto reconnect;
257d0af6 5923 }
b411b363
PR
5924 }
5925 if (received == expect) {
2735a594 5926 bool err;
a4fbda8e 5927
bde89a9e 5928 err = cmd->fn(connection, &pi);
2735a594 5929 if (err) {
1ec861eb 5930 drbd_err(connection, "%pf failed\n", cmd->fn);
b411b363 5931 goto reconnect;
1952e916 5932 }
b411b363 5933
bde89a9e 5934 connection->last_received = jiffies;
f36af18c 5935
668700b4
PR
5936 if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
5937 set_idle_timeout(connection);
44ed167d
PR
5938 ping_timeout_active = false;
5939 }
f36af18c 5940
bde89a9e 5941 buf = connection->meta.rbuf;
b411b363 5942 received = 0;
52b061a4 5943 expect = header_size;
b411b363
PR
5944 cmd = NULL;
5945 }
5946 }
5947
5948 if (0) {
5949reconnect:
bde89a9e
AG
5950 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5951 conn_md_sync(connection);
b411b363
PR
5952 }
5953 if (0) {
5954disconnect:
bde89a9e 5955 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 5956 }
b411b363 5957
668700b4 5958 drbd_info(connection, "ack_receiver terminated\n");
b411b363
PR
5959
5960 return 0;
5961}
668700b4
PR
5962
5963void drbd_send_acks_wf(struct work_struct *ws)
5964{
5965 struct drbd_peer_device *peer_device =
5966 container_of(ws, struct drbd_peer_device, send_acks_work);
5967 struct drbd_connection *connection = peer_device->connection;
5968 struct drbd_device *device = peer_device->device;
5969 struct net_conf *nc;
5970 int tcp_cork, err;
5971
5972 rcu_read_lock();
5973 nc = rcu_dereference(connection->net_conf);
5974 tcp_cork = nc->tcp_cork;
5975 rcu_read_unlock();
5976
5977 if (tcp_cork)
5978 drbd_tcp_cork(connection->meta.socket);
5979
5980 err = drbd_finish_peer_reqs(device);
5981 kref_put(&device->kref, drbd_destroy_device);
5982 /* get is in drbd_endio_write_sec_final(). That is necessary to keep the
5983 struct work_struct send_acks_work alive, which is in the peer_device object */
5984
5985 if (err) {
5986 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5987 return;
5988 }
5989
5990 if (tcp_cork)
5991 drbd_tcp_uncork(connection->meta.socket);
5992
5993 return;
5994}