libceph: separate msgr1 protocol implementation
[linux-block.git] / net/ceph/messenger.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

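/*
 * Illustrative lifecycle sketch (added; not part of the original file).
 * A typical msgr1 consumer embeds a ceph_connection, supplies its own
 * operations table (the names "s" and "my_con_ops" below are
 * hypothetical), and drives it roughly like this:
 *
 *	ceph_con_init(&s->con, s, &my_con_ops, &client->msgr);
 *	ceph_con_open(&s->con, CEPH_ENTITY_TYPE_OSD, osd_num, &addr);
 *	...				(* queue ceph_msg's for delivery *)
 *	ceph_con_close(&s->con);	(* drop the session *)
 *
 * Faults are handled internally by requeueing and reconnecting with
 * backoff, so callers see ordered, reliable delivery without managing
 * the socket themselves.
 */
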
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------               \    \
 *       |  | CLOSING |  socket event; \    \
 *       |  -----------  await close    \    \
 *       |       ^                       \   |
 *       |       |                        \  |
 *       |       + con_sock_state_closing()\ |
 *       |      / \                        | |
 *       |     /   ---------------         | |
 *       |    /                   \        v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       | |    /                 --------------  connect initiated
 *       | |   | con_sock_state_connected()
 *       | |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

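/*
 * Worked example (added for illustration, derived from the diagram
 * above): a successful connect followed by a remote hangup walks
 * sock_state through
 *
 *	NEW        -> CLOSED      con_sock_state_init()
 *	CLOSED     -> CONNECTING  con_sock_state_connecting()
 *	CONNECTING -> CONNECTED   con_sock_state_connected()
 *	CONNECTED  -> CLOSING     con_sock_state_closing()  (TCP_CLOSE_WAIT)
 *	CLOSING    -> CLOSED      con_sock_state_closed()
 *
 * Each step is an atomic_xchg() that warns if the previous state is
 * not one the diagram allows.
 */
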
static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CEPH_CON_F_LOSSYTX:
	case CEPH_CON_F_KEEPALIVE_PENDING:
	case CEPH_CON_F_WRITE_PENDING:
	case CEPH_CON_F_SOCK_CLOSED:
	case CEPH_CON_F_BACKOFF:
		return true;
	default:
		return false;
	}
}

void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

bool ceph_con_flag_test_and_set(struct ceph_connection *con,
				unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}
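
/*
 * Usage sketch (illustrative, added): these accessors wrap atomic
 * bitops on con->flags so that different contexts can coordinate.
 * For example, a keepalive request is recorded and the worker kicked
 * roughly like
 *
 *	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_KEEPALIVE_PENDING) &&
 *	    !ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
 *		queue_con(con);
 *
 * and the write path later consumes the request with
 * ceph_con_flag_test_and_clear().
 */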

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

struct page *ceph_zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
{
	int i;
	char *s;
	struct sockaddr_storage ss = addr->in_addr; /* align */
	struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss.ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
			 le32_to_cpu(addr->type), &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
			 le32_to_cpu(addr->type), &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss.ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
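
/*
 * Example output (added for illustration): given the format strings
 * above, an IPv4 address renders as "(1)192.168.0.1:6789" and an IPv6
 * one as "(1)[::1]:6789", where the parenthesized number is the
 * little-endian addr->type field.  Up to ADDR_STR_COUNT strings can be
 * live at once before the ring buffer recycles a slot, which is what
 * "approximate reentrancy" means above.
 */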

void ceph_encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_banner_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
	if (!ceph_msg_cache)
		return -ENOMEM;

	return 0;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(!ceph_zero_page);
	put_page(ceph_zero_page);
	ceph_zero_page = NULL;

	ceph_msgr_slab_exit();
}

int __init ceph_msgr_init(void)
{
	if (ceph_msgr_slab_init())
		return -ENOMEM;

	BUG_ON(ceph_zero_page);
	ceph_zero_page = ZERO_PAGE(0);
	get_page(ceph_zero_page);

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s %p state = %d, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %d sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		fallthrough;
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}

/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
	struct socket *sock;
	unsigned int noio_flag;
	int ret;

	dout("%s con %p peer_addr %s\n", __func__, con,
	     ceph_pr_addr(&con->peer_addr));
	BUG_ON(con->sock);

	/* sock_create_kern() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	memalloc_noio_restore(noio_flag);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr), ret);
		sock_release(sock);
		return ret;
	}

	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
		tcp_sock_set_nodelay(sock->sk);

	con->sock = sock;
	return 0;
}

/*
 * If @buf is NULL, discard up to @len bytes.
 */
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (!buf)
		msg.msg_flags |= MSG_TRUNC;

	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
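
/*
 * Illustrative use of the NULL-@buf form (added comment): the read
 * path can drop bytes it does not want to store, e.g. to skip the
 * remainder of a revoked incoming message:
 *
 *	ret = ceph_tcp_recvmsg(con->sock, NULL, to_skip);
 *
 * MSG_TRUNC lets the socket discard the data without copying it
 * anywhere.
 */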

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
	struct bio_vec bvec = {
		.bv_page = page,
		.bv_offset = page_offset,
		.bv_len = length
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	BUG_ON(page_offset + length > PAGE_SIZE);
	iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, bool more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
 */
static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	ssize_t (*sendpage)(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
	int ret;

	/*
	 * sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case.
	 *
	 * Same goes for slab pages: skb_can_coalesce() allows
	 * coalescing neighboring slab objects into a single frag which
	 * triggers one of hardened usercopy checks.
	 */
	if (sendpage_ok(page))
		sendpage = sock->ops->sendpage;
	else
		sendpage = sock_no_sendpage;

	ret = sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
int ceph_con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("%s con %p sock %p\n", __func__, con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

void ceph_con_v1_reset_protocol(struct ceph_connection *con)
{
	con->out_skip = 0;
}

static void ceph_con_reset_protocol(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);

	ceph_con_close_socket(con);
	if (con->in_msg) {
		WARN_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}
	if (con->out_msg) {
		WARN_ON(con->out_msg->con != con);
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}

	ceph_con_v1_reset_protocol(con);
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);

	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

void ceph_con_v1_reset_session(struct ceph_connection *con)
{
	con->connect_seq = 0;
	con->peer_global_seq = 0;
}

void ceph_con_reset_session(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);

	WARN_ON(con->in_msg);
	WARN_ON(con->out_msg);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);
	con->out_seq = 0;
	con->in_seq = 0;
	con->in_seq_acked = 0;

	ceph_con_v1_reset_session(con);
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
	con->state = CEPH_CON_S_CLOSED;

	ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX);	/* so we retry next
							   connect */
	ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
	ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);

	ceph_con_reset_protocol(con);
	ceph_con_reset_session(con);
	cancel_con(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(addr));

	WARN_ON(con->state != CEPH_CON_S_CLOSED);
	con->state = CEPH_CON_S_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
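
/*
 * Usage sketch (illustrative, added): the monitor client opens its
 * session with something like the following (field names here are
 * an assumption, shown only for flavor):
 *
 *	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
 *		      &monc->monmap->mon_inst[monc->cur_mon].addr);
 *
 * The connection must be CLOSED when this is called; queue_con()
 * then kicks the workqueue to run the connect handshake.
 */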

bool ceph_con_v1_opened(struct ceph_connection *con)
{
	return con->connect_seq;
}

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return ceph_con_v1_opened(con);
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);

	con->state = CEPH_CON_S_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
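
/*
 * Worked example (added for illustration): if the peer rejects our
 * connect attempt with RETRY_GLOBAL and reports a global_seq of G,
 * the next attempt calls ceph_get_global_seq(msgr, G), which
 * guarantees the new value is at least G + 1 and therefore strictly
 * greater than anything the peer has already seen from us.
 */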

/*
 * Discard messages that have been acked by the server.
 */
void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
{
	struct ceph_msg *msg;
	u64 seq;

	dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
	while (!list_empty(&con->out_sent)) {
		msg = list_first_entry(&con->out_sent, struct ceph_msg,
				       list_head);
		WARN_ON(msg->needs_out_seq);
		seq = le64_to_cpu(msg->hdr.seq);
		if (seq > ack_seq)
			break;

		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
		     msg, seq);
		ceph_msg_remove(msg);
	}
}

/*
 * Discard messages that have been requeued in con_fault(), up to
 * reconnect_seq.  This avoids gratuitously resending messages that
 * the server had received and handled prior to reconnect.
 */
void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
{
	struct ceph_msg *msg;
	u64 seq;

	dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
	while (!list_empty(&con->out_queue)) {
		msg = list_first_entry(&con->out_queue, struct ceph_msg,
				       list_head);
		if (msg->needs_out_seq)
			break;
		seq = le64_to_cpu(msg->hdr.seq);
		if (seq > reconnect_seq)
			break;

		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
		     msg, seq);
		ceph_msg_remove(msg);
	}
}
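
/*
 * Timeline sketch (added for illustration): suppose messages with
 * seqs 1..5 were sent and the peer acks seq 3.
 * ceph_con_discard_sent(con, 3) frees 1-3 from out_sent.  If a fault
 * then requeues 4 and 5 onto out_queue and the peer reconnects
 * claiming it already handled seq 4, ceph_con_discard_requeued(con, 4)
 * drops message 4 instead of resending it.
 */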

static void con_out_kvec_reset(struct ceph_connection *con)
{
	BUG_ON(con->out_skip);

	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index = con->out_kvec_left;

	BUG_ON(con->out_skip);
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

/*
 * Chop off a kvec from the end.  Return residual number of bytes for
 * that kvec, i.e. how many bytes would have been written if the kvec
 * hadn't been nuked.
 */
static int con_out_kvec_skip(struct ceph_connection *con)
{
	int off = con->out_kvec_cur - con->out_kvec;
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
		BUG_ON(con->out_kvec_bytes < skip);
		BUG_ON(!con->out_kvec_left);
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}

	return skip;
}
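
/*
 * Illustrative note (added): con_out_kvec_skip() supports message
 * revocation.  If a queued message is revoked after some of its kvecs
 * were handed to the socket, the unwritten tail is chopped off here
 * and the bytes already committed to the wire are padded out with
 * zeros via con->out_skip (see write_partial_skip() below), keeping
 * the stream correctly framed for the peer.
 */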

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					  size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_bio_iter *it = &cursor->bio_iter;

	cursor->resid = min_t(size_t, length, data->bio_length);
	*it = data->bio_pos;
	if (cursor->resid < it->iter.bi_size)
		it->iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
					   size_t *page_offset,
					   size_t *length)
{
	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
					   cursor->bio_iter.iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
				      size_t bytes)
{
	struct ceph_bio_iter *it = &cursor->bio_iter;
	struct page *page = bio_iter_page(it->bio, it->iter);

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
	cursor->resid -= bytes;
	bio_advance_iter(it->bio, &it->iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
		       page == bio_iter_page(it->bio, it->iter)))
		return false;	/* more bytes to process in this segment */

	if (!it->iter.bi_size) {
		it->bio = it->bio->bi_next;
		it->iter = it->bio->bi_iter;
		if (cursor->resid < it->iter.bi_size)
			it->iter.bi_size = cursor->resid;
	}

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
	return true;
}
#endif /* CONFIG_BLOCK */

static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio_vec *bvecs = data->bvec_pos.bvecs;

	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
	cursor->bvec_iter = data->bvec_pos.iter;
	cursor->bvec_iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
					     size_t *page_offset,
					     size_t *length)
{
	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
					   cursor->bvec_iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->resid -= bytes;
	bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
		return false;	/* more bytes to process in this segment */

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
	return true;
}

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
			 size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
				   size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
			    size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
					   size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_next_entry(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		ceph_msg_data_bvecs_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
			       struct ceph_msg *msg, size_t length)
{
	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(!msg->num_data_items);

	cursor->total_resid = length;
	cursor->data = msg->data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
				size_t *page_offset, size_t *length,
				bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}

	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	BUG_ON(*length > cursor->resid);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Advance the cursor.  cursor->need_crc is set if the result moves
 * the cursor on to the next piece of the data item.
 */
void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		cursor->data++;
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;
}
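
/*
 * Consumption-loop sketch (added for illustration; compare
 * write_partial_message_data() below, where "send_bytes" is a
 * hypothetical stand-in for the actual socket call):
 *
 *	while (cursor->total_resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, NULL);
 *		ret = send_bytes(page, off, len);
 *		if (ret <= 0)
 *			break;
 *		ceph_msg_data_advance(cursor, ret);
 *	}
 *
 * ceph_msg_data_next() never moves the cursor; only
 * ceph_msg_data_advance() does, so a short write simply resumes from
 * the same piece on the next pass.
 */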

static size_t sizeof_footer(struct ceph_connection *con)
{
	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
	    sizeof(struct ceph_msg_footer) :
	    sizeof(struct ceph_msg_footer_old);
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	/* Initialize data cursor */

	ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con_out_kvec_add(con, sizeof_footer(con), &m->footer);
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(m);
		else
			m->footer.sig = 0;
	} else {
		m->old_footer.flags = m->footer.flags;
	}
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	ceph_con_get_out_msg(con);
	m = con->out_msg;

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in hdr crc and finalize hdr */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));

	/* fill in front and middle crc, footer */
	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));
	con->out_msg->footer.flags = 0;

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}
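
/*
 * Wire-format sketch (added for illustration, reflecting the kvecs
 * queued above): a full msgr1 message goes out as
 *
 *	[TAG_MSG][ceph_msg_header][front][middle?][data?][footer]
 *
 * where the header carries its own crc32c, the front/middle crcs live
 * in the footer, and the data crc is accumulated as pages are sent.
 * An outstanding ack may be prepended so it rides in the same TCP
 * segment.
 */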

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
		struct timespec64 now;

		ktime_get_real_ts64(&now);
		con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
		ceph_encode_timespec64(&con->out_temp_keepalive2, &now);
		con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
				 &con->out_temp_keepalive2);
	} else {
		con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
	}
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}

/*
 * Connection negotiation.
 */

static int get_connect_authorizer(struct ceph_connection *con)
{
	struct ceph_auth_handshake *auth;
	int auth_proto;

	if (!con->ops->get_authorizer) {
		con->auth = NULL;
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return 0;
	}

	auth = con->ops->get_authorizer(con, &auth_proto, con->auth_retry);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->auth = auth;
	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = cpu_to_le32(auth->authorizer_buf_len);
	return 0;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}

static void __prepare_write_connect(struct ceph_connection *con)
{
	con_out_kvec_add(con, sizeof(con->out_connect), &con->out_connect);
	if (con->auth)
		con_out_kvec_add(con, con->auth->authorizer_buf_len,
				 con->auth->authorizer_buf);

	con->out_more = 0;
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = ceph_get_global_seq(con->msgr, 0);
	int proto;
	int ret;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features =
	    cpu_to_le64(from_msgr(con->msgr)->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	ret = get_connect_authorizer(con);
	if (ret)
		return ret;

	__prepare_write_connect(con);
	return 0;
}
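
/*
 * Handshake sketch (added for illustration): on a fresh socket the
 * client queues, in order,
 *
 *	the CEPH_BANNER magic string + our encoded address,
 *	ceph_msg_connect (features, host type, connect/global seq,
 *			  protocol version, authorizer protocol/len),
 *	the authorizer payload, if con->ops->get_authorizer supplied one,
 *
 * and then reads back the peer's banner, addresses, and
 * ceph_msg_connect_reply -- see read_partial_banner() and
 * read_partial_connect() below.
 */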

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
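
/*
 * Caller pattern (added for illustration): try_write()-style code
 * treats the tri-state return roughly as
 *
 *	ret = write_partial_kvec(con);
 *	if (ret == 0)
 *		return 0;	(* socket full; wait for write_space *)
 *	if (ret < 0)
 *		return ret;	(* fault *)
 *	(* ret == 1: kvecs drained, move on to the data payload *)
 */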

u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
		     unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (!msg->num_data_items)
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->total_resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		int ret;

		if (!cursor->resid) {
			ceph_msg_data_advance(cursor, 0);
			continue;
		}

		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
		if (length == cursor->total_resid)
			more = MSG_MORE;
		ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
					more);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		ceph_msg_data_advance(cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
	int ret;

	dout("%s %p %d left\n", __func__, con, con->out_skip);
	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_SIZE);

		if (size == con->out_skip)
			more = MSG_MORE;
		ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size,
					more);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

static void prepare_read_keepalive_ack(struct ceph_connection *con)
{
	dout("prepare_read_keepalive_ack %p\n", con);
	con->in_base_pos = 0;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
1722
1723
1724static int read_partial(struct ceph_connection *con,
fd51653f 1725 int end, int size, void *object)
31b8006e 1726{
e6cee71f
AE
1727 while (con->in_base_pos < end) {
1728 int left = end - con->in_base_pos;
31b8006e
SW
1729 int have = size - left;
1730 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1731 if (ret <= 0)
1732 return ret;
1733 con->in_base_pos += ret;
1734 }
1735 return 1;
1736}
1737
1738
1739/*
1740 * Read all or part of the connect-side handshake on a new connection
1741 */
eed0ef2c 1742static int read_partial_banner(struct ceph_connection *con)
31b8006e 1743{
fd51653f
AE
1744 int size;
1745 int end;
1746 int ret;
31b8006e 1747
eed0ef2c 1748 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
31b8006e
SW
1749
1750 /* peer's banner */
fd51653f
AE
1751 size = strlen(CEPH_BANNER);
1752 end = size;
1753 ret = read_partial(con, end, size, con->in_banner);
31b8006e
SW
1754 if (ret <= 0)
1755 goto out;
fd51653f
AE
1756
1757 size = sizeof (con->actual_peer_addr);
1758 end += size;
1759 ret = read_partial(con, end, size, &con->actual_peer_addr);
31b8006e
SW
1760 if (ret <= 0)
1761 goto out;
2c66de56 1762 ceph_decode_banner_addr(&con->actual_peer_addr);
fd51653f
AE
1763
1764 size = sizeof (con->peer_addr_for_me);
1765 end += size;
1766 ret = read_partial(con, end, size, &con->peer_addr_for_me);
31b8006e
SW
1767 if (ret <= 0)
1768 goto out;
2c66de56 1769 ceph_decode_banner_addr(&con->peer_addr_for_me);
fd51653f 1770
eed0ef2c
SW
1771out:
1772 return ret;
1773}
1774
1775static int read_partial_connect(struct ceph_connection *con)
1776{
fd51653f
AE
1777 int size;
1778 int end;
1779 int ret;
eed0ef2c
SW
1780
1781 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1782
fd51653f
AE
1783 size = sizeof (con->in_reply);
1784 end = size;
1785 ret = read_partial(con, end, size, &con->in_reply);
31b8006e
SW
1786 if (ret <= 0)
1787 goto out;
fd51653f 1788
262614c4
ID
1789 if (con->auth) {
1790 size = le32_to_cpu(con->in_reply.authorizer_len);
130f52f2
ID
1791 if (size > con->auth->authorizer_reply_buf_len) {
1792 pr_err("authorizer reply too big: %d > %zu\n", size,
1793 con->auth->authorizer_reply_buf_len);
1794 ret = -EINVAL;
1795 goto out;
1796 }
1797
262614c4
ID
1798 end += size;
1799 ret = read_partial(con, end, size,
1800 con->auth->authorizer_reply_buf);
1801 if (ret <= 0)
1802 goto out;
1803 }
31b8006e 1804
4e7a5dcd
SW
1805 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1806 con, (int)con->in_reply.tag,
1807 le32_to_cpu(con->in_reply.connect_seq),
31b8006e
SW
1808 le32_to_cpu(con->in_reply.global_seq));
1809out:
1810 return ret;
1811}
1812
1813/*
1814 * Verify the hello banner looks okay.
1815 */
1816static int verify_hello(struct ceph_connection *con)
1817{
1818 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
13e38c8a 1819 pr_err("connect to %s got bad banner\n",
b726ec97 1820 ceph_pr_addr(&con->peer_addr));
31b8006e
SW
1821 con->error_msg = "protocol error, bad banner";
1822 return -1;
1823 }
1824 return 0;
1825}
1826
6503e0b6 1827bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
31b8006e 1828{
cede185b
JL
1829 struct sockaddr_storage ss = addr->in_addr; /* align */
1830 struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
1831 struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
c44bd69c 1832
cede185b 1833 switch (ss.ss_family) {
31b8006e 1834 case AF_INET:
cede185b 1835 return addr4->s_addr == htonl(INADDR_ANY);
31b8006e 1836 case AF_INET6:
c44bd69c
ID
1837 return ipv6_addr_any(addr6);
1838 default:
1839 return true;
31b8006e 1840 }
31b8006e
SW
1841}
1842
6503e0b6 1843int ceph_addr_port(const struct ceph_entity_addr *addr)
31b8006e 1844{
cede185b 1845 switch (get_unaligned(&addr->in_addr.ss_family)) {
31b8006e 1846 case AF_INET:
cede185b 1847 return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
31b8006e 1848 case AF_INET6:
cede185b 1849 return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
31b8006e
SW
1850 }
1851 return 0;
1852}
1853
6503e0b6 1854void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
31b8006e 1855{
cede185b 1856 switch (get_unaligned(&addr->in_addr.ss_family)) {
31b8006e 1857 case AF_INET:
cede185b 1858 put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
a2a79609 1859 break;
31b8006e 1860 case AF_INET6:
cede185b 1861 put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
a2a79609 1862 break;
31b8006e
SW
1863 }
1864}
1865
ee3b56f2
NW
1866/*
1867 * Unlike the other *_pton functions, zero here indicates success.
1868 */
cede185b 1869static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
ee3b56f2
NW
1870 char delim, const char **ipend)
1871{
cede185b 1872 memset(&addr->in_addr, 0, sizeof(addr->in_addr));
ee3b56f2 1873
cede185b
JL
1874 if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
1875 put_unaligned(AF_INET, &addr->in_addr.ss_family);
ee3b56f2
NW
1876 return 0;
1877 }
1878
cede185b
JL
1879 if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
1880 put_unaligned(AF_INET6, &addr->in_addr.ss_family);
ee3b56f2
NW
1881 return 0;
1882 }
1883
1884 return -EINVAL;
1885}
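/*
 * Illustrative sketch, not part of this file: a userspace analogue of
 * the inverted return convention noted above -- inet_pton() returns 1 on
 * success, while this wrapper, like ceph_pton(), returns 0 on success
 * and -EINVAL otherwise (the kernel helper also takes a delimiter and
 * reports where parsing stopped, which is omitted here).
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>

static int demo_pton(const char *str, struct sockaddr_storage *ss)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *)ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)ss;

	if (inet_pton(AF_INET, str, &in4->sin_addr) == 1) {
		ss->ss_family = AF_INET;
		return 0;
	}
	if (inet_pton(AF_INET6, str, &in6->sin6_addr) == 1) {
		ss->ss_family = AF_INET6;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	struct sockaddr_storage ss;

	printf("%d %d\n", demo_pton("10.0.0.1", &ss),
	       demo_pton("not-an-ip", &ss));	/* prints: 0 -22 */
	return 0;
}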
1886
1887/*
1888 * Extract hostname string and resolve using kernel DNS facility.
1889 */
1890#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1891static int ceph_dns_resolve_name(const char *name, size_t namelen,
cede185b 1892 struct ceph_entity_addr *addr, char delim, const char **ipend)
ee3b56f2
NW
1893{
1894 const char *end, *delim_p;
1895 char *colon_p, *ip_addr = NULL;
1896 int ip_len, ret;
1897
1898 /*
1899 * The hostname ends immediately before the delimiter or the port
1900 * marker (':'), whichever of the two occurs first.
1901 */
1902 delim_p = memchr(name, delim, namelen);
1903 colon_p = memchr(name, ':', namelen);
1904
1905 if (delim_p && colon_p)
1906 end = delim_p < colon_p ? delim_p : colon_p;
1907 else if (!delim_p && colon_p)
1908 end = colon_p;
1909 else {
1910 end = delim_p;
1911 if (!end) /* case: hostname:/ */
1912 end = name + namelen;
1913 }
1914
1915 if (end <= name)
1916 return -EINVAL;
1917
1918 /* do dns_resolve upcall */
a58946c1
DH
1919 ip_len = dns_query(current->nsproxy->net_ns,
1920 NULL, name, end - name, NULL, &ip_addr, NULL, false);
ee3b56f2 1921 if (ip_len > 0)
cede185b 1922 ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
ee3b56f2
NW
1923 else
1924 ret = -ESRCH;
1925
1926 kfree(ip_addr);
1927
1928 *ipend = end;
1929
1930 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
b726ec97 1931 ret, ret ? "failed" : ceph_pr_addr(addr));
ee3b56f2
NW
1932
1933 return ret;
1934}
1935#else
1936static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
cede185b 1937 struct ceph_entity_addr *addr, char delim, const char **ipend)
ee3b56f2
NW
1938{
1939 return -EINVAL;
1940}
1941#endif
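/*
 * Illustrative sketch, not part of this file: the hostname-end rule from
 * ceph_dns_resolve_name() above, modelled standalone.  Given
 * "gateway:6789,next" with delim ',', the name ends at the earlier of
 * the delimiter and the ':' port marker; with neither present it runs
 * to the end of the buffer.
 */
#include <stdio.h>
#include <string.h>

static const char *demo_name_end(const char *name, size_t namelen, char delim)
{
	const char *delim_p = memchr(name, delim, namelen);
	const char *colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		return delim_p < colon_p ? delim_p : colon_p;
	if (colon_p)
		return colon_p;
	return delim_p ? delim_p : name + namelen;	/* neither found */
}

int main(void)
{
	const char *s = "gateway:6789,next";
	const char *end = demo_name_end(s, strlen(s), ',');

	printf("hostname='%.*s'\n", (int)(end - s), s);	/* gateway */
	return 0;
}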
1942
1943/*
1944 * Parse a server name (IP or hostname). If a valid IP address is not found
1945 * then try to extract a hostname to resolve using userspace DNS upcall.
1946 */
1947static int ceph_parse_server_name(const char *name, size_t namelen,
cede185b 1948 struct ceph_entity_addr *addr, char delim, const char **ipend)
ee3b56f2
NW
1949{
1950 int ret;
1951
cede185b 1952 ret = ceph_pton(name, namelen, addr, delim, ipend);
ee3b56f2 1953 if (ret)
cede185b 1954 ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
ee3b56f2
NW
1955
1956 return ret;
1957}
1958
31b8006e
SW
1959/*
1960 * Parse an ip[:port] list into an addr array. Use the default
1961 * monitor port if a port isn't specified.
1962 */
1963int ceph_parse_ips(const char *c, const char *end,
1964 struct ceph_entity_addr *addr,
1965 int max_count, int *count)
1966{
ee3b56f2 1967 int i, ret = -EINVAL;
31b8006e
SW
1968 const char *p = c;
1969
1970 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1971 for (i = 0; i < max_count; i++) {
1972 const char *ipend;
31b8006e 1973 int port;
39139f64
SW
1974 char delim = ',';
1975
1976 if (*p == '[') {
1977 delim = ']';
1978 p++;
1979 }
31b8006e 1980
cede185b 1981 ret = ceph_parse_server_name(p, end - p, &addr[i], delim, &ipend);
ee3b56f2 1982 if (ret)
31b8006e 1983 goto bad;
ee3b56f2
NW
1984 ret = -EINVAL;
1985
31b8006e
SW
1986 p = ipend;
1987
39139f64
SW
1988 if (delim == ']') {
1989 if (*p != ']') {
1990 dout("missing matching ']'\n");
1991 goto bad;
1992 }
1993 p++;
1994 }
1995
31b8006e
SW
1996 /* port? */
1997 if (p < end && *p == ':') {
1998 port = 0;
1999 p++;
2000 while (p < end && *p >= '0' && *p <= '9') {
2001 port = (port * 10) + (*p - '0');
2002 p++;
2003 }
f48db1e9
ID
2004 if (port == 0)
2005 port = CEPH_MON_PORT;
2006 else if (port > 65535)
31b8006e
SW
2007 goto bad;
2008 } else {
2009 port = CEPH_MON_PORT;
2010 }
2011
6503e0b6 2012 ceph_addr_set_port(&addr[i], port);
d3c3c0a8 2013 addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
31b8006e 2014
b726ec97 2015 dout("parse_ips got %s\n", ceph_pr_addr(&addr[i]));
31b8006e
SW
2016
2017 if (p == end)
2018 break;
2019 if (*p != ',')
2020 goto bad;
2021 p++;
2022 }
2023
2024 if (p != end)
2025 goto bad;
2026
2027 if (count)
2028 *count = i + 1;
2029 return 0;
2030
2031bad:
ee3b56f2 2032 return ret;
31b8006e
SW
2033}
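/*
 * Illustrative sketch, not part of this file: the port rules applied per
 * address in ceph_parse_ips() above, as a standalone helper -- digits
 * after ':' accumulate in base 10, an absent or zero port falls back to
 * the monitor default, and anything above 65535 is rejected.
 * DEMO_MON_PORT mirrors CEPH_MON_PORT (6789).
 */
#include <stdio.h>
#include <string.h>

#define DEMO_MON_PORT 6789

static int demo_parse_port(const char *p, const char *end, int *port)
{
	*port = DEMO_MON_PORT;			/* no ':' -> default */
	if (p >= end || *p != ':')
		return 0;

	*port = 0;
	for (p++; p < end && *p >= '0' && *p <= '9'; p++)
		*port = *port * 10 + (*p - '0');

	if (*port == 0)				/* ":0" -> default too */
		*port = DEMO_MON_PORT;
	else if (*port > 65535)
		return -1;
	return 0;
}

int main(void)
{
	const char *cases[] = { "", ":0", ":3300", ":70000" };
	int port, i;

	for (i = 0; i < 4; i++) {
		int bad = demo_parse_port(cases[i],
					  cases[i] + strlen(cases[i]), &port);

		printf("'%s' -> %s %d\n", cases[i], bad ? "bad" : "ok", port);
	}
	return 0;	/* 6789, 6789, 3300, then "bad" */
}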
2034
eed0ef2c 2035static int process_banner(struct ceph_connection *con)
31b8006e 2036{
fd1a154c
ID
2037 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
2038
eed0ef2c 2039 dout("process_banner on %p\n", con);
31b8006e
SW
2040
2041 if (verify_hello(con) < 0)
2042 return -1;
2043
2044 /*
2045 * Make sure the other end is who we wanted. Note that the other
2046 * end may not yet know their IP address, so if it's 0.0.0.0, give
2047 * them the benefit of the doubt.
2048 */
103e2d3a
SW
2049 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
2050 sizeof(con->peer_addr)) != 0 &&
6503e0b6 2051 !(ceph_addr_is_blank(&con->actual_peer_addr) &&
31b8006e 2052 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
a9dfe31e 2053 pr_warn("wrong peer, want %s/%u, got %s/%u\n",
b726ec97 2054 ceph_pr_addr(&con->peer_addr),
a9dfe31e 2055 le32_to_cpu(con->peer_addr.nonce),
b726ec97 2056 ceph_pr_addr(&con->actual_peer_addr),
a9dfe31e 2057 le32_to_cpu(con->actual_peer_addr.nonce));
58bb3b37 2058 con->error_msg = "wrong peer at address";
31b8006e
SW
2059 return -1;
2060 }
2061
2062 /*
2063 * did we learn our address?
2064 */
6503e0b6 2065 if (ceph_addr_is_blank(my_addr)) {
fd1a154c 2066 memcpy(&my_addr->in_addr,
31b8006e
SW
2067 &con->peer_addr_for_me.in_addr,
2068 sizeof(con->peer_addr_for_me.in_addr));
6503e0b6
ID
2069 ceph_addr_set_port(my_addr, 0);
2070 ceph_encode_my_addr(con->msgr);
eed0ef2c 2071 dout("process_banner learned my addr is %s\n",
fd1a154c 2072 ceph_pr_addr(my_addr));
31b8006e
SW
2073 }
2074
eed0ef2c
SW
2075 return 0;
2076}
2077
2078static int process_connect(struct ceph_connection *con)
2079{
859bff51
ID
2080 u64 sup_feat = from_msgr(con->msgr)->supported_features;
2081 u64 req_feat = from_msgr(con->msgr)->required_features;
dcbbd97c 2082 u64 server_feat = le64_to_cpu(con->in_reply.features);
0da5d703 2083 int ret;
04a419f9 2084
eed0ef2c
SW
2085 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2086
262614c4 2087 if (con->auth) {
0fd3fd0a
ID
2088 int len = le32_to_cpu(con->in_reply.authorizer_len);
2089
5c056fdc
ID
2090 /*
2091 * Any connection that defines ->get_authorizer()
6daca13d
ID
2092 * should also define ->add_authorizer_challenge() and
2093 * ->verify_authorizer_reply().
2094 *
5c056fdc
ID
2095 * See get_connect_authorizer().
2096 */
6daca13d
ID
2097 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
2098 ret = con->ops->add_authorizer_challenge(
0fd3fd0a 2099 con, con->auth->authorizer_reply_buf, len);
6daca13d
ID
2100 if (ret < 0)
2101 return ret;
2102
2103 con_out_kvec_reset(con);
2104 __prepare_write_connect(con);
2105 prepare_read_connect(con);
2106 return 0;
2107 }
2108
0fd3fd0a
ID
2109 if (len) {
2110 ret = con->ops->verify_authorizer_reply(con);
2111 if (ret < 0) {
2112 con->error_msg = "bad authorize reply";
2113 return ret;
2114 }
5c056fdc
ID
2115 }
2116 }
2117
31b8006e 2118 switch (con->in_reply.tag) {
04a419f9
SW
2119 case CEPH_MSGR_TAG_FEATURES:
2120 pr_err("%s%lld %s feature set mismatch,"
2121 " my %llx < server's %llx, missing %llx\n",
2122 ENTITY_NAME(con->peer_name),
b726ec97 2123 ceph_pr_addr(&con->peer_addr),
04a419f9
SW
2124 sup_feat, server_feat, server_feat & ~sup_feat);
2125 con->error_msg = "missing required protocol features";
04a419f9
SW
2126 return -1;
2127
31b8006e 2128 case CEPH_MSGR_TAG_BADPROTOVER:
31b8006e
SW
2129 pr_err("%s%lld %s protocol version mismatch,"
2130 " my %d != server's %d\n",
2131 ENTITY_NAME(con->peer_name),
b726ec97 2132 ceph_pr_addr(&con->peer_addr),
31b8006e
SW
2133 le32_to_cpu(con->out_connect.protocol_version),
2134 le32_to_cpu(con->in_reply.protocol_version));
2135 con->error_msg = "protocol version mismatch";
31b8006e
SW
2136 return -1;
2137
4e7a5dcd
SW
2138 case CEPH_MSGR_TAG_BADAUTHORIZER:
2139 con->auth_retry++;
2140 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2141 con->auth_retry);
2142 if (con->auth_retry == 2) {
2143 con->error_msg = "connect authorization failure";
4e7a5dcd
SW
2144 return -1;
2145 }
6d4221b5 2146 con_out_kvec_reset(con);
e825a66d 2147 ret = prepare_write_connect(con);
0da5d703
SW
2148 if (ret < 0)
2149 return ret;
63733a0f 2150 prepare_read_connect(con);
4e7a5dcd 2151 break;
31b8006e
SW
2152
2153 case CEPH_MSGR_TAG_RESETSESSION:
2154 /*
2155 * If we connected with a large connect_seq but the peer
2156 * has no record of a session with us (no connection, or
2157 * connect_seq == 0), they will send RESETSESSION to indicate
2158 * that they must have reset their session, and may have
2159 * dropped messages.
2160 */
2161 dout("process_connect got RESET peer seq %u\n",
5bdca4e0 2162 le32_to_cpu(con->in_reply.connect_seq));
d3c1248c
ID
2163 pr_info("%s%lld %s session reset\n",
2164 ENTITY_NAME(con->peer_name),
2165 ceph_pr_addr(&con->peer_addr));
5963c3d0 2166 ceph_con_reset_session(con);
6d4221b5 2167 con_out_kvec_reset(con);
5a0f8fdd
AE
2168 ret = prepare_write_connect(con);
2169 if (ret < 0)
2170 return ret;
31b8006e
SW
2171 prepare_read_connect(con);
2172
2173 /* Tell ceph about it. */
ec302645 2174 mutex_unlock(&con->mutex);
31b8006e
SW
2175 if (con->ops->peer_reset)
2176 con->ops->peer_reset(con);
ec302645 2177 mutex_lock(&con->mutex);
6d7f62bf 2178 if (con->state != CEPH_CON_S_V1_CONNECT_MSG)
0da5d703 2179 return -EAGAIN;
31b8006e
SW
2180 break;
2181
2182 case CEPH_MSGR_TAG_RETRY_SESSION:
2183 /*
2184 * If we sent a smaller connect_seq than the peer has, try
2185 * again with a larger value.
2186 */
5bdca4e0 2187 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
31b8006e 2188 le32_to_cpu(con->out_connect.connect_seq),
5bdca4e0
SW
2189 le32_to_cpu(con->in_reply.connect_seq));
2190 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
6d4221b5 2191 con_out_kvec_reset(con);
5a0f8fdd
AE
2192 ret = prepare_write_connect(con);
2193 if (ret < 0)
2194 return ret;
31b8006e
SW
2195 prepare_read_connect(con);
2196 break;
2197
2198 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2199 /*
2200 * If we sent a smaller global_seq than the peer has, try
2201 * again with a larger value.
2202 */
eed0ef2c 2203 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
31b8006e 2204 con->peer_global_seq,
5bdca4e0 2205 le32_to_cpu(con->in_reply.global_seq));
6503e0b6
ID
2206 ceph_get_global_seq(con->msgr,
2207 le32_to_cpu(con->in_reply.global_seq));
6d4221b5 2208 con_out_kvec_reset(con);
5a0f8fdd
AE
2209 ret = prepare_write_connect(con);
2210 if (ret < 0)
2211 return ret;
31b8006e
SW
2212 prepare_read_connect(con);
2213 break;
2214
3a23083b 2215 case CEPH_MSGR_TAG_SEQ:
31b8006e 2216 case CEPH_MSGR_TAG_READY:
04a419f9
SW
2217 if (req_feat & ~server_feat) {
2218 pr_err("%s%lld %s protocol feature mismatch,"
2219 " my required %llx > server's %llx, need %llx\n",
2220 ENTITY_NAME(con->peer_name),
b726ec97 2221 ceph_pr_addr(&con->peer_addr),
04a419f9
SW
2222 req_feat, server_feat, req_feat & ~server_feat);
2223 con->error_msg = "missing required protocol features";
04a419f9
SW
2224 return -1;
2225 }
8dacc7da 2226
6d7f62bf
ID
2227 WARN_ON(con->state != CEPH_CON_S_V1_CONNECT_MSG);
2228 con->state = CEPH_CON_S_OPEN;
20e55c4c 2229 con->auth_retry = 0; /* we authenticated; clear flag */
31b8006e
SW
2230 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2231 con->connect_seq++;
aba558e2 2232 con->peer_features = server_feat;
31b8006e
SW
2233 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2234 con->peer_global_seq,
2235 le32_to_cpu(con->in_reply.connect_seq),
2236 con->connect_seq);
2237 WARN_ON(con->connect_seq !=
2238 le32_to_cpu(con->in_reply.connect_seq));
92ac41d0
SW
2239
2240 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
6503e0b6 2241 ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
92ac41d0 2242
85effe18 2243 con->delay = 0; /* reset backoff memory */
92ac41d0 2244
3a23083b
SW
2245 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2246 prepare_write_seq(con);
2247 prepare_read_seq(con);
2248 } else {
2249 prepare_read_tag(con);
2250 }
31b8006e
SW
2251 break;
2252
2253 case CEPH_MSGR_TAG_WAIT:
2254 /*
2255 * If there is a connection race (we are opening
2256 * connections to each other), one of us may just have
2257 * to WAIT. This shouldn't happen if we are the
2258 * client.
2259 */
04177882
SW
2260 con->error_msg = "protocol error, got WAIT as client";
2261 return -1;
31b8006e
SW
2262
2263 default:
31b8006e
SW
2264 con->error_msg = "protocol error, garbage tag during connect";
2265 return -1;
2266 }
2267 return 0;
2268}
2269
2270
2271/*
2272 * read (part of) an ack
2273 */
2274static int read_partial_ack(struct ceph_connection *con)
2275{
fd51653f
AE
2276 int size = sizeof (con->in_temp_ack);
2277 int end = size;
31b8006e 2278
fd51653f 2279 return read_partial(con, end, size, &con->in_temp_ack);
31b8006e
SW
2280}
2281
31b8006e
SW
2282/*
2283 * We can finally discard anything that's been acked.
2284 */
2285static void process_ack(struct ceph_connection *con)
2286{
31b8006e 2287 u64 ack = le64_to_cpu(con->in_temp_ack);
31b8006e 2288
02471928
ID
2289 if (con->in_tag == CEPH_MSGR_TAG_ACK)
2290 ceph_con_discard_sent(con, ack);
2291 else
2292 ceph_con_discard_requeued(con, ack);
0a2ad541 2293
31b8006e
SW
2294 prepare_read_tag(con);
2295}
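/*
 * Illustrative sketch, not part of this file: what "discard anything
 * that's been acked" means for the sent queue -- the peer acks the
 * highest sequence number it has received, so every sent message with
 * seq <= ack can be dropped.  A plain array stands in for the kernel
 * list handled by ceph_con_discard_sent().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sent_seqs[] = { 5, 6, 7, 8, 9 };	/* the out_sent list */
	size_t nr = sizeof(sent_seqs) / sizeof(sent_seqs[0]);
	uint64_t ack = 7;				/* from the wire */
	size_t keep = 0;

	for (size_t i = 0; i < nr; i++) {
		if (sent_seqs[i] <= ack)
			printf("drop seq %llu\n",
			       (unsigned long long)sent_seqs[i]);
		else
			sent_seqs[keep++] = sent_seqs[i];
	}
	printf("%zu still awaiting ack\n", keep);	/* 2: seqs 8 and 9 */
	return 0;
}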
2296
2297
2450418c 2298static int read_partial_message_section(struct ceph_connection *con,
213c99ee
SW
2299 struct kvec *section,
2300 unsigned int sec_len, u32 *crc)
2450418c 2301{
68b4476b 2302 int ret, left;
2450418c
YS
2303
2304 BUG_ON(!section);
2305
2306 while (section->iov_len < sec_len) {
2307 BUG_ON(section->iov_base == NULL);
2308 left = sec_len - section->iov_len;
2309 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2310 section->iov_len, left);
2311 if (ret <= 0)
2312 return ret;
2313 section->iov_len += ret;
2450418c 2314 }
fe3ad593
AE
2315 if (section->iov_len == sec_len)
2316 *crc = crc32c(0, section->iov_base, section->iov_len);
31b8006e 2317
2450418c
YS
2318 return 1;
2319}
31b8006e 2320
34d2d200
AE
2321static int read_partial_msg_data(struct ceph_connection *con)
2322{
2323 struct ceph_msg *msg = con->in_msg;
8ae4f4f5 2324 struct ceph_msg_data_cursor *cursor = &msg->cursor;
859bff51 2325 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
686be208
AE
2326 struct page *page;
2327 size_t page_offset;
2328 size_t length;
f5db90bc 2329 u32 crc = 0;
34d2d200
AE
2330 int ret;
2331
0d9c1ab3 2332 if (!msg->num_data_items)
4c59b4a2 2333 return -EIO;
34d2d200 2334
f5db90bc
AE
2335 if (do_datacrc)
2336 crc = con->in_data_crc;
45a267db
ID
2337 while (cursor->total_resid) {
2338 if (!cursor->resid) {
2339 ceph_msg_data_advance(cursor, 0);
2340 continue;
2341 }
2342
343128ce 2343 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
686be208 2344 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
f5db90bc
AE
2345 if (ret <= 0) {
2346 if (do_datacrc)
2347 con->in_data_crc = crc;
2348
686be208 2349 return ret;
f5db90bc 2350 }
686be208
AE
2351
2352 if (do_datacrc)
f5db90bc 2353 crc = ceph_crc32c_page(crc, page, page_offset, ret);
1759f7b0 2354 ceph_msg_data_advance(cursor, (size_t)ret);
34d2d200 2355 }
f5db90bc
AE
2356 if (do_datacrc)
2357 con->in_data_crc = crc;
34d2d200
AE
2358
2359 return 1; /* must return > 0 to indicate success */
2360}
2361
31b8006e
SW
2362/*
2363 * read (part of) a message.
2364 */
2365static int read_partial_message(struct ceph_connection *con)
2366{
2367 struct ceph_msg *m = con->in_msg;
fd51653f
AE
2368 int size;
2369 int end;
31b8006e 2370 int ret;
95c96174 2371 unsigned int front_len, middle_len, data_len;
859bff51 2372 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
33d07337 2373 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
ae18756b 2374 u64 seq;
fe3ad593 2375 u32 crc;
31b8006e
SW
2376
2377 dout("read_partial_message con %p msg %p\n", con, m);
2378
2379 /* header */
fd51653f
AE
2380 size = sizeof (con->in_hdr);
2381 end = size;
2382 ret = read_partial(con, end, size, &con->in_hdr);
57dac9d1
AE
2383 if (ret <= 0)
2384 return ret;
fe3ad593
AE
2385
2386 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2387 if (cpu_to_le32(crc) != con->in_hdr.crc) {
67c64eb7 2388 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
fe3ad593
AE
2389 crc, con->in_hdr.crc);
2390 return -EBADMSG;
2391 }
2392
31b8006e
SW
2393 front_len = le32_to_cpu(con->in_hdr.front_len);
2394 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2395 return -EIO;
2396 middle_len = le32_to_cpu(con->in_hdr.middle_len);
7b11ba37 2397 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
31b8006e
SW
2398 return -EIO;
2399 data_len = le32_to_cpu(con->in_hdr.data_len);
2400 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2401 return -EIO;
2402
ae18756b
SW
2403 /* verify seq# */
2404 seq = le64_to_cpu(con->in_hdr.seq);
2405 if ((s64)seq - (s64)con->in_seq < 1) {
df9f86fa 2406 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
ae18756b 2407 ENTITY_NAME(con->peer_name),
b726ec97 2408 ceph_pr_addr(&con->peer_addr),
ae18756b
SW
2409 seq, con->in_seq + 1);
2410 con->in_base_pos = -front_len - middle_len - data_len -
dbc0d3ca 2411 sizeof_footer(con);
ae18756b 2412 con->in_tag = CEPH_MSGR_TAG_READY;
e7a88e82 2413 return 1;
ae18756b
SW
2414 } else if ((s64)seq - (s64)con->in_seq > 1) {
2415 pr_err("read_partial_message bad seq %lld expected %lld\n",
2416 seq, con->in_seq + 1);
2417 con->error_msg = "bad message sequence # for incoming message";
67c64eb7 2418 return -EBADE;
ae18756b
SW
2419 }
2420
31b8006e
SW
2421 /* allocate message? */
2422 if (!con->in_msg) {
4740a623
SW
2423 int skip = 0;
2424
31b8006e 2425 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
6ebc8b32 2426 front_len, data_len);
fc4c128e 2427 ret = ceph_con_in_msg_alloc(con, &con->in_hdr, &skip);
4740a623
SW
2428 if (ret < 0)
2429 return ret;
f759ebb9
AE
2430
2431 BUG_ON(!con->in_msg ^ skip);
2450418c 2432 if (skip) {
31b8006e 2433 /* skip this message */
a79832f2 2434 dout("alloc_msg said skip message\n");
31b8006e 2435 con->in_base_pos = -front_len - middle_len - data_len -
dbc0d3ca 2436 sizeof_footer(con);
31b8006e 2437 con->in_tag = CEPH_MSGR_TAG_READY;
684be25c 2438 con->in_seq++;
e7a88e82 2439 return 1;
31b8006e 2440 }
38941f80 2441
4740a623 2442 BUG_ON(!con->in_msg);
38941f80 2443 BUG_ON(con->in_msg->con != con);
31b8006e
SW
2444 m = con->in_msg;
2445 m->front.iov_len = 0; /* haven't read it yet */
2450418c
YS
2446 if (m->middle)
2447 m->middle->vec.iov_len = 0;
9d7f0f13 2448
78625051 2449 /* prepare for data payload, if any */
a4107026 2450
78625051 2451 if (data_len)
98fa5dd8 2452 prepare_message_data(con->in_msg, data_len);
31b8006e
SW
2453 }
2454
2455 /* front */
2450418c
YS
2456 ret = read_partial_message_section(con, &m->front, front_len,
2457 &con->in_front_crc);
2458 if (ret <= 0)
2459 return ret;
31b8006e
SW
2460
2461 /* middle */
2450418c 2462 if (m->middle) {
213c99ee
SW
2463 ret = read_partial_message_section(con, &m->middle->vec,
2464 middle_len,
2450418c 2465 &con->in_middle_crc);
31b8006e
SW
2466 if (ret <= 0)
2467 return ret;
31b8006e
SW
2468 }
2469
2470 /* (page) data */
34d2d200
AE
2471 if (data_len) {
2472 ret = read_partial_msg_data(con);
2473 if (ret <= 0)
2474 return ret;
31b8006e
SW
2475 }
2476
31b8006e 2477 /* footer */
89f08173 2478 size = sizeof_footer(con);
fd51653f
AE
2479 end += size;
2480 ret = read_partial(con, end, size, &m->footer);
57dac9d1
AE
2481 if (ret <= 0)
2482 return ret;
2483
33d07337
YZ
2484 if (!need_sign) {
2485 m->footer.flags = m->old_footer.flags;
2486 m->footer.sig = 0;
2487 }
2488
31b8006e
SW
2489 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2490 m, front_len, m->footer.front_crc, middle_len,
2491 m->footer.middle_crc, data_len, m->footer.data_crc);
2492
2493 /* crc ok? */
2494 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2495 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2496 m, con->in_front_crc, m->footer.front_crc);
2497 return -EBADMSG;
2498 }
2499 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2500 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2501 m, con->in_middle_crc, m->footer.middle_crc);
2502 return -EBADMSG;
2503 }
bca064d2 2504 if (do_datacrc &&
31b8006e
SW
2505 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2506 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2507 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2508 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2509 return -EBADMSG;
2510 }
2511
33d07337 2512 if (need_sign && con->ops->check_message_signature &&
79dbd1ba 2513 con->ops->check_message_signature(m)) {
33d07337
YZ
2514 pr_err("read_partial_message %p signature check failed\n", m);
2515 return -EBADMSG;
2516 }
2517
31b8006e
SW
2518 return 1; /* done! */
2519}
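/*
 * Illustrative sketch, not part of this file: the sequence check in
 * read_partial_message() above uses a signed 64-bit difference rather
 * than direct comparisons, so one subtraction classifies duplicates
 * (delta < 1), the expected next message (delta == 1) and gaps
 * (delta > 1), and would keep doing so even across counter wrap.
 */
#include <stdint.h>
#include <stdio.h>

static const char *demo_classify(uint64_t seq, uint64_t in_seq)
{
	int64_t delta = (int64_t)seq - (int64_t)in_seq;

	if (delta < 1)
		return "old, skip";		/* already consumed */
	if (delta > 1)
		return "gap, protocol error";	/* missed a message */
	return "expected";
}

int main(void)
{
	uint64_t in_seq = 41;	/* last message we consumed */

	printf("%s / %s / %s\n",
	       demo_classify(41, in_seq),	/* old, skip */
	       demo_classify(42, in_seq),	/* expected */
	       demo_classify(44, in_seq));	/* gap, protocol error */
	return 0;
}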
2520
2521/*
2522 * Process message. This happens in the worker thread. The callback should
2523 * be careful not to do anything that waits on other incoming messages or it
2524 * may deadlock.
2525 */
6503e0b6 2526void ceph_con_process_message(struct ceph_connection *con)
31b8006e 2527{
583d0fef 2528 struct ceph_msg *msg = con->in_msg;
31b8006e 2529
38941f80 2530 BUG_ON(con->in_msg->con != con);
31b8006e
SW
2531 con->in_msg = NULL;
2532
2533 /* if first message, set peer_name */
2534 if (con->peer_name.type == 0)
dbad185d 2535 con->peer_name = msg->hdr.src;
31b8006e 2536
31b8006e 2537 con->in_seq++;
ec302645 2538 mutex_unlock(&con->mutex);
31b8006e 2539
b77f8f0e 2540 dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
31b8006e 2541 msg, le64_to_cpu(msg->hdr.seq),
dbad185d 2542 ENTITY_NAME(msg->hdr.src),
31b8006e
SW
2543 le16_to_cpu(msg->hdr.type),
2544 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2545 le32_to_cpu(msg->hdr.front_len),
b77f8f0e 2546 le32_to_cpu(msg->hdr.middle_len),
31b8006e
SW
2547 le32_to_cpu(msg->hdr.data_len),
2548 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2549 con->ops->dispatch(con, msg);
ec302645
SW
2550
2551 mutex_lock(&con->mutex);
31b8006e
SW
2552}
2553
8b9558aa
YZ
2554static int read_keepalive_ack(struct ceph_connection *con)
2555{
2556 struct ceph_timespec ceph_ts;
2557 size_t size = sizeof(ceph_ts);
2558 int ret = read_partial(con, size, size, &ceph_ts);
2559 if (ret <= 0)
2560 return ret;
473bd2d7 2561 ceph_decode_timespec64(&con->last_keepalive_ack, &ceph_ts);
8b9558aa
YZ
2562 prepare_read_tag(con);
2563 return 1;
2564}
31b8006e
SW
2565
2566/*
2567 * Write something to the socket. Called in a worker thread when the
2568 * socket appears to be writeable and we have something ready to send.
2569 */
566050e1 2570int ceph_con_v1_try_write(struct ceph_connection *con)
31b8006e 2571{
31b8006e
SW
2572 int ret = 1;
2573
30be780a 2574 dout("try_write start %p state %d\n", con, con->state);
6d7f62bf
ID
2575 if (con->state != CEPH_CON_S_PREOPEN &&
2576 con->state != CEPH_CON_S_V1_BANNER &&
2577 con->state != CEPH_CON_S_V1_CONNECT_MSG &&
2578 con->state != CEPH_CON_S_OPEN)
9c55ad1c 2579 return 0;
31b8006e 2580
31b8006e 2581 /* open the socket first? */
6d7f62bf 2582 if (con->state == CEPH_CON_S_PREOPEN) {
8dacc7da 2583 BUG_ON(con->sock);
6d7f62bf 2584 con->state = CEPH_CON_S_V1_BANNER;
a5988c49 2585
e2200423 2586 con_out_kvec_reset(con);
e825a66d 2587 prepare_write_banner(con);
eed0ef2c 2588 prepare_read_banner(con);
31b8006e 2589
cf3e5c40 2590 BUG_ON(con->in_msg);
31b8006e 2591 con->in_tag = CEPH_MSGR_TAG_READY;
30be780a 2592 dout("try_write initiating connect on %p new state %d\n",
31b8006e 2593 con, con->state);
41617d0c
AE
2594 ret = ceph_tcp_connect(con);
2595 if (ret < 0) {
31b8006e 2596 con->error_msg = "connect error";
31b8006e
SW
2597 goto out;
2598 }
2599 }
2600
d2935d6f
ID
2601more:
2602 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
9c55ad1c
ID
2603 BUG_ON(!con->sock);
2604
31b8006e 2605 /* kvec data queued? */
67645d76
ID
2606 if (con->out_kvec_left) {
2607 ret = write_partial_kvec(con);
31b8006e 2608 if (ret <= 0)
42961d23 2609 goto out;
31b8006e 2610 }
67645d76
ID
2611 if (con->out_skip) {
2612 ret = write_partial_skip(con);
31b8006e 2613 if (ret <= 0)
42961d23 2614 goto out;
31b8006e
SW
2615 }
2616
2617 /* msg pages? */
2618 if (con->out_msg) {
c86a2930
SW
2619 if (con->out_msg_done) {
2620 ceph_msg_put(con->out_msg);
2621 con->out_msg = NULL; /* we're done with this one */
2622 goto do_next;
2623 }
2624
34d2d200 2625 ret = write_partial_message_data(con);
31b8006e 2626 if (ret == 1)
d2935d6f 2627 goto more; /* we need to send the footer, too! */
31b8006e 2628 if (ret == 0)
42961d23 2629 goto out;
31b8006e 2630 if (ret < 0) {
34d2d200 2631 dout("try_write write_partial_message_data err %d\n",
31b8006e 2632 ret);
42961d23 2633 goto out;
31b8006e
SW
2634 }
2635 }
2636
c86a2930 2637do_next:
6d7f62bf 2638 if (con->state == CEPH_CON_S_OPEN) {
6503e0b6 2639 if (ceph_con_flag_test_and_clear(con,
3fefd43e 2640 CEPH_CON_F_KEEPALIVE_PENDING)) {
8b9558aa
YZ
2641 prepare_write_keepalive(con);
2642 goto more;
2643 }
31b8006e
SW
2644 /* is anything else pending? */
2645 if (!list_empty(&con->out_queue)) {
2646 prepare_write_message(con);
2647 goto more;
2648 }
2649 if (con->in_seq > con->in_seq_acked) {
2650 prepare_write_ack(con);
2651 goto more;
2652 }
31b8006e
SW
2653 }
2654
2655 /* Nothing to do! */
6503e0b6 2656 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
31b8006e 2657 dout("try_write nothing else to write.\n");
31b8006e
SW
2658 ret = 0;
2659out:
42961d23 2660 dout("try_write done on %p ret %d\n", con, ret);
31b8006e
SW
2661 return ret;
2662}
2663
31b8006e
SW
2664/*
2665 * Read what we can from the socket.
2666 */
566050e1 2667int ceph_con_v1_try_read(struct ceph_connection *con)
31b8006e 2668{
31b8006e
SW
2669 int ret = -1;
2670
8dacc7da 2671more:
30be780a 2672 dout("try_read start %p state %d\n", con, con->state);
6d7f62bf
ID
2673 if (con->state != CEPH_CON_S_V1_BANNER &&
2674 con->state != CEPH_CON_S_V1_CONNECT_MSG &&
2675 con->state != CEPH_CON_S_OPEN)
31b8006e
SW
2676 return 0;
2677
8dacc7da 2678 BUG_ON(!con->sock);
ec302645 2679
31b8006e
SW
2680 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2681 con->in_base_pos);
0da5d703 2682
6d7f62bf 2683 if (con->state == CEPH_CON_S_V1_BANNER) {
7593af92
AE
2684 ret = read_partial_banner(con);
2685 if (ret <= 0)
ab166d5a 2686 goto out;
7593af92
AE
2687 ret = process_banner(con);
2688 if (ret < 0)
2689 goto out;
2690
6d7f62bf 2691 con->state = CEPH_CON_S_V1_CONNECT_MSG;
7593af92 2692
6d4221b5
JS
2693 /*
2694 * Received banner is good, exchange connection info.
2695 * Do not reset out_kvec, as sending our banner raced
2696 * with receiving peer banner after connect completed.
2697 */
7593af92
AE
2698 ret = prepare_write_connect(con);
2699 if (ret < 0)
2700 goto out;
2701 prepare_read_connect(con);
2702
2703 /* Send connection info before awaiting response */
0da5d703
SW
2704 goto out;
2705 }
2706
6d7f62bf 2707 if (con->state == CEPH_CON_S_V1_CONNECT_MSG) {
31b8006e
SW
2708 ret = read_partial_connect(con);
2709 if (ret <= 0)
31b8006e 2710 goto out;
98bdb0aa
SW
2711 ret = process_connect(con);
2712 if (ret < 0)
2713 goto out;
31b8006e
SW
2714 goto more;
2715 }
2716
6d7f62bf 2717 WARN_ON(con->state != CEPH_CON_S_OPEN);
8dacc7da 2718
31b8006e
SW
2719 if (con->in_base_pos < 0) {
2720 /*
2721 * skipping + discarding content.
31b8006e 2722 */
e5c93883 2723 ret = ceph_tcp_recvmsg(con->sock, NULL, -con->in_base_pos);
31b8006e 2724 if (ret <= 0)
98bdb0aa 2725 goto out;
e5c93883 2726 dout("skipped %d / %d bytes\n", ret, -con->in_base_pos);
31b8006e
SW
2727 con->in_base_pos += ret;
2728 if (con->in_base_pos)
2729 goto more;
2730 }
2731 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2732 /*
2733 * what's next?
2734 */
2735 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2736 if (ret <= 0)
98bdb0aa 2737 goto out;
31b8006e
SW
2738 dout("try_read got tag %d\n", (int)con->in_tag);
2739 switch (con->in_tag) {
2740 case CEPH_MSGR_TAG_MSG:
2741 prepare_read_message(con);
2742 break;
2743 case CEPH_MSGR_TAG_ACK:
2744 prepare_read_ack(con);
2745 break;
8b9558aa
YZ
2746 case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
2747 prepare_read_keepalive_ack(con);
2748 break;
31b8006e 2749 case CEPH_MSGR_TAG_CLOSE:
6503e0b6 2750 ceph_con_close_socket(con);
6d7f62bf 2751 con->state = CEPH_CON_S_CLOSED;
98bdb0aa 2752 goto out;
31b8006e
SW
2753 default:
2754 goto bad_tag;
2755 }
2756 }
2757 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2758 ret = read_partial_message(con);
2759 if (ret <= 0) {
2760 switch (ret) {
2761 case -EBADMSG:
a51983e4 2762 con->error_msg = "bad crc/signature";
df561f66 2763 fallthrough;
67c64eb7 2764 case -EBADE:
31b8006e 2765 ret = -EIO;
98bdb0aa 2766 break;
31b8006e
SW
2767 case -EIO:
2768 con->error_msg = "io error";
98bdb0aa 2769 break;
31b8006e 2770 }
98bdb0aa 2771 goto out;
31b8006e
SW
2772 }
2773 if (con->in_tag == CEPH_MSGR_TAG_READY)
2774 goto more;
6503e0b6 2775 ceph_con_process_message(con);
6d7f62bf 2776 if (con->state == CEPH_CON_S_OPEN)
7b862e07 2777 prepare_read_tag(con);
31b8006e
SW
2778 goto more;
2779 }
3a23083b
SW
2780 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2781 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2782 /*
2783 * the final handshake seq exchange is semantically
2784 * equivalent to an ACK
2785 */
31b8006e
SW
2786 ret = read_partial_ack(con);
2787 if (ret <= 0)
98bdb0aa 2788 goto out;
31b8006e
SW
2789 process_ack(con);
2790 goto more;
2791 }
8b9558aa
YZ
2792 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2793 ret = read_keepalive_ack(con);
2794 if (ret <= 0)
2795 goto out;
2796 goto more;
2797 }
31b8006e 2798
31b8006e 2799out:
98bdb0aa 2800 dout("try_read done on %p ret %d\n", con, ret);
31b8006e
SW
2801 return ret;
2802
2803bad_tag:
2804 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2805 con->error_msg = "protocol error, garbage tag";
2806 ret = -1;
2807 goto out;
2808}
2809
2810
2811/*
802c6d96
AE
2812 * Atomically queue work on a connection after the specified delay.
2813 * Bump @con reference to avoid races with connection teardown.
2814 * Returns 0 if work was queued, or an error code otherwise.
31b8006e 2815 */
802c6d96 2816static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
31b8006e 2817{
31b8006e 2818 if (!con->ops->get(con)) {
802c6d96 2819 dout("%s %p ref count 0\n", __func__, con);
802c6d96 2820 return -ENOENT;
31b8006e
SW
2821 }
2822
418af5b3
ID
2823 if (delay >= HZ)
2824 delay = round_jiffies_relative(delay);
2825
5a5036c8 2826 dout("%s %p %lu\n", __func__, con, delay);
802c6d96
AE
2827 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2828 dout("%s %p - already queued\n", __func__, con);
31b8006e 2829 con->ops->put(con);
802c6d96 2830 return -EBUSY;
31b8006e 2831 }
802c6d96 2832
802c6d96
AE
2833 return 0;
2834}
2835
2836static void queue_con(struct ceph_connection *con)
2837{
2838 (void) queue_con_delay(con, 0);
31b8006e
SW
2839}
2840
37ab77ac
ID
2841static void cancel_con(struct ceph_connection *con)
2842{
2843 if (cancel_delayed_work(&con->work)) {
2844 dout("%s %p\n", __func__, con);
2845 con->ops->put(con);
2846 }
2847}
2848
7bb21d68
AE
2849static bool con_sock_closed(struct ceph_connection *con)
2850{
6503e0b6 2851 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
7bb21d68
AE
2852 return false;
2853
2854#define CASE(x) \
6d7f62bf 2855 case CEPH_CON_S_ ## x: \
7bb21d68
AE
2856 con->error_msg = "socket closed (con state " #x ")"; \
2857 break;
2858
2859 switch (con->state) {
2860 CASE(CLOSED);
2861 CASE(PREOPEN);
6d7f62bf
ID
2862 CASE(V1_BANNER);
2863 CASE(V1_CONNECT_MSG);
7bb21d68
AE
2864 CASE(OPEN);
2865 CASE(STANDBY);
2866 default:
7bb21d68 2867 BUG();
7bb21d68
AE
2868 }
2869#undef CASE
2870
2871 return true;
2872}
2873
f20a39fd
AE
2874static bool con_backoff(struct ceph_connection *con)
2875{
2876 int ret;
2877
6503e0b6 2878 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
f20a39fd
AE
2879 return false;
2880
418af5b3 2881 ret = queue_con_delay(con, con->delay);
f20a39fd
AE
2882 if (ret) {
2883 dout("%s: con %p FAILED to back off %lu\n", __func__,
2884 con, con->delay);
2885 BUG_ON(ret == -ENOENT);
6503e0b6 2886 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
f20a39fd
AE
2887 }
2888
2889 return true;
2890}
2891
93209264
AE
2892/* Finish fault handling; con->mutex must *not* be held here */
2893
2894static void con_fault_finish(struct ceph_connection *con)
2895{
f6330cc1
ID
2896 dout("%s %p\n", __func__, con);
2897
93209264
AE
2898 /*
2899 * in case we faulted due to authentication, invalidate our
2900 * current tickets so that we can get new ones.
2901 */
f6330cc1
ID
2902 if (con->auth_retry) {
2903 dout("auth_retry %d, invalidating\n", con->auth_retry);
2904 if (con->ops->invalidate_authorizer)
2905 con->ops->invalidate_authorizer(con);
2906 con->auth_retry = 0;
93209264
AE
2907 }
2908
2909 if (con->ops->fault)
2910 con->ops->fault(con);
2911}
2912
31b8006e
SW
2913/*
2914 * Do some work on a connection. Drop a connection ref when we're done.
2915 */
68931622 2916static void ceph_con_workfn(struct work_struct *work)
31b8006e
SW
2917{
2918 struct ceph_connection *con = container_of(work, struct ceph_connection,
2919 work.work);
49659416 2920 bool fault;
31b8006e 2921
9dd4658d 2922 mutex_lock(&con->mutex);
49659416
AE
2923 while (true) {
2924 int ret;
31b8006e 2925
49659416
AE
2926 if ((fault = con_sock_closed(con))) {
2927 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2928 break;
2929 }
2930 if (con_backoff(con)) {
2931 dout("%s: con %p BACKOFF\n", __func__, con);
2932 break;
2933 }
6d7f62bf 2934 if (con->state == CEPH_CON_S_STANDBY) {
49659416
AE
2935 dout("%s: con %p STANDBY\n", __func__, con);
2936 break;
2937 }
6d7f62bf 2938 if (con->state == CEPH_CON_S_CLOSED) {
49659416
AE
2939 dout("%s: con %p CLOSED\n", __func__, con);
2940 BUG_ON(con->sock);
2941 break;
2942 }
6d7f62bf 2943 if (con->state == CEPH_CON_S_PREOPEN) {
49659416
AE
2944 dout("%s: con %p PREOPEN\n", __func__, con);
2945 BUG_ON(con->sock);
2946 }
0da5d703 2947
566050e1 2948 ret = ceph_con_v1_try_read(con);
49659416
AE
2949 if (ret < 0) {
2950 if (ret == -EAGAIN)
2951 continue;
67c64eb7
ID
2952 if (!con->error_msg)
2953 con->error_msg = "socket error on read";
49659416
AE
2954 fault = true;
2955 break;
2956 }
2957
566050e1 2958 ret = ceph_con_v1_try_write(con);
49659416
AE
2959 if (ret < 0) {
2960 if (ret == -EAGAIN)
2961 continue;
67c64eb7
ID
2962 if (!con->error_msg)
2963 con->error_msg = "socket error on write";
49659416
AE
2964 fault = true;
2965 }
2966
2967 break; /* If we make it to here, we're done */
3a140a0d 2968 }
b6e7b6a1
AE
2969 if (fault)
2970 con_fault(con);
9dd4658d 2971 mutex_unlock(&con->mutex);
0da5d703 2972
b6e7b6a1
AE
2973 if (fault)
2974 con_fault_finish(con);
2975
2976 con->ops->put(con);
31b8006e
SW
2977}
2978
31b8006e
SW
2979/*
2980 * Generic error/fault handler. A retry mechanism is used with
2981 * exponential backoff.
2982 */
93209264 2983static void con_fault(struct ceph_connection *con)
31b8006e 2984{
30be780a 2985 dout("fault %p state %d to peer %s\n",
b726ec97 2986 con, con->state, ceph_pr_addr(&con->peer_addr));
31b8006e 2987
67c64eb7 2988 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
b726ec97 2989 ceph_pr_addr(&con->peer_addr), con->error_msg);
67c64eb7
ID
2990 con->error_msg = NULL;
2991
6d7f62bf
ID
2992 WARN_ON(con->state != CEPH_CON_S_V1_BANNER &&
2993 con->state != CEPH_CON_S_V1_CONNECT_MSG &&
2994 con->state != CEPH_CON_S_OPEN);
ec302645 2995
3596f4c1 2996 ceph_con_reset_protocol(con);
5e095e8b 2997
6503e0b6 2998 if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
8dacc7da 2999 dout("fault on LOSSYTX channel, marking CLOSED\n");
6d7f62bf 3000 con->state = CEPH_CON_S_CLOSED;
93209264 3001 return;
3b5ede07
SW
3002 }
3003
e80a52d1
SW
3004 /* Requeue anything that hasn't been acked */
3005 list_splice_init(&con->out_sent, &con->out_queue);
9bd2e6f8 3006
e76661d0
SW
3007 /* If there are no messages queued or keepalive pending, place
3008 * the connection in a STANDBY state */
3009 if (list_empty(&con->out_queue) &&
6503e0b6 3010 !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
e00de341 3011 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
6503e0b6 3012 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
6d7f62bf 3013 con->state = CEPH_CON_S_STANDBY;
e80a52d1
SW
3014 } else {
3015 /* retry after a delay. */
6d7f62bf 3016 con->state = CEPH_CON_S_PREOPEN;
418af5b3 3017 if (!con->delay) {
e80a52d1 3018 con->delay = BASE_DELAY_INTERVAL;
418af5b3 3019 } else if (con->delay < MAX_DELAY_INTERVAL) {
e80a52d1 3020 con->delay *= 2;
418af5b3
ID
3021 if (con->delay > MAX_DELAY_INTERVAL)
3022 con->delay = MAX_DELAY_INTERVAL;
3023 }
6503e0b6 3024 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
8618e30b 3025 queue_con(con);
31b8006e 3026 }
31b8006e
SW
3027}
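/*
 * Illustrative sketch, not part of this file: the retry delay in
 * con_fault() above doubles on every consecutive fault and is clamped
 * at a ceiling.  The base and cap below are invented for the
 * demonstration; the real BASE_DELAY_INTERVAL and MAX_DELAY_INTERVAL
 * are defined earlier in this file, in jiffies.
 */
#include <stdio.h>

#define DEMO_BASE_MS	500
#define DEMO_MAX_MS	(15 * 1000)

int main(void)
{
	unsigned long delay = 0;	/* reset to 0 on a clean READY */
	int fault;

	for (fault = 1; fault <= 8; fault++) {
		if (!delay) {
			delay = DEMO_BASE_MS;
		} else if (delay < DEMO_MAX_MS) {
			delay *= 2;
			if (delay > DEMO_MAX_MS)
				delay = DEMO_MAX_MS;
		}
		printf("fault %d: back off %lums\n", fault, delay);
	}
	return 0;	/* 500, 1000, 2000, ..., capped at 15000 */
}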
3028
3029
120a75ea
YZ
3030void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
3031{
3032 u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
3033 msgr->inst.addr.nonce = cpu_to_le32(nonce);
6503e0b6 3034 ceph_encode_my_addr(msgr);
120a75ea 3035}
31b8006e
SW
3036
3037/*
15d9882c 3038 * initialize a new messenger instance
31b8006e 3039 */
15d9882c 3040void ceph_messenger_init(struct ceph_messenger *msgr,
859bff51 3041 struct ceph_entity_addr *myaddr)
31b8006e 3042{
31b8006e
SW
3043 spin_lock_init(&msgr->global_seq_lock);
3044
fd1a154c
ID
3045 if (myaddr) {
3046 memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
3047 sizeof(msgr->inst.addr.in_addr));
6503e0b6 3048 ceph_addr_set_port(&msgr->inst.addr, 0);
fd1a154c 3049 }
31b8006e 3050
ac8839d7 3051 msgr->inst.addr.type = 0;
fd1a154c
ID
3052
3053 /* generate a random non-zero nonce */
3054 do {
3055 get_random_bytes(&msgr->inst.addr.nonce,
3056 sizeof(msgr->inst.addr.nonce));
3057 } while (!msgr->inst.addr.nonce);
6503e0b6 3058 ceph_encode_my_addr(msgr);
31b8006e 3059
a2a32584 3060 atomic_set(&msgr->stopping, 0);
757856d2 3061 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
31b8006e 3062
15d9882c 3063 dout("%s %p\n", __func__, msgr);
31b8006e
SW
3064}
3065
757856d2
ID
3066void ceph_messenger_fini(struct ceph_messenger *msgr)
3067{
3068 put_net(read_pnet(&msgr->net));
3069}
757856d2 3070
583d0fef
ID
3071static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
3072{
3073 if (msg->con)
3074 msg->con->ops->put(msg->con);
3075
3076 msg->con = con ? con->ops->get(con) : NULL;
3077 BUG_ON(msg->con != con);
3078}
3079
e00de341
SW
3080static void clear_standby(struct ceph_connection *con)
3081{
3082 /* come back from STANDBY? */
6d7f62bf 3083 if (con->state == CEPH_CON_S_STANDBY) {
e00de341 3084 dout("clear_standby %p and ++connect_seq\n", con);
6d7f62bf 3085 con->state = CEPH_CON_S_PREOPEN;
e00de341 3086 con->connect_seq++;
6503e0b6
ID
3087 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
3088 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
e00de341
SW
3089 }
3090}
3091
31b8006e
SW
3092/*
3093 * Queue up an outgoing message on the given connection.
771294fe
ID
3094 *
3095 * Consumes a ref on @msg.
31b8006e
SW
3096 */
3097void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3098{
31b8006e 3099 /* set src+dst */
dbad185d 3100 msg->hdr.src = con->msgr->inst.name;
3ca02ef9 3101 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
e84346b7
SW
3102 msg->needs_out_seq = true;
3103
ec302645 3104 mutex_lock(&con->mutex);
92ce034b 3105
6d7f62bf 3106 if (con->state == CEPH_CON_S_CLOSED) {
a59b55a6
SW
3107 dout("con_send %p closed, dropping %p\n", con, msg);
3108 ceph_msg_put(msg);
3109 mutex_unlock(&con->mutex);
3110 return;
3111 }
3112
583d0fef 3113 msg_con_set(msg, con);
92ce034b 3114
31b8006e
SW
3115 BUG_ON(!list_empty(&msg->list_head));
3116 list_add_tail(&msg->list_head, &con->out_queue);
3117 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3118 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3119 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3120 le32_to_cpu(msg->hdr.front_len),
3121 le32_to_cpu(msg->hdr.middle_len),
3122 le32_to_cpu(msg->hdr.data_len));
00650931
SW
3123
3124 clear_standby(con);
ec302645 3125 mutex_unlock(&con->mutex);
31b8006e
SW
3126
3127 /* if there wasn't anything waiting to send before, queue
3128 * new work */
6503e0b6 3129 if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
31b8006e
SW
3130 queue_con(con);
3131}
3d14c5d2 3132EXPORT_SYMBOL(ceph_con_send);
31b8006e 3133
566050e1
ID
3134void ceph_con_v1_revoke(struct ceph_connection *con)
3135{
3136 struct ceph_msg *msg = con->out_msg;
3137
3138 WARN_ON(con->out_skip);
3139 /* footer */
3140 if (con->out_msg_done) {
3141 con->out_skip += con_out_kvec_skip(con);
3142 } else {
3143 WARN_ON(!msg->data_length);
3144 con->out_skip += sizeof_footer(con);
3145 }
3146 /* data, middle, front */
3147 if (msg->data_length)
3148 con->out_skip += msg->cursor.total_resid;
3149 if (msg->middle)
3150 con->out_skip += con_out_kvec_skip(con);
3151 con->out_skip += con_out_kvec_skip(con);
3152
3153 dout("%s con %p out_kvec_bytes %d out_skip %d\n", __func__, con,
3154 con->out_kvec_bytes, con->out_skip);
3155}
3156
31b8006e
SW
3157/*
3158 * Revoke a message that was previously queued for send
3159 */
6740a845 3160void ceph_msg_revoke(struct ceph_msg *msg)
31b8006e 3161{
6740a845
AE
3162 struct ceph_connection *con = msg->con;
3163
583d0fef
ID
3164 if (!con) {
3165 dout("%s msg %p null con\n", __func__, msg);
6740a845 3166 return; /* Message not in our possession */
583d0fef 3167 }
6740a845 3168
ec302645 3169 mutex_lock(&con->mutex);
566050e1
ID
3170 if (list_empty(&msg->list_head)) {
3171 WARN_ON(con->out_msg == msg);
3172 dout("%s con %p msg %p not linked\n", __func__, con, msg);
3173 mutex_unlock(&con->mutex);
3174 return;
ed98adad 3175 }
67645d76 3176
566050e1
ID
3177 dout("%s con %p msg %p was linked\n", __func__, con, msg);
3178 msg->hdr.seq = 0;
3179 ceph_msg_remove(msg);
3180
3181 if (con->out_msg == msg) {
3182 WARN_ON(con->state != CEPH_CON_S_OPEN);
3183 dout("%s con %p msg %p was sending\n", __func__, con, msg);
3184 ceph_con_v1_revoke(con);
3185 ceph_msg_put(con->out_msg);
67645d76 3186 con->out_msg = NULL;
566050e1
ID
3187 } else {
3188 dout("%s con %p msg %p not current, out_msg %p\n", __func__,
3189 con, msg, con->out_msg);
31b8006e 3190 }
ec302645 3191 mutex_unlock(&con->mutex);
31b8006e
SW
3192}
3193
566050e1
ID
3194void ceph_con_v1_revoke_incoming(struct ceph_connection *con)
3195{
3196 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3197 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3198 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3199
3200 /* skip rest of message */
3201 con->in_base_pos = con->in_base_pos -
3202 sizeof(struct ceph_msg_header) -
3203 front_len -
3204 middle_len -
3205 data_len -
3206 sizeof(struct ceph_msg_footer);
3207
3208 con->in_tag = CEPH_MSGR_TAG_READY;
3209 con->in_seq++;
3210
3211 dout("%s con %p in_base_pos %d\n", __func__, con, con->in_base_pos);
3212}
3213
350b1c32 3214/*
0d59ab81 3215 * Revoke a message that we may be reading data into
350b1c32 3216 */
8921d114 3217void ceph_msg_revoke_incoming(struct ceph_msg *msg)
350b1c32 3218{
583d0fef 3219 struct ceph_connection *con = msg->con;
8921d114 3220
583d0fef 3221 if (!con) {
8921d114 3222 dout("%s msg %p null con\n", __func__, msg);
8921d114
AE
3223 return; /* Message not in our possession */
3224 }
3225
350b1c32 3226 mutex_lock(&con->mutex);
8921d114 3227 if (con->in_msg == msg) {
566050e1
ID
3228 WARN_ON(con->state != CEPH_CON_S_OPEN);
3229 dout("%s con %p msg %p was recving\n", __func__, con, msg);
3230 ceph_con_v1_revoke_incoming(con);
350b1c32
SW
3231 ceph_msg_put(con->in_msg);
3232 con->in_msg = NULL;
350b1c32 3233 } else {
566050e1
ID
3234 dout("%s con %p msg %p not current, in_msg %p\n", __func__,
3235 con, msg, con->in_msg);
350b1c32
SW
3236 }
3237 mutex_unlock(&con->mutex);
3238}
3239
31b8006e
SW
3240/*
3241 * Queue a keepalive byte to ensure the tcp connection is alive.
3242 */
3243void ceph_con_keepalive(struct ceph_connection *con)
3244{
e00de341 3245 dout("con_keepalive %p\n", con);
00650931 3246 mutex_lock(&con->mutex);
e00de341 3247 clear_standby(con);
6503e0b6 3248 ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
00650931 3249 mutex_unlock(&con->mutex);
4aac9228 3250
6503e0b6 3251 if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
31b8006e
SW
3252 queue_con(con);
3253}
3d14c5d2 3254EXPORT_SYMBOL(ceph_con_keepalive);
31b8006e 3255
8b9558aa
YZ
3256bool ceph_con_keepalive_expired(struct ceph_connection *con,
3257 unsigned long interval)
3258{
3259 if (interval > 0 &&
3260 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
473bd2d7
AB
3261 struct timespec64 now;
3262 struct timespec64 ts;
3263 ktime_get_real_ts64(&now);
3264 jiffies_to_timespec64(interval, &ts);
3265 ts = timespec64_add(con->last_keepalive_ack, ts);
3266 return timespec64_compare(&now, &ts) >= 0;
8b9558aa
YZ
3267 }
3268 return false;
3269}
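/*
 * Illustrative sketch, not part of this file: the expiry test from
 * ceph_con_keepalive_expired() above in userspace terms -- take the
 * time of the last keepalive ack, add the interval, and the keepalive
 * has expired once "now" reaches that deadline.
 */
#include <stdio.h>
#include <time.h>

static int demo_keepalive_expired(struct timespec last_ack,
				  struct timespec interval)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_REALTIME, &now);

	deadline.tv_sec = last_ack.tv_sec + interval.tv_sec;
	deadline.tv_nsec = last_ack.tv_nsec + interval.tv_nsec;
	if (deadline.tv_nsec >= 1000000000L) {	/* normalize carry */
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	return now.tv_sec > deadline.tv_sec ||
	       (now.tv_sec == deadline.tv_sec &&
		now.tv_nsec >= deadline.tv_nsec);
}

int main(void)
{
	struct timespec last_ack, interval = { 30, 0 };	/* 30s keepalive */

	clock_gettime(CLOCK_REALTIME, &last_ack);
	last_ack.tv_sec -= 60;	/* pretend the last ack was a minute ago */

	printf("expired: %d\n", demo_keepalive_expired(last_ack, interval));
	return 0;	/* prints 1 */
}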
3270
0d9c1ab3 3271static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
43794509 3272{
0d9c1ab3
ID
3273 BUG_ON(msg->num_data_items >= msg->max_data_items);
3274 return &msg->data[msg->num_data_items++];
6644ed7b
AE
3275}
3276
3277static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3278{
e8862740
ID
3279 if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
3280 int num_pages = calc_pages_for(data->alignment, data->length);
3281 ceph_release_page_vector(data->pages, num_pages);
3282 } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
6644ed7b 3283 ceph_pagelist_release(data->pagelist);
e8862740 3284 }
43794509
AE
3285}
3286
90af3602 3287void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
e8862740 3288 size_t length, size_t alignment, bool own_pages)
02afca6c 3289{
6644ed7b
AE
3290 struct ceph_msg_data *data;
3291
07aa1558
AE
3292 BUG_ON(!pages);
3293 BUG_ON(!length);
6644ed7b 3294
0d9c1ab3
ID
3295 data = ceph_msg_data_add(msg);
3296 data->type = CEPH_MSG_DATA_PAGES;
6644ed7b
AE
3297 data->pages = pages;
3298 data->length = length;
3299 data->alignment = alignment & ~PAGE_MASK;
e8862740 3300 data->own_pages = own_pages;
02afca6c 3301
5240d9f9 3302 msg->data_length += length;
02afca6c 3303}
90af3602 3304EXPORT_SYMBOL(ceph_msg_data_add_pages);
31b8006e 3305
90af3602 3306void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
27fa8385
AE
3307 struct ceph_pagelist *pagelist)
3308{
6644ed7b
AE
3309 struct ceph_msg_data *data;
3310
07aa1558
AE
3311 BUG_ON(!pagelist);
3312 BUG_ON(!pagelist->length);
27fa8385 3313
0d9c1ab3
ID
3314 data = ceph_msg_data_add(msg);
3315 data->type = CEPH_MSG_DATA_PAGELIST;
89486833 3316 refcount_inc(&pagelist->refcnt);
6644ed7b
AE
3317 data->pagelist = pagelist;
3318
5240d9f9 3319 msg->data_length += pagelist->length;
27fa8385 3320}
90af3602 3321EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
27fa8385 3322
ea96571f 3323#ifdef CONFIG_BLOCK
5359a17d
ID
3324void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
3325 u32 length)
27fa8385 3326{
6644ed7b
AE
3327 struct ceph_msg_data *data;
3328
0d9c1ab3
ID
3329 data = ceph_msg_data_add(msg);
3330 data->type = CEPH_MSG_DATA_BIO;
5359a17d 3331 data->bio_pos = *bio_pos;
c851c495 3332 data->bio_length = length;
6644ed7b 3333
5240d9f9 3334 msg->data_length += length;
27fa8385 3335}
90af3602 3336EXPORT_SYMBOL(ceph_msg_data_add_bio);
ea96571f 3337#endif /* CONFIG_BLOCK */
27fa8385 3338
b9e281c2
ID
3339void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
3340 struct ceph_bvec_iter *bvec_pos)
3341{
3342 struct ceph_msg_data *data;
3343
0d9c1ab3
ID
3344 data = ceph_msg_data_add(msg);
3345 data->type = CEPH_MSG_DATA_BVECS;
b9e281c2
ID
3346 data->bvec_pos = *bvec_pos;
3347
b9e281c2
ID
3348 msg->data_length += bvec_pos->iter.bi_size;
3349}
3350EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
3351
31b8006e
SW
3352/*
3353 * construct a new message with given type, size
3354 * the new msg has a ref count of 1.
3355 */
0d9c1ab3
ID
3356struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
3357 gfp_t flags, bool can_fail)
31b8006e
SW
3358{
3359 struct ceph_msg *m;
3360
e3d5d638 3361 m = kmem_cache_zalloc(ceph_msg_cache, flags);
31b8006e
SW
3362 if (m == NULL)
3363 goto out;
31b8006e
SW
3364
3365 m->hdr.type = cpu_to_le16(type);
45c6ceb5 3366 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
31b8006e 3367 m->hdr.front_len = cpu_to_le32(front_len);
ca20892d 3368
9516e45b
AE
3369 INIT_LIST_HEAD(&m->list_head);
3370 kref_init(&m->kref);
ca20892d 3371
31b8006e
SW
3372 /* front */
3373 if (front_len) {
eeb0bed5 3374 m->front.iov_base = ceph_kvmalloc(front_len, flags);
31b8006e 3375 if (m->front.iov_base == NULL) {
b61c2763 3376 dout("ceph_msg_new can't allocate %d bytes\n",
31b8006e
SW
3377 front_len);
3378 goto out2;
3379 }
3380 } else {
3381 m->front.iov_base = NULL;
3382 }
f2be82b0 3383 m->front_alloc_len = m->front.iov_len = front_len;
31b8006e 3384
0d9c1ab3
ID
3385 if (max_data_items) {
3386 m->data = kmalloc_array(max_data_items, sizeof(*m->data),
3387 flags);
3388 if (!m->data)
3389 goto out2;
3390
3391 m->max_data_items = max_data_items;
3392 }
3393
bb257664 3394 dout("ceph_msg_new %p front %d\n", m, front_len);
31b8006e
SW
3395 return m;
3396
3397out2:
3398 ceph_msg_put(m);
3399out:
b61c2763
SW
3400 if (!can_fail) {
3401 pr_err("msg_new can't create type %d front %d\n", type,
3402 front_len);
f0ed1b7c 3403 WARN_ON(1);
b61c2763
SW
3404 } else {
3405 dout("msg_new can't create type %d front %d\n", type,
3406 front_len);
3407 }
a79832f2 3408 return NULL;
31b8006e 3409}
0d9c1ab3
ID
3410EXPORT_SYMBOL(ceph_msg_new2);
3411
3412struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3413 bool can_fail)
3414{
3415 return ceph_msg_new2(type, front_len, 0, flags, can_fail);
3416}
3d14c5d2 3417EXPORT_SYMBOL(ceph_msg_new);
31b8006e 3418
31b8006e
SW
3419/*
3420 * Allocate "middle" portion of a message, if it is needed and wasn't
3421 * allocated by alloc_msg. This allows us to read a small fixed-size
3422 * per-type header in the front and then gracefully fail (i.e.,
3423 * propagate the error to the caller based on info in the front) when
3424 * the middle is too large.
3425 */
2450418c 3426static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
31b8006e
SW
3427{
3428 int type = le16_to_cpu(msg->hdr.type);
3429 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3430
3431 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3432 ceph_msg_type_name(type), middle_len);
3433 BUG_ON(!middle_len);
3434 BUG_ON(msg->middle);
3435
b6c1d5b8 3436 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
31b8006e
SW
3437 if (!msg->middle)
3438 return -ENOMEM;
3439 return 0;
3440}
3441
2450418c 3442/*
1c20f2d2
AE
3443 * Allocate a message for receiving an incoming message on a
3444 * connection, and save the result in con->in_msg. Uses the
3445 * connection's private alloc_msg op if available.
3446 *
4740a623
SW
3447 * Returns 0 on success, or a negative error code.
3448 *
3449 * On success, if we set *skip = 1:
3450 * - the next message should be skipped and ignored.
3451 * - con->in_msg == NULL
3452 * or if we set *skip = 0:
3453 * - con->in_msg is non-null.
3454 * On error (ENOMEM, EAGAIN, ...),
3455 * - con->in_msg == NULL
2450418c 3456 */
6503e0b6
ID
3457int ceph_con_in_msg_alloc(struct ceph_connection *con,
3458 struct ceph_msg_header *hdr, int *skip)
2450418c 3459{
2450418c 3460 int middle_len = le32_to_cpu(hdr->middle_len);
1d866d1c 3461 struct ceph_msg *msg;
4740a623 3462 int ret = 0;
2450418c 3463
1c20f2d2 3464 BUG_ON(con->in_msg != NULL);
53ded495 3465 BUG_ON(!con->ops->alloc_msg);
2450418c 3466
53ded495
AE
3467 mutex_unlock(&con->mutex);
3468 msg = con->ops->alloc_msg(con, hdr, skip);
3469 mutex_lock(&con->mutex);
6d7f62bf 3470 if (con->state != CEPH_CON_S_OPEN) {
53ded495 3471 if (msg)
1d866d1c 3472 ceph_msg_put(msg);
53ded495
AE
3473 return -EAGAIN;
3474 }
4137577a
AE
3475 if (msg) {
3476 BUG_ON(*skip);
583d0fef 3477 msg_con_set(msg, con);
4137577a 3478 con->in_msg = msg;
4137577a
AE
3479 } else {
3480 /*
3481 * Null message pointer means either we should skip
3482 * this message or we couldn't allocate memory. The
3483 * former is not an error.
3484 */
3485 if (*skip)
3486 return 0;
4137577a 3487
67c64eb7 3488 con->error_msg = "error allocating memory for incoming message";
53ded495 3489 return -ENOMEM;
2450418c 3490 }
fc4c128e 3491 memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
2450418c 3492
1c20f2d2
AE
3493 if (middle_len && !con->in_msg->middle) {
3494 ret = ceph_alloc_middle(con, con->in_msg);
2450418c 3495 if (ret < 0) {
1c20f2d2
AE
3496 ceph_msg_put(con->in_msg);
3497 con->in_msg = NULL;
2450418c
YS
3498 }
3499 }
9d7f0f13 3500
4740a623 3501 return ret;
2450418c
YS
3502}
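/*
 * Illustrative sketch, not part of the original file: an alloc_msg op
 * honouring the *skip contract documented above.  Returning NULL with
 * *skip set tells the messenger to discard the incoming message;
 * returning NULL with *skip clear is treated as an allocation failure.
 * The type check is only an example -- real implementations (e.g. the
 * osd_client) typically match hdr->tid against in-flight requests.
 */
static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);

	*skip = 0;
	if (type != CEPH_MSG_OSD_OPREPLY) {
		*skip = 1;	/* not for us; skipping is not an error */
		return NULL;
	}

	return ceph_msg_new(type, front_len, GFP_NOFS, false);
}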
3503
6503e0b6 3504void ceph_con_get_out_msg(struct ceph_connection *con)
771294fe
ID
3505{
3506 struct ceph_msg *msg;
3507
3508 BUG_ON(list_empty(&con->out_queue));
3509 msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
3510 WARN_ON(msg->con != con);
3511
3512 /*
3513 * Put the message on the "sent" list using a ref from ceph_con_send().
3514 * The ref is put when the message is acked or revoked.
3515 */
3516 list_move_tail(&msg->list_head, &con->out_sent);
3517
3518 /*
3519 * Only assign outgoing seq # if we haven't sent this message
3520 * yet. If it is requeued, resend with its original seq.
3521 */
3522 if (msg->needs_out_seq) {
3523 msg->hdr.seq = cpu_to_le64(++con->out_seq);
3524 msg->needs_out_seq = false;
3525
3526 if (con->ops->reencode_message)
3527 con->ops->reencode_message(msg);
3528 }
3529
3530 /*
3531 * Get a ref for out_msg. It is put when we are done sending the
3532 * message or in case of a fault.
3533 */
3534 WARN_ON(con->out_msg);
3535 con->out_msg = ceph_msg_get(msg);
3536}
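/*
 * Illustrative sketch, not part of the original file: why the
 * needs_out_seq check above matters.  On a fault, sent-but-unacked
 * messages go back onto out_queue; they already carry a seq and have
 * needs_out_seq == false, so a later ceph_con_get_out_msg() resends
 * them with their original seq and the peer can recognize duplicates.
 * A minimal requeue along those lines:
 */
static void example_requeue_sent(struct ceph_connection *con)
{
	/* splice to the head so resends precede never-sent messages */
	list_splice_init(&con->out_sent, &con->out_queue);
}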
31b8006e
SW
3537
3538/*
3539 * Free a generically kmalloc'd message.
3540 */
0215e44b 3541static void ceph_msg_free(struct ceph_msg *m)
31b8006e 3542{
0215e44b 3543 dout("%s %p\n", __func__, m);
4965fc38 3544 kvfree(m->front.iov_base);
0d9c1ab3 3545 kfree(m->data);
e3d5d638 3546 kmem_cache_free(ceph_msg_cache, m);
31b8006e
SW
3547}
3548
0215e44b 3549static void ceph_msg_release(struct kref *kref)
c2e552e7
SW
3550{
3551 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
0d9c1ab3 3552 int i;
31b8006e 3553
0215e44b 3554 dout("%s %p\n", __func__, m);
c2e552e7
SW
3555 WARN_ON(!list_empty(&m->list_head));
3556
583d0fef
ID
3557 msg_con_set(m, NULL);
3558
c2e552e7
SW
3559 /* drop middle, data, if any */
3560 if (m->middle) {
3561 ceph_buffer_put(m->middle);
3562 m->middle = NULL;
31b8006e 3563 }
5240d9f9 3564
0d9c1ab3
ID
3565 for (i = 0; i < m->num_data_items; i++)
3566 ceph_msg_data_destroy(&m->data[i]);
58bb3b37 3567
c2e552e7
SW
3568 if (m->pool)
3569 ceph_msgpool_put(m->pool, m);
3570 else
0215e44b
ID
3571 ceph_msg_free(m);
3572}
3573
3574struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3575{
3576 dout("%s %p (was %d)\n", __func__, msg,
2c935bc5 3577 kref_read(&msg->kref));
0215e44b
ID
3578 kref_get(&msg->kref);
3579 return msg;
3580}
3581EXPORT_SYMBOL(ceph_msg_get);
3582
3583void ceph_msg_put(struct ceph_msg *msg)
3584{
3585 dout("%s %p (was %d)\n", __func__, msg,
2c935bc5 3586 kref_read(&msg->kref));
0215e44b 3587 kref_put(&msg->kref, ceph_msg_release);
31b8006e 3588}
0215e44b 3589EXPORT_SYMBOL(ceph_msg_put);
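/*
 * Illustrative sketch, not part of the original file: typical get/put
 * pairing around ceph_con_send(), which consumes one message ref.  A
 * caller that still needs the message after queueing it takes its own
 * ref first.
 */
static void example_send_and_keep(struct ceph_connection *con,
				  struct ceph_msg *msg)
{
	ceph_con_send(con, ceph_msg_get(msg));	/* send path owns a ref */

	/* ... inspect msg while waiting for an ack or reply ... */

	ceph_msg_put(msg);	/* drop our ref when done */
}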
9ec7cab1
SW
3590
3591void ceph_msg_dump(struct ceph_msg *msg)
3592{
3cea4c30
ID
3593 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3594 msg->front_alloc_len, msg->data_length);
9ec7cab1
SW
3595 print_hex_dump(KERN_DEBUG, "header: ",
3596 DUMP_PREFIX_OFFSET, 16, 1,
3597 &msg->hdr, sizeof(msg->hdr), true);
3598 print_hex_dump(KERN_DEBUG, " front: ",
3599 DUMP_PREFIX_OFFSET, 16, 1,
3600 msg->front.iov_base, msg->front.iov_len, true);
3601 if (msg->middle)
3602 print_hex_dump(KERN_DEBUG, "middle: ",
3603 DUMP_PREFIX_OFFSET, 16, 1,
3604 msg->middle->vec.iov_base,
3605 msg->middle->vec.iov_len, true);
3606 print_hex_dump(KERN_DEBUG, "footer: ",
3607 DUMP_PREFIX_OFFSET, 16, 1,
3608 &msg->footer, sizeof(msg->footer), true);
3609}
3d14c5d2 3610EXPORT_SYMBOL(ceph_msg_dump);