mptcp: Implement MPTCP receive path
net/mptcp/protocol.c
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || mptcp_subflow_ctx(msk->subflow->sk)->fourth_ack)
		return NULL;

	return msk->subflow;
}

/* If msk has a single subflow and the MP_CAPABLE handshake failed,
 * return it.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_tcp_fallback(const struct mptcp_sock *msk)
{
	struct socket *ssock = __mptcp_nmpc_socket(msk);

	sock_owned_by_me((const struct sock *)msk);

	if (!ssock || sk_is_mptcp(ssock->sk))
		return NULL;

	return ssock;
}

static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return ((struct sock *)msk)->sk_state == TCP_CLOSE;
}

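/* Create the initial subflow socket for @msk if it does not exist yet and
 * optionally move the MPTCP socket to @state (MPTCP_SAME_STATE leaves the
 * state unchanged). Returns the subflow socket or an ERR_PTR() on failure.
 */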
static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}

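/* Return the TCP socket backing the first subflow on the conn_list, or NULL
 * if the connection has no subflows yet. The msk socket lock must be held.
 */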
static struct sock *mptcp_subflow_get(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

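/* Make sure one preallocated MPTCP skb extension is cached on the msk; it is
 * consumed by mptcp_sendmsg_frag() when mapping outgoing data.
 */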
static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}

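/* Copy at most one page-frag worth of data from @msg onto the subflow @ssk
 * write queue and attach an MPTCP skb extension carrying the DSS mapping
 * (data_seq, subflow_seq, data_len) for the new data.
 * Returns the number of bytes queued, or a negative error code.
 */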
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, long *timeo)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	struct page_frag *pfrag;
	struct sk_buff *skb;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 */
	pfrag = sk_page_frag(sk);
	while (!sk_page_frag_refill(ssk, pfrag) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	psize = min_t(int, pfrag->size - pfrag->offset, size_goal);

	pr_debug("left=%zu", msg_data_left(msg));
	psize = copy_page_from_iter(pfrag->page, pfrag->offset,
				    min_t(size_t, msg_data_left(msg), psize),
				    &msg->msg_iter);
	pr_debug("left=%zu", msg_data_left(msg));
	if (!psize)
		return -EINVAL;

	/* Mark the end of the previous write so the beginning of the
	 * next write (with its own mptcp skb extension data) is not
	 * collapsed.
	 */
	skb = tcp_write_queue_tail(ssk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;
	if (unlikely(ret < psize))
		iov_iter_revert(&msg->msg_iter, psize - ret);

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = msk->write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

	pfrag->offset += ret;
	msk->write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle, size_goal);
	return ret;
}

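/* Send data over the single active subflow; when the MP_CAPABLE handshake
 * has failed, fall back to plain TCP passthrough on the initial subflow.
 */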
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	int ret = 0;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		release_sock(sk);
		return ret;
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo);
		if (ret < 0)
			break;

		copied += ret;
	}

	if (copied > 0)
		ret = copied;

	release_sock(ssk);
	release_sock(sk);
	return ret;
}

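/* Read actor for tcp_read_sock(): copy up to desc->count bytes from @skb into
 * the msghdr passed via desc->arg.data, or just flush the payload when no
 * msghdr is provided.
 */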
int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
		     unsigned int offset, size_t len)
{
	struct mptcp_read_arg *arg = desc->arg.data;
	size_t copy_len;

	copy_len = min(desc->count, len);

	if (likely(arg->msg)) {
		int err;

		err = skb_copy_datagram_msg(skb, offset, arg->msg, copy_len);
		if (err) {
			pr_debug("error path");
			desc->error = err;
			return err;
		}
	} else {
		pr_debug("Flushing skb payload");
	}

	desc->count -= copy_len;

	pr_debug("consumed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

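/* Receive data from the single active subflow, or via plain TCP passthrough
 * when the MP_CAPABLE handshake has failed.
 */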
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	struct sock *ssk;
	int copied = 0;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		release_sock(sk);
		return copied;
	}

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	copied = sock_recvmsg(ssk->sk_socket, msg, flags);

	release_sock(sk);

	return copied;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static int mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);

	return 0;
}

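/* Propagate a shutdown request to one subflow, special-casing sockets that
 * are still in LISTEN or SYN_SENT state.
 */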
static void mptcp_subflow_shutdown(struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}

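/* Release the MPTCP token, close every subflow and free the cached skb
 * extension before releasing the socket itself.
 */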
static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	lock_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);
	release_sock(sk);
	sk_common_release(sk);
}

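/* Copy the local/remote addresses and ports from the subflow socket @ssk to
 * the MPTCP socket @msk, for both IPv4 and IPv6.
 */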
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

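/* Accept a connection on the in-kernel listener subflow. If the peer
 * completed the MP_CAPABLE handshake, clone the listening MPTCP socket into
 * a new msk and attach the accepted subflow to it; otherwise return the
 * plain TCP socket as-is.
 */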
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;
		u64 ack_seq;

		subflow = mptcp_subflow_ctx(newsk);
		lock_sock(sk);

		local_bh_disable();
		new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
		if (!new_mptcp_sock) {
			*err = -ENOBUFS;
			local_bh_enable();
			release_sock(sk);
			mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
			tcp_close(newsk, 0);
			return NULL;
		}

		mptcp_init_sock(new_mptcp_sock);

		msk = mptcp_sk(new_mptcp_sock);
		msk->remote_key = subflow->remote_key;
		msk->local_key = subflow->local_key;
		msk->token = subflow->token;
		msk->subflow = NULL;

		mptcp_token_update_accept(newsk, new_mptcp_sock);

		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		msk->write_seq = subflow->idsn + 1;
		ack_seq++;
		msk->ack_seq = ack_seq;
		subflow->map_seq = ack_seq;
		subflow->map_subflow_seq = 1;
		subflow->rel_write_seq = 1;
		subflow->tcp_sock = ssk;
		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		/* will be fully established at mptcp_stream_accept()
		 * completion.
		 */
		inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
		bh_unlock_sock(new_mptcp_sock);
		local_bh_enable();
		release_sock(sk);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *uoptval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	char __kernel *optval;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	/* will be treated as __user in tcp_setsockopt */
	optval = (char __kernel __force *)uoptval;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (!IS_ERR(ssock)) {
		pr_debug("subflow=%p", ssock->sk);
		ret = kernel_setsockopt(ssock, level, optname, optval, optlen);
	}
	release_sock(sk);

	return ret;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *uoptval, int __user *uoption)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	char __kernel *optval;
	int ret = -EOPNOTSUPP;
	int __kernel *option;
	struct socket *ssock;

	/* will be treated as __user in tcp_getsockopt */
	optval = (char __kernel __force *)uoptval;
	option = (int __kernel __force *)uoption;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (!IS_ERR(ssock)) {
		pr_debug("subflow=%p", ssock->sk);
		ret = kernel_getsockopt(ssock, level, optname, optval, option);
	}
	release_sock(sk);

	return ret;
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

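/* Called once the MP_CAPABLE handshake has completed on the initial subflow:
 * derive the initial data-level sequence numbers from the handshake keys and
 * propagate keys, token and sequence state to the parent msk.
 */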
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);

	if (!subflow->mp_capable)
		return;

	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
}

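/* Attach subflow @sk to the user-visible struct socket @parent so that wait
 * queue and ownership (uid) lookups resolve to the MPTCP socket.
 */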
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}

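/* accept() on the MPTCP socket: forward to the listener subflow, then graft
 * the accepted subflows onto the new struct socket and mark the MPTCP
 * connection as established.
 */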
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

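/* The receive path is not hooked up to subflow events yet: report no events
 * for now.
 */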
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	__poll_t mask = 0;

	return mask;
}

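/* Shut down the MPTCP-level connection: update the socket state and
 * propagate the request to every subflow.
 */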
static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static struct proto_ops mptcp_stream_ops;

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

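/* Register the IPPROTO_MPTCP stream protocol; userspace selects it with
 * socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP). The proto_ops are copied from
 * inet_stream_ops, overriding only the handlers implemented above.
 */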
void __init mptcp_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
	mptcp_stream_ops = inet_stream_ops;
	mptcp_stream_ops.bind = mptcp_bind;
	mptcp_stream_ops.connect = mptcp_stream_connect;
	mptcp_stream_ops.poll = mptcp_poll;
	mptcp_stream_ops.accept = mptcp_stream_accept;
	mptcp_stream_ops.getname = mptcp_v4_getname;
	mptcp_stream_ops.listen = mptcp_listen;
	mptcp_stream_ops.shutdown = mptcp_shutdown;

	mptcp_subflow_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct proto_ops mptcp_v6_stream_ops;
static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int mptcpv6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
				 sizeof(struct ipv6_pinfo);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	mptcp_v6_stream_ops = inet6_stream_ops;
	mptcp_v6_stream_ops.bind = mptcp_bind;
	mptcp_v6_stream_ops.connect = mptcp_stream_connect;
	mptcp_v6_stream_ops.poll = mptcp_poll;
	mptcp_v6_stream_ops.accept = mptcp_stream_accept;
	mptcp_v6_stream_ops.getname = mptcp_v6_getname;
	mptcp_v6_stream_ops.listen = mptcp_listen;
	mptcp_v6_stream_ops.shutdown = mptcp_shutdown;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif