// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

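/* Compute the MPTCP-level HMAC: the two 32-bit nonces are packed big-endian
 * into an 8-byte message and authenticated with the (key1, key2) pair via
 * mptcp_crypto_hmac_sha(). Callers use either the leading 64 bits (the
 * truncated thmac carried in the MP_JOIN SYN-ACK) or the leading
 * MPTCPOPT_HMAC_LEN bytes (the full HMAC in the third ACK).
 */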
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

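/* Listener side initialization of an incoming (SYN) request socket: parse the
 * MPTCP options carried by the SYN and set up either MP_CAPABLE state (a new
 * connection-level token) or MP_JOIN state (nonces, token and truncated HMAC
 * for the SYN-ACK); otherwise the request proceeds as plain TCP.
 */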
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	mptcp_get_options(skb, &mp_opt);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return;
#endif

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);
		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

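/* Active open (client) side SYN-ACK processing, installed as the
 * sk_rx_dst_set callback (see mptcp_subflow_init()): move the MPTCP parent to
 * TCP_ESTABLISHED if needed, then either complete the MP_CAPABLE handshake,
 * validate the peer thmac and compute our own HMAC for an MP_JOIN, or fall
 * back to plain TCP when the peer did not echo the expected option.
 */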
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	tcp_send_active_reset(sk, GFP_ATOMIC);
	tcp_done(sk);
}

static struct request_sock_ops subflow_request_sock_ops;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will not be in
	 * SYN_RECV state and doesn't have SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_SYN_RECV) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_token_destroy(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

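/* syn_recv_sock() override for MPTCP listeners: create the child TCP socket
 * and, depending on the request, attach it to a freshly cloned MPTCP socket
 * (MP_CAPABLE) or to the existing msk owning the token (MP_JOIN). On failure
 * the child either falls back to plain TCP or, for MP_JOIN, is disposed of
 * and a reset is sent via req->rsk_ops->send_reset().
 */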
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

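/* Outcome of DSS mapping processing for the skb at the head of the subflow
 * receive queue:
 * MAPPING_OK       - a valid mapping covers the current data
 * MAPPING_INVALID  - protocol error, the subflow will be reset
 * MAPPING_EMPTY    - nothing queued (or only a 0-len FIN was consumed)
 * MAPPING_DATA_FIN - DATA_FIN with no payload
 * MAPPING_DUMMY    - the subflow fell back to plain TCP, the caller
 *                    synthesizes a dummy mapping
 */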
enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

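/* Expand a 32-bit data sequence number carried by a DSS option to 64 bits,
 * assuming the new map starts at or after the end of the previous one, so
 * the upper 32 bits are inherited from (old_seq + old_data_len + 1).
 * E.g. old_seq=0x1fffffff0, old_data_len=0x20, seq=0x5 expands to
 * 0x200000005.
 */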
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
			     subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

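/* Parse and validate the DSS mapping (if any) attached to the skb at the head
 * of the subflow receive queue, updating subflow->map_* on success. A valid
 * mapping may only be replaced by an identical one; data already fully
 * covered by the current mapping must not carry a new one.
 */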
static enum mapping_status get_mapping_status(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue; those are the only 0 len pkts ever expected
			 * here, and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			pr_debug("DATA_FIN with no payload");
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		subflow->use_64bit_ack = 0;
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
		subflow->use_64bit_ack = 1;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

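/* Walk the DSS mappings looking for in-sequence data at the MPTCP level:
 * mappings whose DSN does not match the current msk ack_seq are discarded
 * through tcp_read_sock(), fallback subflows get a synthetic mapping, and any
 * protocol violation resets the subflow.
 */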
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			return true;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmissions; we can hit "future" values on active backup
		 * subflow switch, we rely on retransmissions to get
		 * in-sequence data.
		 * Concurrent subflow support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}

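/* Refresh subflow->data_avail: drop a fully consumed mapping, then check
 * whether the head of the receive queue carries data not yet read at the
 * subflow level.
 */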
bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
		       before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

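/* sk_data_ready override: listening subflows wake the MPTCP parent so a
 * pending connection can be accepted; established subflows propagate the
 * event only when in-sequence MPTCP-level data is actually available.
 */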
static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

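/* Create and connect an additional (MP_JOIN) subflow for an already fully
 * established MPTCP connection, binding it to the given local address and
 * ifindex and linking it into msk->join_list. The connect is non-blocking;
 * the join handshake completes asynchronously in subflow_finish_connect().
 */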
int __mptcp_subflow_connect(struct sock *sk, int ifindex,
			    const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
		 local_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->request_join = 1;
	subflow->request_bkup = 1;
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							 gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

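/* Clone the ULP context onto the child socket created at 3rd ACK time,
 * inheriting the state collected in the request socket for MP_CAPABLE or
 * MP_JOIN subflows; plain TCP children are reverted via
 * subflow_ulp_fallback().
 */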
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name = "mptcp",
	.owner = THIS_MODULE,
	.init = subflow_ulp_init,
	.release = subflow_ulp_release,
	.clone = subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

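/* Register the MPTCP subflow machinery: clone the TCP request sock and af
 * ops, override the MPTCP-relevant hooks (conn_request, syn_recv_sock,
 * sk_rx_dst_set) and register the "mptcp" ULP.
 */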
void __init mptcp_subflow_init(void)
{
	subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}