Commit | Line | Data |
---|---|---|
2303f994 PK |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Multipath TCP | |
3 | * | |
4 | * Copyright (c) 2017 - 2019, Intel Corporation. | |
5 | */ | |
6 | ||
79c0949e PK |
7 | #define pr_fmt(fmt) "MPTCP: " fmt |
8 | ||
2303f994 PK |
9 | #include <linux/kernel.h> |
10 | #include <linux/module.h> | |
11 | #include <linux/netdevice.h> | |
a24d22b2 | 12 | #include <crypto/sha2.h> |
bd5af654 | 13 | #include <crypto/utils.h> |
2303f994 PK |
14 | #include <net/sock.h> |
15 | #include <net/inet_common.h> | |
16 | #include <net/inet_hashtables.h> | |
17 | #include <net/protocol.h> | |
cec37a6e PK |
18 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
19 | #include <net/ip6_route.h> | |
b19bc294 | 20 | #include <net/transp_v6.h> |
cec37a6e | 21 | #endif |
2303f994 | 22 | #include <net/mptcp.h> |
6be49dea | 23 | |
2303f994 | 24 | #include "protocol.h" |
fc518953 FW |
25 | #include "mib.h" |
26 | ||
0918e34b | 27 | #include <trace/events/mptcp.h> |
40e0b090 | 28 | #include <trace/events/sock.h> |
0918e34b | 29 | |
b19bc294 PA |
30 | static void mptcp_subflow_ops_undo_override(struct sock *ssk); |
31 | ||
fc518953 FW |
32 | static void SUBFLOW_REQ_INC_STATS(struct request_sock *req, |
33 | enum linux_mptcp_mib_field field) | |
34 | { | |
35 | MPTCP_INC_STATS(sock_net(req_to_sk(req)), field); | |
36 | } | |
2303f994 | 37 | |
79c0949e PK |
38 | static void subflow_req_destructor(struct request_sock *req) |
39 | { | |
40 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); | |
41 | ||
cb41b195 | 42 | pr_debug("subflow_req=%p\n", subflow_req); |
79c0949e | 43 | |
8fd4de12 PA |
44 | if (subflow_req->msk) |
45 | sock_put((struct sock *)subflow_req->msk); | |
46 | ||
2c5ebd00 | 47 | mptcp_token_destroy_request(req); |
79c0949e PK |
48 | } |
49 | ||
f296234c PK |
50 | static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, |
51 | void *hmac) | |
52 | { | |
53 | u8 msg[8]; | |
54 | ||
55 | put_unaligned_be32(nonce1, &msg[0]); | |
56 | put_unaligned_be32(nonce2, &msg[4]); | |
57 | ||
58 | mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); | |
59 | } | |
60 | ||
4cf8b7e4 PA |
61 | static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk) |
62 | { | |
63 | return mptcp_is_fully_established((void *)msk) && | |
4d25247d KM |
64 | ((mptcp_pm_is_userspace(msk) && |
65 | mptcp_userspace_pm_active(msk)) || | |
66 | READ_ONCE(msk->pm.accept_subflow)); | |
4cf8b7e4 PA |
67 | } |
68 | ||
f296234c | 69 | /* validate received token and create truncated hmac and nonce for SYN-ACK */ |
ec20e143 GT |
70 | static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req) |
71 | { | |
72 | struct mptcp_sock *msk = subflow_req->msk; | |
73 | u8 hmac[SHA256_DIGEST_SIZE]; | |
74 | ||
75 | get_random_bytes(&subflow_req->local_nonce, sizeof(u32)); | |
76 | ||
1c09d7cb PA |
77 | subflow_generate_hmac(READ_ONCE(msk->local_key), |
78 | READ_ONCE(msk->remote_key), | |
ec20e143 GT |
79 | subflow_req->local_nonce, |
80 | subflow_req->remote_nonce, hmac); | |
81 | ||
82 | subflow_req->thmac = get_unaligned_be64(hmac); | |
83 | } | |
84 | ||
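The thmac exchanged in the MP_JOIN handshake is not the full 32-byte HMAC-SHA256 output: the two 32-bit nonces are packed big-endian into an 8-byte message, and only the 8 most significant digest bytes survive the truncation into subflow_req->thmac. A minimal user-space sketch of that packing and truncation (the digest is taken as an input, standing in for mptcp_crypto_hmac_sha()):

```c
#include <stdint.h>

/* Pack the two 32-bit nonces big-endian, as put_unaligned_be32() does
 * in subflow_generate_hmac().
 */
static void pack_nonces(uint32_t nonce1, uint32_t nonce2, uint8_t msg[8])
{
	for (int i = 0; i < 4; i++) {
		msg[i]     = nonce1 >> (24 - 8 * i);
		msg[4 + i] = nonce2 >> (24 - 8 * i);
	}
}

/* Keep the 8 most significant digest bytes, read big-endian, the same
 * result get_unaligned_be64() yields for subflow_req->thmac.
 */
static uint64_t truncate_hmac(const uint8_t digest[32])
{
	uint64_t thmac = 0;

	for (int i = 0; i < 8; i++)
		thmac = (thmac << 8) | digest[i];
	return thmac;
}
```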
b5e2e42f | 85 | static struct mptcp_sock *subflow_token_join_request(struct request_sock *req) |
f296234c PK |
86 | { |
87 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); | |
f296234c PK |
88 | struct mptcp_sock *msk; |
89 | int local_id; | |
90 | ||
ea1300b9 | 91 | msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token); |
f296234c | 92 | if (!msk) { |
fc518953 | 93 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN); |
8fd4de12 | 94 | return NULL; |
f296234c PK |
95 | } |
96 | ||
97 | local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req); | |
98 | if (local_id < 0) { | |
99 | sock_put((struct sock *)msk); | |
8fd4de12 | 100 | return NULL; |
f296234c PK |
101 | } |
102 | subflow_req->local_id = local_id; | |
6834097f | 103 | subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req); |
f296234c | 104 | |
8fd4de12 | 105 | return msk; |
f296234c PK |
106 | } |
107 | ||
d8b59efa | 108 | static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener) |
cec37a6e | 109 | { |
cec37a6e | 110 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); |
cec37a6e PK |
111 | |
112 | subflow_req->mp_capable = 0; | |
f296234c | 113 | subflow_req->mp_join = 0; |
06fe1719 | 114 | subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener)); |
bab6b88e | 115 | subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener)); |
8fd4de12 | 116 | subflow_req->msk = NULL; |
2c5ebd00 | 117 | mptcp_token_init_request(req); |
78d8b7bc FW |
118 | } |
119 | ||
5bc56388 GT |
120 | static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk) |
121 | { | |
122 | return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport; | |
123 | } | |
124 | ||
dc87efdb FW |
125 | static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason) |
126 | { | |
127 | struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP); | |
128 | ||
129 | if (mpext) { | |
130 | memset(mpext, 0, sizeof(*mpext)); | |
131 | mpext->reset_reason = reason; | |
132 | } | |
133 | } | |
134 | ||
3d041393 PA |
135 | static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb) |
136 | { | |
137 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT); | |
138 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); | |
139 | return -EPERM; | |
140 | } | |
141 | ||
3ecfbe3e FW |
142 | /* Init mptcp request socket. |
143 | * | |
144 | * Returns an error code if a JOIN has failed and a TCP reset | |
145 | * should be sent. | |
146 | */ | |
d8b59efa PA |
147 | static int subflow_check_req(struct request_sock *req, |
148 | const struct sock *sk_listener, | |
149 | struct sk_buff *skb) | |
78d8b7bc FW |
150 | { |
151 | struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); | |
152 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); | |
153 | struct mptcp_options_received mp_opt; | |
74c7dfbe | 154 | bool opt_mp_capable, opt_mp_join; |
78d8b7bc | 155 | |
cb41b195 | 156 | pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener); |
78d8b7bc | 157 | |
d8b59efa PA |
158 | #ifdef CONFIG_TCP_MD5SIG |
159 | /* no MPTCP if MD5SIG is enabled on this socket or we may run out of | |
160 | * TCP option space. | |
161 | */ | |
382c6001 JX |
162 | if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) { |
163 | subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); | |
d8b59efa | 164 | return -EINVAL; |
382c6001 | 165 | } |
d8b59efa | 166 | #endif |
78d8b7bc | 167 | |
0799e21b | 168 | mptcp_get_options(skb, &mp_opt); |
78d8b7bc | 169 | |
724b00c1 | 170 | opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN); |
66ff70df | 171 | opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN); |
74c7dfbe | 172 | if (opt_mp_capable) { |
fc518953 FW |
173 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); |
174 | ||
3d041393 PA |
175 | if (unlikely(listener->pm_listener)) |
176 | return subflow_reset_req_endp(req, skb); | |
74c7dfbe | 177 | if (opt_mp_join) |
3ecfbe3e | 178 | return 0; |
74c7dfbe | 179 | } else if (opt_mp_join) { |
fc518953 | 180 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); |
4dde0d72 MBN |
181 | |
182 | if (mp_opt.backup) | |
183 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX); | |
3d041393 PA |
184 | } else if (unlikely(listener->pm_listener)) { |
185 | return subflow_reset_req_endp(req, skb); | |
fc518953 | 186 | } |
f296234c | 187 | |
74c7dfbe | 188 | if (opt_mp_capable && listener->request_mptcp) { |
c68a0cd1 | 189 | int err, retries = MPTCP_TOKEN_MAX_RETRIES; |
535fb815 | 190 | |
c83a47e5 | 191 | subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; |
535fb815 FW |
192 | again: |
193 | do { | |
194 | get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key)); | |
195 | } while (subflow_req->local_key == 0); | |
79c0949e | 196 | |
c83a47e5 FW |
197 | if (unlikely(req->syncookie)) { |
198 | mptcp_crypto_key_sha(subflow_req->local_key, | |
199 | &subflow_req->token, | |
200 | &subflow_req->idsn); | |
201 | if (mptcp_token_exists(subflow_req->token)) { | |
202 | if (retries-- > 0) | |
203 | goto again; | |
a16195e3 | 204 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); |
c83a47e5 FW |
205 | } else { |
206 | subflow_req->mp_capable = 1; | |
207 | } | |
3ecfbe3e | 208 | return 0; |
c83a47e5 FW |
209 | } |
210 | ||
79c0949e PK |
211 | err = mptcp_token_new_request(req); |
212 | if (err == 0) | |
213 | subflow_req->mp_capable = 1; | |
535fb815 FW |
214 | else if (retries-- > 0) |
215 | goto again; | |
a16195e3 PA |
216 | else |
217 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); | |
79c0949e | 218 | |
74c7dfbe | 219 | } else if (opt_mp_join && listener->request_mptcp) { |
ec3edaa7 | 220 | subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; |
f296234c | 221 | subflow_req->mp_join = 1; |
cfde141e PA |
222 | subflow_req->backup = mp_opt.backup; |
223 | subflow_req->remote_id = mp_opt.join_id; | |
224 | subflow_req->token = mp_opt.token; | |
225 | subflow_req->remote_nonce = mp_opt.nonce; | |
b5e2e42f | 226 | subflow_req->msk = subflow_token_join_request(req); |
9466a1cc | 227 | |
3ecfbe3e | 228 | /* Can't fall back to TCP in this case. */ |
dc87efdb FW |
229 | if (!subflow_req->msk) { |
230 | subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); | |
3ecfbe3e | 231 | return -EPERM; |
dc87efdb | 232 | } |
3ecfbe3e | 233 | |
5bc56388 | 234 | if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { |
cb41b195 | 235 | pr_debug("syn inet_sport=%d %d\n", |
5bc56388 GT |
236 | ntohs(inet_sk(sk_listener)->inet_sport), |
237 | ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); | |
238 | if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { | |
2fbdd9ea | 239 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX); |
382c6001 | 240 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); |
5bc56388 GT |
241 | return -EPERM; |
242 | } | |
2fbdd9ea | 243 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX); |
5bc56388 GT |
244 | } |
245 | ||
ec20e143 GT |
246 | subflow_req_create_thmac(subflow_req); |
247 | ||
3ecfbe3e | 248 | if (unlikely(req->syncookie)) { |
382c6001 | 249 | if (!mptcp_can_accept_new_subflow(subflow_req->msk)) { |
4ce7fb8d | 250 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINREJECTED); |
382c6001 | 251 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); |
8547ea5f | 252 | return -EPERM; |
382c6001 JX |
253 | } |
254 | ||
255 | subflow_init_req_cookie_join_save(subflow_req, skb); | |
9466a1cc FW |
256 | } |
257 | ||
cb41b195 | 258 | pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token, |
8fd4de12 | 259 | subflow_req->remote_nonce, subflow_req->msk); |
cec37a6e | 260 | } |
3ecfbe3e FW |
261 | |
262 | return 0; | |
cec37a6e PK |
263 | } |
264 | ||
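subflow_check_req() keeps drawing fresh random local keys until the derived token is unique, and gives up on MPTCP (plain-TCP fallback, counted as TOKENFALLBACKINIT) once the retry budget is exhausted. A sketch of that retry pattern under stated assumptions: derive_token() and token_exists() are toy stand-ins for mptcp_crypto_key_sha() and the kernel's token hash-table lookup, and rand() is not cryptographic.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define TOKEN_MAX_RETRIES 4	/* illustrative; mirrors MPTCP_TOKEN_MAX_RETRIES */

/* Toy stand-ins for the SHA-256 key->token derivation and the global
 * token table lookup.
 */
static uint32_t derive_token(uint64_t key) { return (uint32_t)(key ^ (key >> 32)); }
static bool token_exists(uint32_t token) { (void)token; return false; }

static uint64_t random_key_nonzero(void)
{
	uint64_t key;

	do {	/* same shape as the do/while in subflow_check_req() */
		key = ((uint64_t)rand() << 32) | (uint32_t)rand();
	} while (key == 0);
	return key;
}

/* Returns false when no unique token was found: the connection then
 * degrades to plain TCP instead of MPTCP.
 */
static bool pick_local_key(uint64_t *key, uint32_t *token)
{
	for (int retries = TOKEN_MAX_RETRIES; retries >= 0; retries--) {
		*key = random_key_nonzero();
		*token = derive_token(*key);
		if (!token_exists(*token))
			return true;
	}
	return false;
}
```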
c83a47e5 FW |
265 | int mptcp_subflow_init_cookie_req(struct request_sock *req, |
266 | const struct sock *sk_listener, | |
267 | struct sk_buff *skb) | |
268 | { | |
269 | struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); | |
270 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); | |
271 | struct mptcp_options_received mp_opt; | |
74c7dfbe | 272 | bool opt_mp_capable, opt_mp_join; |
c83a47e5 FW |
273 | int err; |
274 | ||
d8b59efa | 275 | subflow_init_req(req, sk_listener); |
0799e21b | 276 | mptcp_get_options(skb, &mp_opt); |
c83a47e5 | 277 | |
724b00c1 | 278 | opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK); |
66ff70df | 279 | opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK); |
74c7dfbe | 280 | if (opt_mp_capable && opt_mp_join) |
c83a47e5 FW |
281 | return -EINVAL; |
282 | ||
74c7dfbe | 283 | if (opt_mp_capable && listener->request_mptcp) { |
c83a47e5 FW |
284 | if (mp_opt.sndr_key == 0) |
285 | return -EINVAL; | |
286 | ||
287 | subflow_req->local_key = mp_opt.rcvr_key; | |
288 | err = mptcp_token_new_request(req); | |
289 | if (err) | |
290 | return err; | |
291 | ||
292 | subflow_req->mp_capable = 1; | |
293 | subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; | |
74c7dfbe | 294 | } else if (opt_mp_join && listener->request_mptcp) { |
9466a1cc FW |
295 | if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) |
296 | return -EINVAL; | |
297 | ||
8547ea5f | 298 | subflow_req->mp_join = 1; |
9466a1cc | 299 | subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; |
c83a47e5 FW |
300 | } |
301 | ||
302 | return 0; | |
303 | } | |
304 | EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req); | |
305 | ||
445c0b69 ED |
306 | static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb) |
307 | { | |
308 | const struct mptcp_ext *mpext = mptcp_get_ext(skb); | |
309 | ||
310 | if (!mpext) | |
311 | return SK_RST_REASON_NOT_SPECIFIED; | |
312 | ||
313 | return sk_rst_convert_mptcp_reason(mpext->reset_reason); | |
314 | } | |
315 | ||
7ea851d1 FW |
316 | static struct dst_entry *subflow_v4_route_req(const struct sock *sk, |
317 | struct sk_buff *skb, | |
318 | struct flowi *fl, | |
b9e81040 ED |
319 | struct request_sock *req, |
320 | u32 tw_isn) | |
cec37a6e | 321 | { |
7ea851d1 | 322 | struct dst_entry *dst; |
3ecfbe3e | 323 | int err; |
7ea851d1 | 324 | |
cec37a6e | 325 | tcp_rsk(req)->is_mptcp = 1; |
d8b59efa | 326 | subflow_init_req(req, sk); |
cec37a6e | 327 | |
b9e81040 | 328 | dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn); |
7ea851d1 FW |
329 | if (!dst) |
330 | return NULL; | |
cec37a6e | 331 | |
d8b59efa | 332 | err = subflow_check_req(req, sk, skb); |
3ecfbe3e FW |
333 | if (err == 0) |
334 | return dst; | |
cec37a6e | 335 | |
3ecfbe3e | 336 | dst_release(dst); |
445c0b69 ED |
337 | if (!req->syncookie) |
338 | tcp_request_sock_ops.send_reset(sk, skb, | |
339 | mptcp_get_rst_reason(skb)); | |
3ecfbe3e | 340 | return NULL; |
cec37a6e PK |
341 | } |
342 | ||
36b122ba DS |
343 | static void subflow_prep_synack(const struct sock *sk, struct request_sock *req, |
344 | struct tcp_fastopen_cookie *foc, | |
345 | enum tcp_synack_type synack_type) | |
346 | { | |
347 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
348 | struct inet_request_sock *ireq = inet_rsk(req); | |
349 | ||
350 | /* clear tstamp_ok, as needed depending on cookie */ | |
351 | if (foc && foc->len > -1) | |
352 | ireq->tstamp_ok = 0; | |
353 | ||
354 | if (synack_type == TCP_SYNACK_FASTOPEN) | |
355 | mptcp_fastopen_subflow_synack_set_params(subflow, req); | |
356 | } | |
357 | ||
358 | static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst, | |
359 | struct flowi *fl, | |
360 | struct request_sock *req, | |
361 | struct tcp_fastopen_cookie *foc, | |
362 | enum tcp_synack_type synack_type, | |
363 | struct sk_buff *syn_skb) | |
364 | { | |
365 | subflow_prep_synack(sk, req, foc, synack_type); | |
366 | ||
367 | return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc, | |
368 | synack_type, syn_skb); | |
369 | } | |
370 | ||
cec37a6e | 371 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
36b122ba DS |
372 | static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst, |
373 | struct flowi *fl, | |
374 | struct request_sock *req, | |
375 | struct tcp_fastopen_cookie *foc, | |
376 | enum tcp_synack_type synack_type, | |
377 | struct sk_buff *syn_skb) | |
378 | { | |
379 | subflow_prep_synack(sk, req, foc, synack_type); | |
380 | ||
381 | return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc, | |
382 | synack_type, syn_skb); | |
383 | } | |
384 | ||
7ea851d1 FW |
385 | static struct dst_entry *subflow_v6_route_req(const struct sock *sk, |
386 | struct sk_buff *skb, | |
387 | struct flowi *fl, | |
b9e81040 ED |
388 | struct request_sock *req, |
389 | u32 tw_isn) | |
cec37a6e | 390 | { |
7ea851d1 | 391 | struct dst_entry *dst; |
3ecfbe3e | 392 | int err; |
7ea851d1 | 393 | |
cec37a6e | 394 | tcp_rsk(req)->is_mptcp = 1; |
d8b59efa | 395 | subflow_init_req(req, sk); |
cec37a6e | 396 | |
b9e81040 | 397 | dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn); |
7ea851d1 FW |
398 | if (!dst) |
399 | return NULL; | |
cec37a6e | 400 | |
d8b59efa | 401 | err = subflow_check_req(req, sk, skb); |
3ecfbe3e FW |
402 | if (err == 0) |
403 | return dst; | |
404 | ||
405 | dst_release(dst); | |
445c0b69 ED |
406 | if (!req->syncookie) |
407 | tcp6_request_sock_ops.send_reset(sk, skb, | |
408 | mptcp_get_rst_reason(skb)); | |
3ecfbe3e | 409 | return NULL; |
cec37a6e PK |
410 | } |
411 | #endif | |
412 | ||
ec3edaa7 PK |
413 | /* validate received truncated hmac and create hmac for third ACK */ |
414 | static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) | |
415 | { | |
bd697222 | 416 | u8 hmac[SHA256_DIGEST_SIZE]; |
ec3edaa7 PK |
417 | u64 thmac; |
418 | ||
419 | subflow_generate_hmac(subflow->remote_key, subflow->local_key, | |
420 | subflow->remote_nonce, subflow->local_nonce, | |
421 | hmac); | |
422 | ||
423 | thmac = get_unaligned_be64(hmac); | |
424 | pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n", | |
742e2f36 | 425 | subflow, subflow->token, thmac, subflow->thmac); |
ec3edaa7 PK |
426 | |
427 | return thmac == subflow->thmac; | |
428 | } | |
429 | ||
d5824847 PA |
430 | void mptcp_subflow_reset(struct sock *ssk) |
431 | { | |
0e4f35d7 PA |
432 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
433 | struct sock *sk = subflow->conn; | |
434 | ||
3a236aef PA |
435 | /* mptcp_mp_fail_no_response() can reach here on an already closed |
436 | * socket | |
437 | */ | |
438 | if (ssk->sk_state == TCP_CLOSE) | |
439 | return; | |
440 | ||
ab82e996 FW |
441 | /* must hold: tcp_done() could drop last reference on parent */ |
442 | sock_hold(sk); | |
443 | ||
215d4024 | 444 | mptcp_send_active_reset_reason(ssk); |
d5824847 | 445 | tcp_done(ssk); |
a5cb752b PA |
446 | if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags)) |
447 | mptcp_schedule_work(sk); | |
ab82e996 FW |
448 | |
449 | sock_put(sk); | |
d5824847 PA |
450 | } |
451 | ||
5bc56388 GT |
452 | static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk) |
453 | { | |
454 | return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport; | |
455 | } | |
456 | ||
4fd19a30 | 457 | void __mptcp_sync_state(struct sock *sk, int state) |
490274b4 | 458 | { |
3f83d8a7 | 459 | struct mptcp_subflow_context *subflow; |
4fd19a30 | 460 | struct mptcp_sock *msk = mptcp_sk(sk); |
3f83d8a7 | 461 | struct sock *ssk = msk->first; |
4fd19a30 | 462 | |
3f83d8a7 PA |
463 | subflow = mptcp_subflow_ctx(ssk); |
464 | __mptcp_propagate_sndbuf(sk, ssk); | |
013e3179 | 465 | if (!msk->rcvspace_init) |
3f83d8a7 | 466 | mptcp_rcv_space_init(msk, ssk); |
4fd19a30 | 467 | |
490274b4 | 468 | if (sk->sk_state == TCP_SYN_SENT) { |
3f83d8a7 PA |
469 | /* subflow->idsn is always available in TCP_SYN_SENT state, | 
470 | * even for the FASTOPEN scenarios | |
471 | */ | |
472 | WRITE_ONCE(msk->write_seq, subflow->idsn + 1); | |
473 | WRITE_ONCE(msk->snd_nxt, msk->write_seq); | |
c693a851 | 474 | mptcp_set_state(sk, state); |
490274b4 PA |
475 | sk->sk_state_change(sk); |
476 | } | |
477 | } | |
478 | ||
b3ea6b27 PA |
479 | static void subflow_set_remote_key(struct mptcp_sock *msk, |
480 | struct mptcp_subflow_context *subflow, | |
481 | const struct mptcp_options_received *mp_opt) | |
482 | { | |
483 | /* active MPC subflow will reach here multiple times: | |
484 | * at subflow_finish_connect() time and at 4th ack time | |
485 | */ | |
486 | if (subflow->remote_key_valid) | |
487 | return; | |
488 | ||
489 | subflow->remote_key_valid = 1; | |
490 | subflow->remote_key = mp_opt->sndr_key; | |
491 | mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn); | |
492 | subflow->iasn++; | |
493 | ||
494 | WRITE_ONCE(msk->remote_key, subflow->remote_key); | |
495 | WRITE_ONCE(msk->ack_seq, subflow->iasn); | |
496 | WRITE_ONCE(msk->can_ack, true); | |
497 | atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); | |
498 | } | |
499 | ||
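subflow_set_remote_key() leans on RFC 8684's key-to-sequence derivation: one SHA-256 over the 8 key bytes as transmitted yields both the token (most significant 32 bits of the digest) and the IDSN (least significant 64 bits); the stored iasn is then idsn + 1, the sequence of the first data byte. A sketch, assuming a one-shot sha256() helper (the kernel uses its own sha256() library call inside mptcp_crypto_key_sha()):

```c
#include <stddef.h>
#include <stdint.h>

/* Assumed helper: one-shot SHA-256 of 'len' bytes into digest[32]. */
void sha256(const uint8_t *data, size_t len, uint8_t digest[32]);

/* key[8]: the key bytes exactly as they appear on the wire. */
static void key_to_token_idsn(const uint8_t key[8],
			      uint32_t *token, uint64_t *idsn)
{
	uint8_t digest[32];

	sha256(key, 8, digest);

	/* token: most significant 32 bits of the digest */
	*token = ((uint32_t)digest[0] << 24) | ((uint32_t)digest[1] << 16) |
		 ((uint32_t)digest[2] << 8) | digest[3];

	/* IDSN: least significant 64 bits of the digest */
	*idsn = 0;
	for (int i = 24; i < 32; i++)
		*idsn = (*idsn << 8) | digest[i];
}
```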
e4a0fa47 PA |
500 | static void mptcp_propagate_state(struct sock *sk, struct sock *ssk, |
501 | struct mptcp_subflow_context *subflow, | |
502 | const struct mptcp_options_received *mp_opt) | |
503 | { | |
504 | struct mptcp_sock *msk = mptcp_sk(sk); | |
505 | ||
506 | mptcp_data_lock(sk); | |
507 | if (mp_opt) { | |
508 | /* Options are available only in the non-fallback cases; | 
509 | * avoid updating rx path fields otherwise | 
510 | */ | |
511 | WRITE_ONCE(msk->snd_una, subflow->idsn + 1); | |
512 | WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); | |
513 | subflow_set_remote_key(msk, subflow, mp_opt); | |
514 | } | |
515 | ||
516 | if (!sock_owned_by_user(sk)) { | |
517 | __mptcp_sync_state(sk, ssk->sk_state); | |
518 | } else { | |
519 | msk->pending_state = ssk->sk_state; | |
520 | __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags); | |
521 | } | |
522 | mptcp_data_unlock(sk); | |
523 | } | |
524 | ||
cec37a6e PK |
525 | static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) |
526 | { | |
527 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
cfde141e | 528 | struct mptcp_options_received mp_opt; |
c3c123d1 | 529 | struct sock *parent = subflow->conn; |
b3ea6b27 | 530 | struct mptcp_sock *msk; |
cec37a6e PK |
531 | |
532 | subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); | |
533 | ||
263e1201 PA |
534 | /* be sure to take no special action on any packet other than the syn-ack */ | 
535 | if (subflow->conn_finished) | |
536 | return; | |
537 | ||
b3ea6b27 | 538 | msk = mptcp_sk(parent); |
b0977bb2 | 539 | subflow->rel_write_seq = 1; |
263e1201 | 540 | subflow->conn_finished = 1; |
e1ff9e82 | 541 | subflow->ssn_offset = TCP_SKB_CB(skb)->seq; |
cb41b195 | 542 | pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); |
263e1201 | 543 | |
0799e21b | 544 | mptcp_get_options(skb, &mp_opt); |
fa25e815 | 545 | if (subflow->request_mptcp) { |
724b00c1 | 546 | if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) { |
fa25e815 PA |
547 | MPTCP_INC_STATS(sock_net(sk), |
548 | MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); | |
549 | mptcp_do_fallback(sk); | |
b3ea6b27 | 550 | pr_fallback(msk); |
fa25e815 PA |
551 | goto fallback; |
552 | } | |
553 | ||
74c7dfbe | 554 | if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD) |
b3ea6b27 | 555 | WRITE_ONCE(msk->csum_enabled, true); |
df377be3 | 556 | if (mp_opt.deny_join_id0) |
b3ea6b27 | 557 | WRITE_ONCE(msk->pm.remote_deny_join_id0, true); |
263e1201 | 558 | subflow->mp_capable = 1; |
5695eb88 | 559 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK); |
fa25e815 | 560 | mptcp_finish_connect(sk); |
27069e7c | 561 | mptcp_active_enable(parent); |
e4a0fa47 | 562 | mptcp_propagate_state(parent, sk, subflow, &mp_opt); |
fa25e815 PA |
563 | } else if (subflow->request_join) { |
564 | u8 hmac[SHA256_DIGEST_SIZE]; | |
565 | ||
be1d9d9d | 566 | if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) { |
dc87efdb | 567 | subflow->reset_reason = MPTCP_RST_EMPTCP; |
fa25e815 | 568 | goto do_reset; |
dc87efdb | 569 | } |
fa25e815 | 570 | |
0460ce22 | 571 | subflow->backup = mp_opt.backup; |
cfde141e PA |
572 | subflow->thmac = mp_opt.thmac; |
573 | subflow->remote_nonce = mp_opt.nonce; | |
967d3c27 | 574 | WRITE_ONCE(subflow->remote_id, mp_opt.join_id); |
cb41b195 | 575 | pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", |
0460ce22 PA |
576 | subflow, subflow->thmac, subflow->remote_nonce, |
577 | subflow->backup); | |
263e1201 | 578 | |
ec3edaa7 | 579 | if (!subflow_thmac_valid(subflow)) { |
fc518953 | 580 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC); |
dc87efdb | 581 | subflow->reset_reason = MPTCP_RST_EMPTCP; |
ec3edaa7 PK |
582 | goto do_reset; |
583 | } | |
584 | ||
0a4d8e96 JW |
585 | if (!mptcp_finish_join(sk)) |
586 | goto do_reset; | |
587 | ||
ec3edaa7 PK |
588 | subflow_generate_hmac(subflow->local_key, subflow->remote_key, |
589 | subflow->local_nonce, | |
590 | subflow->remote_nonce, | |
bd697222 | 591 | hmac); |
bd697222 | 592 | memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); |
ec3edaa7 | 593 | |
fa25e815 | 594 | subflow->mp_join = 1; |
fc518953 | 595 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); |
5bc56388 | 596 | |
4dde0d72 MBN |
597 | if (subflow->backup) |
598 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX); | |
599 | ||
b3ea6b27 | 600 | if (subflow_use_different_dport(msk, sk)) { |
cb41b195 | 601 | pr_debug("synack inet_dport=%d %d\n", |
5bc56388 GT |
602 | ntohs(inet_sk(sk)->inet_dport), |
603 | ntohs(inet_sk(parent)->inet_dport)); | |
2fbdd9ea | 604 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX); |
5bc56388 | 605 | } |
fa25e815 | 606 | } else if (mptcp_check_fallback(sk)) { |
27069e7c MBN |
607 | /* It looks like MPTCP is blocked, while TCP is not */ |
608 | if (subflow->mpc_drop) | |
609 | mptcp_active_disable(parent); | |
fa25e815 | 610 | fallback: |
e4a0fa47 | 611 | mptcp_propagate_state(parent, sk, subflow, NULL); |
cec37a6e | 612 | } |
fa25e815 PA |
613 | return; |
614 | ||
615 | do_reset: | |
dc87efdb | 616 | subflow->reset_transient = 0; |
d5824847 | 617 | mptcp_subflow_reset(sk); |
cec37a6e PK |
618 | } |
619 | ||
4cf86ae8 PA |
620 | static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id) |
621 | { | |
a7cfe776 PA |
622 | WARN_ON_ONCE(local_id < 0 || local_id > 255); |
623 | WRITE_ONCE(subflow->local_id, local_id); | |
4cf86ae8 PA |
624 | } |
625 | ||
626 | static int subflow_chk_local_id(struct sock *sk) | |
627 | { | |
628 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
629 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); | |
630 | int err; | |
631 | ||
a7cfe776 | 632 | if (likely(subflow->local_id >= 0)) |
4cf86ae8 PA |
633 | return 0; |
634 | ||
635 | err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk); | |
636 | if (err < 0) | |
637 | return err; | |
638 | ||
639 | subflow_set_local_id(subflow, err); | |
6834097f MBN |
640 | subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); |
641 | ||
4cf86ae8 PA |
642 | return 0; |
643 | } | |
644 | ||
645 | static int subflow_rebuild_header(struct sock *sk) | |
646 | { | |
647 | int err = subflow_chk_local_id(sk); | |
648 | ||
649 | if (unlikely(err < 0)) | |
650 | return err; | |
651 | ||
652 | return inet_sk_rebuild_header(sk); | |
653 | } | |
654 | ||
655 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
656 | static int subflow_v6_rebuild_header(struct sock *sk) | |
657 | { | |
658 | int err = subflow_chk_local_id(sk); | |
659 | ||
660 | if (unlikely(err < 0)) | |
661 | return err; | |
662 | ||
663 | return inet6_sk_rebuild_header(sk); | |
664 | } | |
665 | #endif | |
666 | ||
34b21d1d | 667 | static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init; |
51fa7f8e | 668 | static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init; |
cec37a6e PK |
669 | |
670 | static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |
671 | { | |
672 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
673 | ||
cb41b195 | 674 | pr_debug("subflow=%p\n", subflow); |
cec37a6e PK |
675 | |
676 | /* Never answer to SYNs sent to broadcast or multicast */ | |
677 | if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | |
678 | goto drop; | |
679 | ||
34b21d1d | 680 | return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops, |
cec37a6e PK |
681 | &subflow_request_sock_ipv4_ops, |
682 | sk, skb); | |
683 | drop: | |
684 | tcp_listendrop(sk); | |
685 | return 0; | |
686 | } | |
687 | ||
d3295fee MB |
688 | static void subflow_v4_req_destructor(struct request_sock *req) |
689 | { | |
690 | subflow_req_destructor(req); | |
691 | tcp_request_sock_ops.destructor(req); | |
692 | } | |
693 | ||
cec37a6e | 694 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
34b21d1d | 695 | static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init; |
51fa7f8e FW |
696 | static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; |
697 | static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; | |
698 | static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init; | |
822467a4 | 699 | static struct proto tcpv6_prot_override __ro_after_init; |
cec37a6e PK |
700 | |
701 | static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |
702 | { | |
703 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
704 | ||
cb41b195 | 705 | pr_debug("subflow=%p\n", subflow); |
cec37a6e PK |
706 | |
707 | if (skb->protocol == htons(ETH_P_IP)) | |
708 | return subflow_v4_conn_request(sk, skb); | |
709 | ||
710 | if (!ipv6_unicast_destination(skb)) | |
711 | goto drop; | |
712 | ||
dcc32f4f JK |
713 | if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { |
714 | __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); | |
715 | return 0; | |
716 | } | |
717 | ||
34b21d1d | 718 | return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops, |
cec37a6e PK |
719 | &subflow_request_sock_ipv6_ops, sk, skb); |
720 | ||
721 | drop: | |
722 | tcp_listendrop(sk); | |
723 | return 0; /* don't send reset */ | |
724 | } | |
d3295fee MB |
725 | |
726 | static void subflow_v6_req_destructor(struct request_sock *req) | |
727 | { | |
728 | subflow_req_destructor(req); | |
729 | tcp6_request_sock_ops.destructor(req); | |
730 | } | |
cec37a6e PK |
731 | #endif |
732 | ||
3fff8818 MB |
733 | struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, |
734 | struct sock *sk_listener, | |
735 | bool attach_listener) | |
736 | { | |
34b21d1d MB |
737 | if (ops->family == AF_INET) |
738 | ops = &mptcp_subflow_v4_request_sock_ops; | |
739 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
740 | else if (ops->family == AF_INET6) | |
741 | ops = &mptcp_subflow_v6_request_sock_ops; | |
cec37a6e PK |
742 | #endif |
743 | ||
3fff8818 MB |
744 | return inet_reqsk_alloc(ops, sk_listener, attach_listener); |
745 | } | |
746 | EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc); | |
747 | ||
f296234c | 748 | /* validate hmac received in third ACK */ |
60cbf315 | 749 | static bool subflow_hmac_valid(const struct mptcp_subflow_request_sock *subflow_req, |
cfde141e | 750 | const struct mptcp_options_received *mp_opt) |
f296234c | 751 | { |
60cbf315 | 752 | struct mptcp_sock *msk = subflow_req->msk; |
bd697222 | 753 | u8 hmac[SHA256_DIGEST_SIZE]; |
f296234c | 754 | |
1c09d7cb PA |
755 | subflow_generate_hmac(READ_ONCE(msk->remote_key), |
756 | READ_ONCE(msk->local_key), | |
f296234c PK |
757 | subflow_req->remote_nonce, |
758 | subflow_req->local_nonce, hmac); | |
759 | ||
8fd4de12 | 760 | return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN); |
f296234c PK |
761 | } |
762 | ||
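Note the crypto_memneq() above instead of memcmp(): a comparison that returns at the first mismatching byte leaks, through timing, how many leading bytes of a forged HMAC were right, letting an attacker guess it byte by byte. A sketch of the constant-time idea:

```c
#include <stddef.h>
#include <stdint.h>

/* Constant-time inequality test in the spirit of crypto_memneq():
 * runtime is independent of where (or whether) the buffers differ.
 */
static int ct_memneq(const void *a, const void *b, size_t len)
{
	const uint8_t *pa = a, *pb = b;
	uint8_t diff = 0;

	for (size_t i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];
	return diff != 0;
}
```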
4c8941de PA |
763 | static void subflow_ulp_fallback(struct sock *sk, |
764 | struct mptcp_subflow_context *old_ctx) | |
765 | { | |
766 | struct inet_connection_sock *icsk = inet_csk(sk); | |
767 | ||
768 | mptcp_subflow_tcp_fallback(sk, old_ctx); | |
769 | icsk->icsk_ulp_ops = NULL; | |
770 | rcu_assign_pointer(icsk->icsk_ulp_data, NULL); | |
771 | tcp_sk(sk)->is_mptcp = 0; | |
b19bc294 PA |
772 | |
773 | mptcp_subflow_ops_undo_override(sk); | |
4c8941de PA |
774 | } |
775 | ||
b6985b9b | 776 | void mptcp_subflow_drop_ctx(struct sock *ssk) |
39884604 PA |
777 | { |
778 | struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); | |
779 | ||
780 | if (!ctx) | |
781 | return; | |
782 | ||
63740448 PA |
783 | list_del(&mptcp_subflow_ctx(ssk)->node); |
784 | if (inet_csk(ssk)->icsk_ulp_ops) { | |
785 | subflow_ulp_fallback(ssk, ctx); | |
786 | if (ctx->conn) | |
787 | sock_put(ctx->conn); | |
788 | } | |
39884604 PA |
789 | |
790 | kfree_rcu(ctx, rcu); | |
791 | } | |
792 | ||
e4a0fa47 PA |
793 | void __mptcp_subflow_fully_established(struct mptcp_sock *msk, |
794 | struct mptcp_subflow_context *subflow, | |
795 | const struct mptcp_options_received *mp_opt) | |
b93df08c | 796 | { |
b3ea6b27 | 797 | subflow_set_remote_key(msk, subflow, mp_opt); |
581c8cbf | 798 | WRITE_ONCE(subflow->fully_established, true); |
b93df08c PA |
799 | WRITE_ONCE(msk->fully_established, true); |
800 | } | |
801 | ||
cec37a6e PK |
802 | static struct sock *subflow_syn_recv_sock(const struct sock *sk, |
803 | struct sk_buff *skb, | |
804 | struct request_sock *req, | |
805 | struct dst_entry *dst, | |
806 | struct request_sock *req_unhash, | |
807 | bool *own_req) | |
808 | { | |
809 | struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk); | |
cc7972ea | 810 | struct mptcp_subflow_request_sock *subflow_req; |
cfde141e | 811 | struct mptcp_options_received mp_opt; |
9e365ff5 | 812 | bool fallback, fallback_is_fatal; |
3e140491 | 813 | enum sk_rst_reason reason; |
3a236aef | 814 | struct mptcp_sock *owner; |
cec37a6e PK |
815 | struct sock *child; |
816 | ||
cb41b195 | 817 | pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn); |
cec37a6e | 818 | |
74c7dfbe | 819 | /* After child creation we must look for MPC even when options |
9e365ff5 | 820 | * are not parsed |
cfde141e | 821 | */ |
74c7dfbe | 822 | mp_opt.suboptions = 0; |
9e365ff5 PA |
823 | |
824 | /* hopefully temporary handling for MP_JOIN+syncookie */ | |
825 | subflow_req = mptcp_subflow_rsk(req); | |
b7514694 | 826 | fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join; |
9e365ff5 PA |
827 | fallback = !tcp_rsk(req)->is_mptcp; |
828 | if (fallback) | |
ae2dd716 FW |
829 | goto create_child; |
830 | ||
d22f4988 | 831 | /* if the sk is MP_CAPABLE, we try to fetch the client key */ |
cc7972ea | 832 | if (subflow_req->mp_capable) { |
06f9a435 PA |
833 | /* we can receive and accept an in-window, out-of-order pkt, |
834 | * which may not carry the MP_CAPABLE opt even on mptcp enabled | |
835 | * paths: always try to extract the peer key, and fall back | 
836 | * for packets missing it. | 
837 | * Even OoO DSS packets coming legitimately after dropped or | 
838 | * reordered MPC will cause fallback, but we don't have other | |
839 | * options. | |
840 | */ | |
0799e21b | 841 | mptcp_get_options(skb, &mp_opt); |
c0f5aec2 PA |
842 | if (!(mp_opt.suboptions & |
843 | (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK))) | |
4c8941de | 844 | fallback = true; |
58b09919 | 845 | |
f296234c | 846 | } else if (subflow_req->mp_join) { |
0799e21b | 847 | mptcp_get_options(skb, &mp_opt); |
443041de | 848 | if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK)) |
9e365ff5 | 849 | fallback = true; |
cc7972ea | 850 | } |
cec37a6e | 851 | |
d22f4988 | 852 | create_child: |
cec37a6e PK |
853 | child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, |
854 | req_unhash, own_req); | |
855 | ||
856 | if (child && *own_req) { | |
79c0949e PK |
857 | struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child); |
858 | ||
90bf4513 PA |
859 | tcp_rsk(req)->drop_req = false; |
860 | ||
4c8941de PA |
861 | /* we need to fall back on ctx allocation failure and on the pre-reqs | 
862 | * checking above. In the latter scenario we additionally need | 
863 | * to reset the context to non-MPTCP status. | 
79c0949e | 864 | */ |
4c8941de | 865 | if (!ctx || fallback) { |
dc87efdb FW |
866 | if (fallback_is_fatal) { |
867 | subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); | |
729cd643 | 868 | goto dispose_child; |
dc87efdb | 869 | } |
a88d0092 | 870 | goto fallback; |
f296234c | 871 | } |
79c0949e | 872 | |
df00b087 FW |
873 | /* ssk inherits options of listener sk */ |
874 | ctx->setsockopt_seq = listener->setsockopt_seq; | |
875 | ||
79c0949e | 876 | if (ctx->mp_capable) { |
7e8b88ec | 877 | ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req); |
a88d0092 PA |
878 | if (!ctx->conn) |
879 | goto fallback; | |
880 | ||
6f06b4d4 | 881 | ctx->subflow_id = 1; |
a88d0092 | 882 | owner = mptcp_sk(ctx->conn); |
3a236aef | 883 | mptcp_pm_new_connection(owner, child, 1); |
e72e4032 | 884 | |
fca5c82c PA |
885 | /* with OoO packets we can reach here without ingress |
886 | * mpc option | |
887 | */ | |
3a236aef | 888 | if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) { |
7a486c44 | 889 | mptcp_pm_fully_established(owner, child); |
3a236aef PA |
890 | ctx->pm_notified = 1; |
891 | } | |
f296234c | 892 | } else if (ctx->mp_join) { |
8fd4de12 | 893 | owner = subflow_req->msk; |
dc87efdb FW |
894 | if (!owner) { |
895 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); | |
729cd643 | 896 | goto dispose_child; |
dc87efdb | 897 | } |
f296234c | 898 | |
60cbf315 | 899 | if (!subflow_hmac_valid(subflow_req, &mp_opt)) { |
443041de GY |
900 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); |
901 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); | |
902 | goto dispose_child; | |
903 | } | |
904 | ||
21c02e82 | 905 | if (!mptcp_can_accept_new_subflow(owner)) { |
4ce7fb8d | 906 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINREJECTED); |
21c02e82 MBN |
907 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); |
908 | goto dispose_child; | |
909 | } | |
910 | ||
8fd4de12 PA |
911 | /* move the msk reference ownership to the subflow */ |
912 | subflow_req->msk = NULL; | |
f296234c | 913 | ctx->conn = (struct sock *)owner; |
5bc56388 GT |
914 | |
915 | if (subflow_use_different_sport(owner, sk)) { | |
cb41b195 | 916 | pr_debug("ack inet_sport=%d %d\n", |
5bc56388 GT |
917 | ntohs(inet_sk(sk)->inet_sport), |
918 | ntohs(inet_sk((struct sock *)owner)->inet_sport)); | |
2fbdd9ea GT |
919 | if (!mptcp_pm_sport_in_anno_list(owner, sk)) { |
920 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX); | |
382c6001 | 921 | subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); |
9238e900 | 922 | goto dispose_child; |
2fbdd9ea GT |
923 | } |
924 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX); | |
5bc56388 | 925 | } |
9238e900 | 926 | |
382c6001 JX |
927 | if (!mptcp_finish_join(child)) { |
928 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child); | |
929 | ||
930 | subflow_add_reset_reason(skb, subflow->reset_reason); | |
9238e900 | 931 | goto dispose_child; |
382c6001 | 932 | } |
9238e900 GT |
933 | |
934 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX); | |
935 | tcp_rsk(req)->drop_req = true; | |
cec37a6e PK |
936 | } |
937 | } | |
938 | ||
4c8941de | 939 | /* check for expected invariant - should never trigger, just helps | 
46a5d3ab | 940 | * catch earlier subtle bugs | 
4c8941de | 941 | */ |
ac2b47fb | 942 | WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp && |
4c8941de PA |
943 | (!mptcp_subflow_ctx(child) || |
944 | !mptcp_subflow_ctx(child)->conn)); | |
cec37a6e | 945 | return child; |
f296234c | 946 | |
729cd643 | 947 | dispose_child: |
b6985b9b | 948 | mptcp_subflow_drop_ctx(child); |
729cd643 | 949 | tcp_rsk(req)->drop_req = true; |
729cd643 | 950 | inet_csk_prepare_for_destroy_sock(child); |
f296234c | 951 | tcp_done(child); |
445c0b69 | 952 | reason = mptcp_get_rst_reason(skb); |
3e140491 | 953 | req->rsk_ops->send_reset(sk, skb, reason); |
729cd643 PA |
954 | |
955 | /* The last child reference will be released by the caller */ | |
956 | return child; | |
a88d0092 PA |
957 | |
958 | fallback: | |
7a1b3490 DC |
959 | if (fallback) |
960 | SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); | |
a88d0092 PA |
961 | mptcp_subflow_drop_ctx(child); |
962 | return child; | |
cec37a6e PK |
963 | } |
964 | ||
51fa7f8e | 965 | static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; |
822467a4 | 966 | static struct proto tcp_prot_override __ro_after_init; |
cec37a6e | 967 | |
648ef4b8 MM |
968 | enum mapping_status { |
969 | MAPPING_OK, | |
970 | MAPPING_INVALID, | |
971 | MAPPING_EMPTY, | |
e1ff9e82 | 972 | MAPPING_DATA_FIN, |
31bf11de | 973 | MAPPING_DUMMY, |
46a3282b DC |
974 | MAPPING_BAD_CSUM, |
975 | MAPPING_NODSS | |
648ef4b8 MM |
976 | }; |
977 | ||
61e71022 | 978 | static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) |
648ef4b8 | 979 | { |
cb41b195 | 980 | pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n", |
61e71022 | 981 | ssn, subflow->map_subflow_seq, subflow->map_data_len); |
648ef4b8 MM |
982 | } |
983 | ||
984 | static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb) | |
985 | { | |
986 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
987 | unsigned int skb_consumed; | |
988 | ||
989 | skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq; | |
e32d262c PA |
990 | if (unlikely(skb_consumed >= skb->len)) { |
991 | DEBUG_NET_WARN_ON_ONCE(1); | |
648ef4b8 | 992 | return true; |
e32d262c | 993 | } |
648ef4b8 MM |
994 | |
995 | return skb->len - skb_consumed <= subflow->map_data_len - | |
996 | mptcp_subflow_get_map_offset(subflow); | |
997 | } | |
998 | ||
999 | static bool validate_mapping(struct sock *ssk, struct sk_buff *skb) | |
1000 | { | |
1001 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
1002 | u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; | |
1003 | ||
1004 | if (unlikely(before(ssn, subflow->map_subflow_seq))) { | |
1005 | /* Mapping covers data later in the subflow stream, | |
1006 | * currently unsupported. | |
1007 | */ | |
61e71022 | 1008 | dbg_bad_map(subflow, ssn); |
648ef4b8 MM |
1009 | return false; |
1010 | } | |
1011 | if (unlikely(!before(ssn, subflow->map_subflow_seq + | |
1012 | subflow->map_data_len))) { | |
1013 | /* Mapping only covers past subflow data, invalid */ | 
61e71022 | 1014 | dbg_bad_map(subflow, ssn); |
648ef4b8 MM |
1015 | return false; |
1016 | } | |
1017 | return true; | |
1018 | } | |
1019 | ||
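Both tests in validate_mapping() rely on wraparound-safe 32-bit sequence comparison, the same trick as TCP's before() macro: subtract and inspect the sign bit. A compact sketch of the comparison and the resulting "ssn falls inside the mapping" predicate:

```c
#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "seq1 precedes seq2", like the kernel before() macro. */
static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* The mapping is usable only when the next subflow sequence to read
 * lies in [map_subflow_seq, map_subflow_seq + map_data_len).
 */
static bool ssn_in_mapping(uint32_t ssn, uint32_t map_subflow_seq,
			   uint16_t map_data_len)
{
	return !seq_before(ssn, map_subflow_seq) &&
	       seq_before(ssn, map_subflow_seq + map_data_len);
}
```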
dd8bcd17 PA |
1020 | static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb, |
1021 | bool csum_reqd) | |
1022 | { | |
1023 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
dd8bcd17 | 1024 | u32 offset, seq, delta; |
ba2c89e0 | 1025 | __sum16 csum; |
dd8bcd17 PA |
1026 | int len; |
1027 | ||
1028 | if (!csum_reqd) | |
1029 | return MAPPING_OK; | |
1030 | ||
1031 | /* mapping already validated on previous traversal */ | |
1032 | if (subflow->map_csum_len == subflow->map_data_len) | |
1033 | return MAPPING_OK; | |
1034 | ||
1035 | /* traverse the receive queue, ensuring it contains a full | |
1036 | * DSS mapping and accumulating the related csum. | |
1037 | * Preserve the accumulated csum across multiple calls, to compute | 
1038 | * the csum only once | |
1039 | */ | |
1040 | delta = subflow->map_data_len - subflow->map_csum_len; | |
1041 | for (;;) { | |
1042 | seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; | |
1043 | offset = seq - TCP_SKB_CB(skb)->seq; | |
1044 | ||
1045 | /* if the current skb has not been accounted yet, csum its contents | |
1046 | * up to the amount covered by the current DSS | |
1047 | */ | |
1048 | if (offset < skb->len) { | |
1049 | __wsum csum; | |
1050 | ||
1051 | len = min(skb->len - offset, delta); | |
1052 | csum = skb_checksum(skb, offset, len, 0); | |
1053 | subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum, | |
1054 | subflow->map_csum_len); | |
1055 | ||
1056 | delta -= len; | |
1057 | subflow->map_csum_len += len; | |
1058 | } | |
1059 | if (delta == 0) | |
1060 | break; | |
1061 | ||
1062 | if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) { | |
1063 | /* if this subflow is closed, the partial mapping | |
1064 | * will never be completed; flush the pending skbs, so | 
1065 | * that subflow_sched_work_if_closed() can kick in | |
1066 | */ | |
1067 | if (unlikely(ssk->sk_state == TCP_CLOSE)) | |
1068 | while ((skb = skb_peek(&ssk->sk_receive_queue))) | |
1069 | sk_eat_skb(ssk, skb); | |
1070 | ||
1071 | /* not enough data to validate the csum */ | |
1072 | return MAPPING_EMPTY; | |
1073 | } | |
1074 | ||
1075 | /* the DSS mapping for the next skbs will be validated later, | 
1076 | * when a get_mapping_status call processes such skb | 
1077 | */ | |
1078 | skb = skb->next; | |
1079 | } | |
1080 | ||
1081 | /* note that 'map_data_len' accounts only for the carried data and does | 
1082 | * not include the possible seq increment due to the data fin, | 
1083 | * while the pseudo header requires the original DSS data len, | |
1084 | * including that | |
1085 | */ | |
8401e87f GT |
1086 | csum = __mptcp_make_csum(subflow->map_seq, |
1087 | subflow->map_subflow_seq, | |
1088 | subflow->map_data_len + subflow->map_data_fin, | |
1089 | subflow->map_data_csum); | |
1090 | if (unlikely(csum)) { | |
fe3ab1cb | 1091 | MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR); |
31bf11de | 1092 | return MAPPING_BAD_CSUM; |
fe3ab1cb | 1093 | } |
dd8bcd17 | 1094 | |
ae66fb2b | 1095 | subflow->valid_csum_seen = 1; |
dd8bcd17 PA |
1096 | return MAPPING_OK; |
1097 | } | |
1098 | ||
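The DSS checksum itself is plain Internet-checksum arithmetic (RFC 1071) over the DSS pseudo-header plus payload: 16-bit one's-complement sums, folded with end-around carry and complemented once everything is accounted, which is the final step behind __mptcp_make_csum(). A simplified sketch; unlike csum_block_add(), it ignores the byte rotation needed when a block starts at an odd offset:

```c
#include <stddef.h>
#include <stdint.h>

/* Accumulate 'len' bytes into a 32-bit running one's-complement sum. */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)data[0] << 8) | data[1];
		data += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)data[0] << 8;
	return sum;
}

/* Fold the carries back in and complement: the on-wire 16-bit csum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
```

A received segment verifies when summing pseudo-header plus data, including the transmitted checksum field, folds to zero.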
43b54c6e MM |
1099 | static enum mapping_status get_mapping_status(struct sock *ssk, |
1100 | struct mptcp_sock *msk) | |
648ef4b8 MM |
1101 | { |
1102 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
dd8bcd17 | 1103 | bool csum_reqd = READ_ONCE(msk->csum_enabled); |
648ef4b8 MM |
1104 | struct mptcp_ext *mpext; |
1105 | struct sk_buff *skb; | |
1106 | u16 data_len; | |
1107 | u64 map_seq; | |
1108 | ||
1109 | skb = skb_peek(&ssk->sk_receive_queue); | |
1110 | if (!skb) | |
1111 | return MAPPING_EMPTY; | |
1112 | ||
e1ff9e82 DC |
1113 | if (mptcp_check_fallback(ssk)) |
1114 | return MAPPING_DUMMY; | |
1115 | ||
648ef4b8 MM |
1116 | mpext = mptcp_get_ext(skb); |
1117 | if (!mpext || !mpext->use_map) { | |
1118 | if (!subflow->map_valid && !skb->len) { | |
1119 | /* the TCP stack delivers 0-len FIN pkts to the receive | 
1120 | * queue; those are the only 0-len pkts ever expected here, | 
1121 | * and we can admit no mapping only for 0-len pkts | 
1122 | */ | |
1123 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) | |
1124 | WARN_ONCE(1, "0len seq %d:%d flags %x", | |
1125 | TCP_SKB_CB(skb)->seq, | |
1126 | TCP_SKB_CB(skb)->end_seq, | |
1127 | TCP_SKB_CB(skb)->tcp_flags); | |
1128 | sk_eat_skb(ssk, skb); | |
1129 | return MAPPING_EMPTY; | |
1130 | } | |
1131 | ||
46a3282b | 1132 | /* If the required DSS has likely been dropped by a middlebox */ |
648ef4b8 | 1133 | if (!subflow->map_valid) |
46a3282b | 1134 | return MAPPING_NODSS; |
648ef4b8 MM |
1135 | |
1136 | goto validate_seq; | |
1137 | } | |
1138 | ||
0918e34b | 1139 | trace_get_mapping_status(mpext); |
648ef4b8 MM |
1140 | |
1141 | data_len = mpext->data_len; | |
1142 | if (data_len == 0) { | |
cb41b195 | 1143 | pr_debug("infinite mapping received\n"); |
fc518953 | 1144 | MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); |
648ef4b8 MM |
1145 | return MAPPING_INVALID; |
1146 | } | |
1147 | ||
1148 | if (mpext->data_fin == 1) { | |
00797af9 MBN |
1149 | u64 data_fin_seq; |
1150 | ||
648ef4b8 | 1151 | if (data_len == 1) { |
1a49b2c2 MM |
1152 | bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq, |
1153 | mpext->dsn64); | |
cb41b195 | 1154 | pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq); |
648ef4b8 MM |
1155 | if (subflow->map_valid) { |
1156 | /* A DATA_FIN might arrive in a DSS | |
1157 | * option before the previous mapping | |
1158 | * has been fully consumed. Continue | |
1159 | * handling the existing mapping. | |
1160 | */ | |
1161 | skb_ext_del(skb, SKB_EXT_MPTCP); | |
1162 | return MAPPING_OK; | |
648ef4b8 | 1163 | } |
1a49b2c2 | 1164 | |
00797af9 MBN |
1165 | if (updated) |
1166 | mptcp_schedule_work((struct sock *)msk); | |
1a49b2c2 | 1167 | |
00797af9 | 1168 | return MAPPING_DATA_FIN; |
648ef4b8 MM |
1169 | } |
1170 | ||
00797af9 MBN |
1171 | data_fin_seq = mpext->data_seq + data_len - 1; |
1172 | ||
1173 | /* If mpext->data_seq is a 32-bit value, data_fin_seq must also | |
1174 | * be limited to 32 bits. | |
1175 | */ | |
1176 | if (!mpext->dsn64) | |
1177 | data_fin_seq &= GENMASK_ULL(31, 0); | |
1178 | ||
1179 | mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64); | |
cb41b195 | 1180 | pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n", |
00797af9 MBN |
1181 | data_fin_seq, mpext->dsn64); |
1182 | ||
648ef4b8 MM |
1183 | /* Adjust for DATA_FIN using 1 byte of sequence space */ |
1184 | data_len--; | |
1185 | } | |
1186 | ||
5957a890 | 1187 | map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64); |
37198e93 | 1188 | WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); |
648ef4b8 MM |
1189 | |
1190 | if (subflow->map_valid) { | |
1191 | /* Allow replacing only with an identical map */ | |
1192 | if (subflow->map_seq == map_seq && | |
1193 | subflow->map_subflow_seq == mpext->subflow_seq && | |
dd8bcd17 PA |
1194 | subflow->map_data_len == data_len && |
1195 | subflow->map_csum_reqd == mpext->csum_reqd) { | |
648ef4b8 | 1196 | skb_ext_del(skb, SKB_EXT_MPTCP); |
dd8bcd17 | 1197 | goto validate_csum; |
648ef4b8 MM |
1198 | } |
1199 | ||
1200 | /* If this skb's data is fully covered by the current mapping, | 
1201 | * the new map would need caching, which is not supported | |
1202 | */ | |
fc518953 FW |
1203 | if (skb_is_fully_mapped(ssk, skb)) { |
1204 | MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH); | |
648ef4b8 | 1205 | return MAPPING_INVALID; |
fc518953 | 1206 | } |
648ef4b8 MM |
1207 | |
1208 | /* will validate the next map after consuming the current one */ | |
dd8bcd17 | 1209 | goto validate_csum; |
648ef4b8 MM |
1210 | } |
1211 | ||
1212 | subflow->map_seq = map_seq; | |
1213 | subflow->map_subflow_seq = mpext->subflow_seq; | |
1214 | subflow->map_data_len = data_len; | |
1215 | subflow->map_valid = 1; | |
dd8bcd17 | 1216 | subflow->map_data_fin = mpext->data_fin; |
d22f4988 | 1217 | subflow->mpc_map = mpext->mpc_map; |
dd8bcd17 PA |
1218 | subflow->map_csum_reqd = mpext->csum_reqd; |
1219 | subflow->map_csum_len = 0; | |
1220 | subflow->map_data_csum = csum_unfold(mpext->csum); | |
1221 | ||
1222 | /* Cf. RFC 8684, Section 3.3 */ | 
1223 | if (unlikely(subflow->map_csum_reqd != csum_reqd)) | |
1224 | return MAPPING_INVALID; | |
1225 | ||
cb41b195 | 1226 | pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n", |
648ef4b8 | 1227 | subflow->map_seq, subflow->map_subflow_seq, |
dd8bcd17 PA |
1228 | subflow->map_data_len, subflow->map_csum_reqd, |
1229 | subflow->map_data_csum); | |
648ef4b8 MM |
1230 | |
1231 | validate_seq: | |
1232 | /* we revalidate the valid mapping on each new skb, because we must ensure | 
1233 | * the current skb is completely covered by the available mapping | |
1234 | */ | |
06285da9 PA |
1235 | if (!validate_mapping(ssk, skb)) { |
1236 | MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH); | |
648ef4b8 | 1237 | return MAPPING_INVALID; |
06285da9 | 1238 | } |
648ef4b8 MM |
1239 | |
1240 | skb_ext_del(skb, SKB_EXT_MPTCP); | |
dd8bcd17 PA |
1241 | |
1242 | validate_csum: | |
1243 | return validate_data_csum(ssk, skb, csum_reqd); | |
648ef4b8 MM |
1244 | } |
1245 | ||
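When the peer uses 32-bit data sequence numbers (dsn64 clear), get_mapping_status() widens them against the 64-bit msk->ack_seq through mptcp_expand_seq(). A minimal sketch of the underlying window logic, not the kernel's exact arithmetic, assuming the true value lies within 2^31 of the current cursor and the sequence space is far from the 64-bit wrap:

```c
#include <stdint.h>

/* Expand seq32 (low 32 bits of the true value) against the 64-bit
 * cursor 'cur' by picking the candidate in the same, previous or next
 * 4GB window that lands closest to the cursor.
 */
static uint64_t expand_seq32(uint64_t cur, uint32_t seq32)
{
	uint64_t cand = (cur & ~0xffffffffULL) | seq32;

	if (cand + 0x80000000ULL < cur)		/* too far behind: next window */
		return cand + 0x100000000ULL;
	if (cand > cur + 0x80000000ULL &&
	    cand >= 0x100000000ULL)		/* too far ahead: previous one */
		return cand - 0x100000000ULL;
	return cand;
}
```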
04e4cd4f | 1246 | static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, |
1d39cd8c | 1247 | u64 limit) |
6719331c PA |
1248 | { |
1249 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
04e4cd4f | 1250 | bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; |
68cc9247 PA |
1251 | struct tcp_sock *tp = tcp_sk(ssk); |
1252 | u32 offset, incr, avail_len; | |
04e4cd4f | 1253 | |
68cc9247 PA |
1254 | offset = tp->copied_seq - TCP_SKB_CB(skb)->seq; |
1255 | if (WARN_ON_ONCE(offset > skb->len)) | |
1256 | goto out; | |
04e4cd4f | 1257 | |
68cc9247 PA |
1258 | avail_len = skb->len - offset; |
1259 | incr = limit >= avail_len ? avail_len + fin : limit; | |
1260 | ||
cb41b195 | 1261 | pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len, |
68cc9247 | 1262 | offset, subflow->map_subflow_seq); |
06242e44 | 1263 | MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); |
04e4cd4f | 1264 | tcp_sk(ssk)->copied_seq += incr; |
68cc9247 PA |
1265 | |
1266 | out: | |
04e4cd4f PA |
1267 | if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) |
1268 | sk_eat_skb(ssk, skb); | |
1269 | if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) | |
1270 | subflow->map_valid = 0; | |
6719331c PA |
1271 | } |
1272 | ||
c3349a22 PA |
1273 | static bool subflow_is_done(const struct sock *sk) |
1274 | { | |
1275 | return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE; | |
1276 | } | |
1277 | ||
1278 | /* sched mptcp worker for subflow cleanup if no more data is pending */ | |
40947e13 FW |
1279 | static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) |
1280 | { | |
f09b0ad5 MBN |
1281 | struct sock *sk = (struct sock *)msk; |
1282 | ||
1283 | if (likely(ssk->sk_state != TCP_CLOSE && | |
1284 | (ssk->sk_state != TCP_CLOSE_WAIT || | |
1285 | inet_sk_state_load(sk) != TCP_ESTABLISHED))) | |
40947e13 FW |
1286 | return; |
1287 | ||
c3349a22 PA |
1288 | if (!skb_queue_empty(&ssk->sk_receive_queue)) |
1289 | return; | |
1290 | ||
1291 | if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) | |
1292 | mptcp_schedule_work(sk); | |
1293 | ||
1294 | /* when the fallback subflow closes the rx side, trigger a 'dummy' | |
1295 | * ingress data fin, so that the msk state will follow along | |
1296 | */ | |
1297 | if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) && | |
1298 | msk->first == ssk && | |
1299 | mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true)) | |
f09b0ad5 | 1300 | mptcp_schedule_work(sk); |
40947e13 FW |
1301 | } |
1302 | ||
76a13b31 GT |
1303 | static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) |
1304 | { | |
1305 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
1306 | unsigned long fail_tout; | |
1307 | ||
46a5d3ab | 1308 | /* graceful failure can happen only on the MPC subflow */ |
76a13b31 GT |
1309 | if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first))) |
1310 | return; | |
1311 | ||
1312 | /* since the close timeout takes precedence over the fail one, | 
1313 | * no need to start the latter when the first is already set | |
1314 | */ | |
1315 | if (sock_flag((struct sock *)msk, SOCK_DEAD)) | |
1316 | return; | |
1317 | ||
1318 | /* we don't need extreme accuracy here; a zero fail_tout is the special | 
1319 | * value meaning no fail timeout at all | 
1320 | */ | |
1321 | fail_tout = jiffies + TCP_RTO_MAX; | |
1322 | if (!fail_tout) | |
1323 | fail_tout = 1; | |
1324 | WRITE_ONCE(subflow->fail_tout, fail_tout); | |
1325 | tcp_send_ack(ssk); | |
1326 | ||
f6909dc1 | 1327 | mptcp_reset_tout_timer(msk, subflow->fail_tout); |
76a13b31 GT |
1328 | } |
1329 | ||
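mptcp_subflow_fail() reserves fail_tout == 0 as the "no fail timeout armed" sentinel, so a deadline that happens to compute to 0 on jiffies wraparound is nudged to 1; the one-jiffy error is exactly the inaccuracy the comment above says is acceptable. The pattern in isolation:

```c
/* Compute a deadline where 0 is reserved to mean "timer disarmed". */
static unsigned long arm_fail_timeout(unsigned long now_jiffies,
				      unsigned long rto_max)
{
	unsigned long fail_tout = now_jiffies + rto_max;

	if (!fail_tout)		/* 0 is the sentinel: step over it */
		fail_tout = 1;
	return fail_tout;
}
```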
648ef4b8 MM |
1330 | static bool subflow_check_data_avail(struct sock *ssk) |
1331 | { | |
1332 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
1333 | enum mapping_status status; | |
1334 | struct mptcp_sock *msk; | |
1335 | struct sk_buff *skb; | |
1336 | ||
47bebdf3 | 1337 | if (!skb_peek(&ssk->sk_receive_queue)) |
f1f26512 | 1338 | WRITE_ONCE(subflow->data_avail, false); |
648ef4b8 MM |
1339 | if (subflow->data_avail) |
1340 | return true; | |
1341 | ||
648ef4b8 MM |
1342 | msk = mptcp_sk(subflow->conn); |
1343 | for (;;) { | |
648ef4b8 MM |
1344 | u64 ack_seq; |
1345 | u64 old_ack; | |
1346 | ||
43b54c6e | 1347 | status = get_mapping_status(ssk, msk); |
d96a838a | 1348 | trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue)); |
31bf11de | 1349 | if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY || |
46a3282b | 1350 | status == MAPPING_BAD_CSUM || status == MAPPING_NODSS)) |
dea2b1ea | 1351 | goto fallback; |
648ef4b8 MM |
1352 | |
1353 | if (status != MAPPING_OK) | |
40947e13 | 1354 | goto no_data; |
648ef4b8 MM |
1355 | |
1356 | skb = skb_peek(&ssk->sk_receive_queue); | |
1357 | if (WARN_ON_ONCE(!skb)) | |
40947e13 | 1358 | goto no_data; |
648ef4b8 | 1359 | |
b3ea6b27 PA |
1360 | if (unlikely(!READ_ONCE(msk->can_ack))) |
1361 | goto fallback; | |
d22f4988 | 1362 | |
648ef4b8 MM |
1363 | old_ack = READ_ONCE(msk->ack_seq); |
1364 | ack_seq = mptcp_subflow_get_mapped_dsn(subflow); | |
cb41b195 | 1365 | pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, |
648ef4b8 | 1366 | ack_seq); |
99d1055c PA |
1367 | if (unlikely(before64(ack_seq, old_ack))) { |
1368 | mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); | |
1369 | continue; | |
47bebdf3 | 1370 | } |
648ef4b8 | 1371 | |
f1f26512 | 1372 | WRITE_ONCE(subflow->data_avail, true); |
99d1055c | 1373 | break; |
648ef4b8 MM |
1374 | } |
1375 | return true; | |
1376 | ||
40947e13 FW |
1377 | no_data: |
1378 | subflow_sched_work_if_closed(msk, ssk); | |
1379 | return false; | |
dea2b1ea PA |
1380 | |
1381 | fallback: | |
0348c690 GT |
1382 | if (!__mptcp_check_fallback(msk)) { |
1383 | /* RFC 8684 section 3.7. */ | |
31bf11de PA |
1384 | if (status == MAPPING_BAD_CSUM && |
1385 | (subflow->mp_join || subflow->valid_csum_seen)) { | |
1386 | subflow->send_mp_fail = 1; | |
1387 | ||
7b16871f | 1388 | if (!READ_ONCE(msk->allow_infinite_fallback)) { |
0348c690 GT |
1389 | subflow->reset_transient = 0; |
1390 | subflow->reset_reason = MPTCP_RST_EMIDDLEBOX; | |
f745a3eb | 1391 | goto reset; |
0348c690 | 1392 | } |
f745a3eb | 1393 | mptcp_subflow_fail(msk, ssk); |
f1f26512 | 1394 | WRITE_ONCE(subflow->data_avail, true); |
0348c690 GT |
1395 | return true; |
1396 | } | |
1397 | ||
8668860b | 1398 | if (!READ_ONCE(msk->allow_infinite_fallback)) { |
0348c690 GT |
1399 | /* fatal protocol error, close the socket. |
1400 | * subflow_error_report() will introduce the appropriate barriers | |
1401 | */ | |
1761fed2 | 1402 | subflow->reset_transient = 0; |
46a3282b DC |
1403 | subflow->reset_reason = status == MAPPING_NODSS ? |
1404 | MPTCP_RST_EMIDDLEBOX : | |
1405 | MPTCP_RST_EMPTCP; | |
f745a3eb PA |
1406 | |
1407 | reset: | |
9ae8e5ad | 1408 | WRITE_ONCE(ssk->sk_err, EBADMSG); |
f745a3eb PA |
1409 | tcp_set_state(ssk, TCP_CLOSE); |
1410 | while ((skb = skb_peek(&ssk->sk_receive_queue))) | |
1411 | sk_eat_skb(ssk, skb); | |
215d4024 | 1412 | mptcp_send_active_reset_reason(ssk); |
f1f26512 | 1413 | WRITE_ONCE(subflow->data_avail, false); |
0348c690 | 1414 | return false; |
478d7700 | 1415 | } |
478d7700 | 1416 | |
d51991e2 | 1417 | mptcp_do_fallback(ssk); |
dea2b1ea PA |
1418 | } |
1419 | ||
dea2b1ea PA |
1420 | skb = skb_peek(&ssk->sk_receive_queue); |
1421 | subflow->map_valid = 1; | |
1422 | subflow->map_seq = READ_ONCE(msk->ack_seq); | |
1423 | subflow->map_data_len = skb->len; | |
1424 | subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; | |
f1f26512 | 1425 | WRITE_ONCE(subflow->data_avail, true); |
dea2b1ea | 1426 | return true; |
648ef4b8 MM |
1427 | } |
1428 | ||
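/*
 * Editor's note: wrap-safe 64-bit sequence comparison, the before64() helper
 * that subflow_check_data_avail() above relies on to detect data already
 * acked at the MPTCP level; sketched here as standalone userspace C.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool before64(uint64_t seq1, uint64_t seq2)
{
	/* true when seq1 precedes seq2, modulo 2^64 */
	return (int64_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint64_t old_ack = 1000, ack_seq = 900;

	if (before64(ack_seq, old_ack))
		printf("discard %llu already-acked bytes\n",
		       (unsigned long long)(old_ack - ack_seq));
	return 0;
}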
1429 | bool mptcp_subflow_data_available(struct sock *sk) | |
1430 | { | |
1431 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
648ef4b8 MM |
1432 | |
1433 | /* check if current mapping is still valid */ | |
1434 | if (subflow->map_valid && | |
1435 | mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { | |
1436 | subflow->map_valid = 0; | |
f1f26512 | 1437 | WRITE_ONCE(subflow->data_avail, false); |
648ef4b8 | 1438 | |
cb41b195 | 1439 | pr_debug("Done with mapping: seq=%u data_len=%u\n", |
648ef4b8 MM |
1440 | subflow->map_subflow_seq, |
1441 | subflow->map_data_len); | |
1442 | } | |
1443 | ||
47bebdf3 | 1444 | return subflow_check_data_avail(sk); |
648ef4b8 MM |
1445 | } |
1446 | ||
071c8ed6 FW |
1447 | /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy, |
1448 | * not the ssk one. | |
1449 | * | |
1450 | * In mptcp, rwin is about the mptcp-level connection data. | |
1451 | * | |
1452 | * Data that is still on the ssk rx queue can thus be ignored, | |
55320b82 | 1453 | * as far as the mptcp peer is concerned, that data is still in flight. | |
071c8ed6 FW |
1454 | * DSS ACK is updated when skb is moved to the mptcp rx queue. |
1455 | */ | |
1456 | void mptcp_space(const struct sock *ssk, int *space, int *full_space) | |
1457 | { | |
1458 | const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
1459 | const struct sock *sk = subflow->conn; | |
1460 | ||
ea4ca586 | 1461 | *space = __mptcp_space(sk); |
b8dc6d6c | 1462 | *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); |
071c8ed6 FW |
1463 | } |
1464 | ||
15cc1045 PA |
1465 | static void subflow_error_report(struct sock *ssk) |
1466 | { | |
1467 | struct sock *sk = mptcp_subflow_ctx(ssk)->conn; | |
1468 | ||
b7a679ba PA |
1469 | /* bail early if this is a no-op, so that we avoid introducing a |
1470 | * problematic lockdep dependency between TCP accept queue lock | |
1471 | * and msk socket spinlock | |
1472 | */ | |
1473 | if (!sk->sk_socket) | |
1474 | return; | |
1475 | ||
15cc1045 PA |
1476 | mptcp_data_lock(sk); |
1477 | if (!sock_owned_by_user(sk)) | |
1478 | __mptcp_error_report(sk); | |
1479 | else | |
e9d09bac | 1480 | __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags); |
15cc1045 PA |
1481 | mptcp_data_unlock(sk); |
1482 | } | |
1483 | ||
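/*
 * Editor's note: hypothetical single-threaded sketch of the defer-to-unlock
 * pattern in subflow_error_report(): if the msk is owned by user context,
 * record a pending-action bit instead of touching the socket, and let the
 * lock owner handle it on release. Names mirror the kernel's, but this is
 * not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MPTCP_ERROR_REPORT	(1UL << 0)

struct fake_msk {
	bool owned_by_user;		/* stands in for sock_owned_by_user() */
	unsigned long cb_flags;
};

static void __error_report(struct fake_msk *msk)
{
	printf("propagating subflow error to msk\n");
}

static void error_event(struct fake_msk *msk)
{
	if (!msk->owned_by_user)
		__error_report(msk);			/* handle now */
	else
		msk->cb_flags |= MPTCP_ERROR_REPORT;	/* defer */
}

static void release_owned(struct fake_msk *msk)
{
	msk->owned_by_user = false;
	if (msk->cb_flags & MPTCP_ERROR_REPORT) {
		msk->cb_flags &= ~MPTCP_ERROR_REPORT;
		__error_report(msk);	/* deferred action runs here */
	}
}

int main(void)
{
	struct fake_msk msk = { .owned_by_user = true };

	error_event(&msk);	/* owner holds the lock: deferred */
	release_owned(&msk);	/* pending bit processed on release */
	return 0;
}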
499ada50 PA |
1484 | static void subflow_data_ready(struct sock *sk) |
1485 | { | |
1486 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
1487 | u16 state = 1 << inet_sk_state_load(sk); | |
1488 | struct sock *parent = subflow->conn; | |
1489 | struct mptcp_sock *msk; | |
1490 | ||
40e0b090 PY |
1491 | trace_sk_data_ready(sk); |
1492 | ||
499ada50 PA |
1493 | msk = mptcp_sk(parent); |
1494 | if (state & TCPF_LISTEN) { | |
1495 | /* MPJ subflows are removed from the accept queue before reaching here, | |
1496 | * avoid stray wakeups | |
1497 | */ | |
1498 | if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) | |
1499 | return; | |
1500 | ||
499ada50 PA |
1501 | parent->sk_data_ready(parent); |
1502 | return; | |
1503 | } | |
1504 | ||
1505 | WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && | |
1506 | !subflow->mp_join && !(state & TCPF_CLOSE)); | |
1507 | ||
5684ab1a | 1508 | if (mptcp_subflow_data_available(sk)) { |
499ada50 | 1509 | mptcp_data_ready(parent, sk); |
5684ab1a PA |
1510 | |
1511 | /* the subflow-level lowat test is not relevant here; | |
1512 | * respect the msk-level threshold, possibly mandating an immediate ack | |
1513 | */ | |
1514 | if (mptcp_data_avail(msk) < parent->sk_rcvlowat && | |
1515 | (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss) | |
1516 | inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; | |
1517 | } else if (unlikely(sk->sk_err)) { | |
499ada50 | 1518 | subflow_error_report(sk); |
5684ab1a | 1519 | } |
499ada50 PA |
1520 | } |
1521 | ||
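/*
 * Editor's note: the immediate-ACK heuristic from subflow_data_ready() in
 * isolation, as an assumed-standalone sketch: when msk-level data is below
 * the receive low-water mark but more than one MSS is unacknowledged, force
 * an ACK so the peer's window keeps moving. All values are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool need_immediate_ack(uint32_t data_avail, uint32_t rcvlowat,
			       uint32_t rcv_nxt, uint32_t rcv_wup,
			       uint32_t rcv_mss)
{
	return data_avail < rcvlowat && rcv_nxt - rcv_wup > rcv_mss;
}

int main(void)
{
	/* 500 bytes ready (below a 4096 lowat), 2000 unacked > 1460 MSS */
	printf("%s\n",
	       need_immediate_ack(500, 4096, 12000, 10000, 1460) ?
	       "ICSK_ACK_NOW" : "regular delayed ack");
	return 0;
}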
1522 | static void subflow_write_space(struct sock *ssk) | |
1523 | { | |
1524 | struct sock *sk = mptcp_subflow_ctx(ssk)->conn; | |
1525 | ||
1526 | mptcp_propagate_sndbuf(sk, ssk); | |
1527 | mptcp_write_space(sk); | |
1528 | } | |
1529 | ||
51fa7f8e | 1530 | static const struct inet_connection_sock_af_ops * |
cec37a6e PK |
1531 | subflow_default_af_ops(struct sock *sk) |
1532 | { | |
1533 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1534 | if (sk->sk_family == AF_INET6) | |
1535 | return &subflow_v6_specific; | |
1536 | #endif | |
1537 | return &subflow_specific; | |
1538 | } | |
1539 | ||
cec37a6e | 1540 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
31484d56 GU |
1541 | void mptcpv6_handle_mapped(struct sock *sk, bool mapped) |
1542 | { | |
cec37a6e PK |
1543 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); |
1544 | struct inet_connection_sock *icsk = inet_csk(sk); | |
51fa7f8e | 1545 | const struct inet_connection_sock_af_ops *target; |
cec37a6e PK |
1546 | |
1547 | target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk); | |
1548 | ||
cb41b195 | 1549 | pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", |
edc7e489 | 1550 | subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); |
cec37a6e PK |
1551 | |
1552 | if (likely(icsk->icsk_af_ops == target)) | |
1553 | return; | |
1554 | ||
1555 | subflow->icsk_af_ops = icsk->icsk_af_ops; | |
1556 | icsk->icsk_af_ops = target; | |
cec37a6e | 1557 | } |
31484d56 | 1558 | #endif |
cec37a6e | 1559 | |
1729cf18 GT |
1560 | void mptcp_info2sockaddr(const struct mptcp_addr_info *info, |
1561 | struct sockaddr_storage *addr, | |
1562 | unsigned short family) | |
ec3edaa7 PK |
1563 | { |
1564 | memset(addr, 0, sizeof(*addr)); | |
50a13bc3 | 1565 | addr->ss_family = family; |
ec3edaa7 PK |
1566 | if (addr->ss_family == AF_INET) { |
1567 | struct sockaddr_in *in_addr = (struct sockaddr_in *)addr; | |
1568 | ||
50a13bc3 MB |
1569 | if (info->family == AF_INET) |
1570 | in_addr->sin_addr = info->addr; | |
1571 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1572 | else if (ipv6_addr_v4mapped(&info->addr6)) | |
1573 | in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3]; | |
1574 | #endif | |
ec3edaa7 PK |
1575 | in_addr->sin_port = info->port; |
1576 | } | |
1577 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1578 | else if (addr->ss_family == AF_INET6) { | |
1579 | struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr; | |
1580 | ||
50a13bc3 MB |
1581 | if (info->family == AF_INET) |
1582 | ipv6_addr_set_v4mapped(info->addr.s_addr, | |
1583 | &in6_addr->sin6_addr); | |
1584 | else | |
1585 | in6_addr->sin6_addr = info->addr6; | |
ec3edaa7 PK |
1586 | in6_addr->sin6_port = info->port; |
1587 | } | |
1588 | #endif | |
1589 | } | |
1590 | ||
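/*
 * Editor's note: userspace illustration of the v4-mapped conversion that
 * mptcp_info2sockaddr() performs when a v4 address must travel in a v6
 * sockaddr (::ffff:a.b.c.d). Only standard socket headers are used.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct in_addr v4;
	struct sockaddr_in6 sin6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(8080);
	/* ::ffff:192.0.2.1 - bytes 10-11 are 0xff, 12-15 carry the v4 addr */
	sin6.sin6_addr.s6_addr[10] = 0xff;
	sin6.sin6_addr.s6_addr[11] = 0xff;
	memcpy(&sin6.sin6_addr.s6_addr[12], &v4.s_addr, sizeof(v4.s_addr));

	printf("%s\n", inet_ntop(AF_INET6, &sin6.sin6_addr, buf, sizeof(buf)));
	return 0;
}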
b83fbca1 | 1591 | int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local, |
ee285257 | 1592 | const struct mptcp_addr_info *remote) |
ec3edaa7 PK |
1593 | { |
1594 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1595 | struct mptcp_subflow_context *subflow; | |
b83fbca1 | 1596 | int local_id = local->addr.id; |
ec3edaa7 | 1597 | struct sockaddr_storage addr; |
2ff0e566 | 1598 | int remote_id = remote->id; |
95d68651 | 1599 | int err = -ENOTCONN; |
ec3edaa7 | 1600 | struct socket *sf; |
6bad912b | 1601 | struct sock *ssk; |
ec3edaa7 PK |
1602 | u32 remote_token; |
1603 | int addrlen; | |
ec3edaa7 | 1604 | |
1bd1788b | 1605 | /* The userspace PM sent the request too early? */ |
b93df08c | 1606 | if (!mptcp_is_fully_established(sk)) |
95d68651 | 1607 | goto err_out; |
ec3edaa7 | 1608 | |
b83fbca1 | 1609 | err = mptcp_subflow_create_socket(sk, local->addr.family, &sf); |
1bd1788b MBN |
1610 | if (err) { |
1611 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCREATSKERR); | |
1612 | pr_debug("msk=%p local=%d remote=%d create sock error: %d\n", | |
1613 | msk, local_id, remote_id, err); | |
95d68651 | 1614 | goto err_out; |
1bd1788b | 1615 | } |
ec3edaa7 | 1616 | |
6bad912b PA |
1617 | ssk = sf->sk; |
1618 | subflow = mptcp_subflow_ctx(ssk); | |
1619 | do { | |
1620 | get_random_bytes(&subflow->local_nonce, sizeof(u32)); | |
1621 | } while (!subflow->local_nonce); | |
1622 | ||
b83fbca1 MBN |
1623 | /* if 'IPADDRANY', the ID will be set later, after the routing */ |
1624 | if (local->addr.family == AF_INET) { | |
1625 | if (!local->addr.addr.s_addr) | |
1626 | local_id = -1; | |
1627 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1628 | } else if (sk->sk_family == AF_INET6) { | |
1629 | if (ipv6_addr_any(&local->addr.addr6)) | |
1630 | local_id = -1; | |
1631 | #endif | |
1632 | } | |
1633 | ||
1634 | if (local_id >= 0) | |
4cf86ae8 | 1635 | subflow_set_local_id(subflow, local_id); |
6bad912b | 1636 | |
b3ea6b27 | 1637 | subflow->remote_key_valid = 1; |
1c09d7cb PA |
1638 | subflow->remote_key = READ_ONCE(msk->remote_key); |
1639 | subflow->local_key = READ_ONCE(msk->local_key); | |
ec3edaa7 | 1640 | subflow->token = msk->token; |
b83fbca1 | 1641 | mptcp_info2sockaddr(&local->addr, &addr, ssk->sk_family); |
ec3edaa7 PK |
1642 | |
1643 | addrlen = sizeof(struct sockaddr_in); | |
1644 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
50a13bc3 | 1645 | if (addr.ss_family == AF_INET6) |
ec3edaa7 PK |
1646 | addrlen = sizeof(struct sockaddr_in6); |
1647 | #endif | |
b83fbca1 | 1648 | ssk->sk_bound_dev_if = local->ifindex; |
ec3edaa7 | 1649 | err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); |
1bd1788b MBN |
1650 | if (err) { |
1651 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXBINDERR); | |
1652 | pr_debug("msk=%p local=%d remote=%d bind error: %d\n", | |
1653 | msk, local_id, remote_id, err); | |
ec3edaa7 | 1654 | goto failed; |
1bd1788b | 1655 | } |
ec3edaa7 PK |
1656 | |
1657 | mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); | |
cb41b195 | 1658 | pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk, |
2ff0e566 | 1659 | remote_token, local_id, remote_id); |
ec3edaa7 | 1660 | subflow->remote_token = remote_token; |
967d3c27 | 1661 | WRITE_ONCE(subflow->remote_id, remote_id); |
ec3edaa7 | 1662 | subflow->request_join = 1; |
b83fbca1 | 1663 | subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); |
6f06b4d4 | 1664 | subflow->subflow_id = msk->subflow_id++; |
50a13bc3 | 1665 | mptcp_info2sockaddr(remote, &addr, ssk->sk_family); |
ec3edaa7 | 1666 | |
3e501490 PA |
1667 | sock_hold(ssk); |
1668 | list_add_tail(&subflow->node, &msk->conn_list); | |
ec3edaa7 | 1669 | err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); |
1bd1788b MBN |
1670 | if (err && err != -EINPROGRESS) { |
1671 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCONNECTERR); | |
1672 | pr_debug("msk=%p local=%d remote=%d connect error: %d\n", | |
1673 | msk, local_id, remote_id, err); | |
5b950ff4 | 1674 | goto failed_unlink; |
1bd1788b MBN |
1675 | } |
1676 | ||
1677 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTX); | |
ec3edaa7 | 1678 | |
866f26f2 PA |
1679 | /* discard the subflow socket */ |
1680 | mptcp_sock_graft(ssk, sk->sk_socket); | |
1681 | iput(SOCK_INODE(sf)); | |
0530020a | 1682 | WRITE_ONCE(msk->allow_infinite_fallback, false); |
27e5ccc2 | 1683 | mptcp_stop_tout_timer(sk); |
b5177ed9 | 1684 | return 0; |
ec3edaa7 | 1685 | |
5b950ff4 | 1686 | failed_unlink: |
5b950ff4 | 1687 | list_del(&subflow->node); |
f0715779 | 1688 | sock_put(mptcp_subflow_tcp_sock(subflow)); |
ec3edaa7 | 1689 | |
ec3edaa7 | 1690 | failed: |
e16163b6 | 1691 | subflow->disposable = 1; |
ec3edaa7 | 1692 | sock_release(sf); |
95d68651 PA |
1693 | |
1694 | err_out: | |
1695 | /* we account subflows before creation, and these failures will not | |
1696 | * be caught by sk_state_change() | |
1697 | */ | |
1698 | mptcp_pm_close_subflow(msk); | |
ec3edaa7 PK |
1699 | return err; |
1700 | } | |
1701 | ||
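/*
 * Editor's note: in-kernel path managers reach __mptcp_subflow_connect() to
 * add subflows; the userspace entry point to MPTCP itself is just a socket
 * created with IPPROTO_MPTCP (kernels >= 5.6). Minimal client sketch; the
 * address and port are placeholders.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* value from the Linux UAPI headers */
#endif

int main(void)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0) {
		perror("socket");	/* e.g. CONFIG_MPTCP disabled */
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(8080);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	/* additional subflows are then created by the configured PM */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");
	close(fd);
	return 0;
}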
3764b0c5 NR |
1702 | static void mptcp_attach_cgroup(struct sock *parent, struct sock *child) |
1703 | { | |
1704 | #ifdef CONFIG_SOCK_CGROUP_DATA | |
1705 | struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data, | |
1706 | *child_skcd = &child->sk_cgrp_data; | |
1707 | ||
1708 | /* only the additional subflows created by kworkers have to be modified */ | |
1709 | if (cgroup_id(sock_cgroup_ptr(parent_skcd)) != | |
1710 | cgroup_id(sock_cgroup_ptr(child_skcd))) { | |
1711 | #ifdef CONFIG_MEMCG | |
1712 | struct mem_cgroup *memcg = parent->sk_memcg; | |
1713 | ||
1714 | mem_cgroup_sk_free(child); | |
1715 | if (memcg && css_tryget(&memcg->css)) | |
1716 | child->sk_memcg = memcg; | |
1717 | #endif /* CONFIG_MEMCG */ | |
1718 | ||
1719 | cgroup_sk_free(child_skcd); | |
1720 | *child_skcd = *parent_skcd; | |
1721 | cgroup_sk_clone(child_skcd); | |
1722 | } | |
1723 | #endif /* CONFIG_SOCK_CGROUP_DATA */ | |
1724 | } | |
1725 | ||
b19bc294 PA |
1726 | static void mptcp_subflow_ops_override(struct sock *ssk) |
1727 | { | |
1728 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1729 | if (ssk->sk_prot == &tcpv6_prot) | |
1730 | ssk->sk_prot = &tcpv6_prot_override; | |
1731 | else | |
1732 | #endif | |
1733 | ssk->sk_prot = &tcp_prot_override; | |
1734 | } | |
1735 | ||
1736 | static void mptcp_subflow_ops_undo_override(struct sock *ssk) | |
1737 | { | |
1738 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1739 | if (ssk->sk_prot == &tcpv6_prot_override) | |
1740 | ssk->sk_prot = &tcpv6_prot; | |
1741 | else | |
1742 | #endif | |
1743 | ssk->sk_prot = &tcp_prot; | |
1744 | } | |
6bc1fe7d PA |
1745 | |
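/*
 * Editor's note: the copy-and-patch override above in miniature - duplicate
 * an ops table, replace one callback, swing the pointer, and undo by pointer
 * comparison. Struct and function names are invented for this sketch.
 */
#include <stdio.h>

struct proto_ops {
	void (*release_cb)(void);
};

static void tcp_release(void)	{ printf("plain TCP release\n"); }
static void mptcp_release(void)	{ printf("MPTCP-aware release\n"); }

static struct proto_ops tcp_ops = { .release_cb = tcp_release };
static struct proto_ops tcp_ops_override;

int main(void)
{
	const struct proto_ops *sk_prot = &tcp_ops;

	/* built once at init time, patching a single hook */
	tcp_ops_override = tcp_ops;
	tcp_ops_override.release_cb = mptcp_release;

	sk_prot = &tcp_ops_override;		/* ops_override() */
	sk_prot->release_cb();

	if (sk_prot == &tcp_ops_override)	/* ops_undo_override() */
		sk_prot = &tcp_ops;
	sk_prot->release_cb();
	return 0;
}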
1746 | int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, | |
1747 | struct socket **new_sock) | |
2303f994 PK |
1748 | { |
1749 | struct mptcp_subflow_context *subflow; | |
1750 | struct net *net = sock_net(sk); | |
1751 | struct socket *sf; | |
1752 | int err; | |
1753 | ||
adf73410 PA |
1754 | /* un-accepted server sockets can reach here on bad configuration; | |
1755 | * bail early to avoid greater trouble later | |
1756 | */ | |
1757 | if (unlikely(!sk->sk_socket)) | |
1758 | return -EINVAL; | |
1759 | ||
6bc1fe7d | 1760 | err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf); |
2303f994 PK |
1761 | if (err) |
1762 | return err; | |
1763 | ||
ad217100 | 1764 | lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING); |
2303f994 | 1765 | |
e3d9387f PA |
1766 | err = security_mptcp_add_subflow(sk, sf->sk); |
1767 | if (err) | |
a1ab24e5 | 1768 | goto err_free; |
e3d9387f | 1769 | |
3764b0c5 NR |
1770 | /* the newly created socket has to be in the same cgroup as its parent */ |
1771 | mptcp_attach_cgroup(sk, sf->sk); | |
1772 | ||
2303f994 PK |
1773 | /* kernel sockets do not acquire a net ref by default, but the TCP | |
1774 | * timer needs it. | |
d1e96cc4 | 1775 | * Update ns_tracker to current stack trace and refcounted tracker. |
2303f994 | 1776 | */ |
5c70eb5c | 1777 | sk_net_refcnt_upgrade(sf->sk); |
2303f994 | 1778 | err = tcp_set_ulp(sf->sk, "mptcp"); |
a1ab24e5 PA |
1779 | if (err) |
1780 | goto err_free; | |
e3d9387f | 1781 | |
a1ab24e5 | 1782 | mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk); |
2303f994 PK |
1783 | release_sock(sf->sk); |
1784 | ||
92f74c1e | 1785 | /* the newly created socket really belongs to the owning MPTCP |
7d14b0d2 PA |
1786 | * socket, even if, for additional subflows, the allocation is performed | |
1787 | * by a kernel workqueue. Adjust inode references, so that the | |
d640516a | 1788 | * procfs/diag interfaces really show this one belonging to the correct |
7d14b0d2 PA |
1789 | * user. |
1790 | */ | |
1791 | SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino; | |
1792 | SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid; | |
1793 | SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid; | |
1794 | ||
2303f994 | 1795 | subflow = mptcp_subflow_ctx(sf->sk); |
cb41b195 | 1796 | pr_debug("subflow=%p\n", subflow); |
2303f994 PK |
1797 | |
1798 | *new_sock = sf; | |
79c0949e | 1799 | sock_hold(sk); |
2303f994 | 1800 | subflow->conn = sk; |
b19bc294 | 1801 | mptcp_subflow_ops_override(sf->sk); |
2303f994 PK |
1802 | |
1803 | return 0; | |
a1ab24e5 PA |
1804 | |
1805 | err_free: | |
1806 | release_sock(sf->sk); | |
1807 | sock_release(sf); | |
1808 | return err; | |
2303f994 PK |
1809 | } |
1810 | ||
1811 | static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk, | |
1812 | gfp_t priority) | |
1813 | { | |
1814 | struct inet_connection_sock *icsk = inet_csk(sk); | |
1815 | struct mptcp_subflow_context *ctx; | |
1816 | ||
1817 | ctx = kzalloc(sizeof(*ctx), priority); | |
1818 | if (!ctx) | |
1819 | return NULL; | |
1820 | ||
1821 | rcu_assign_pointer(icsk->icsk_ulp_data, ctx); | |
cec37a6e | 1822 | INIT_LIST_HEAD(&ctx->node); |
b19bc294 | 1823 | INIT_LIST_HEAD(&ctx->delegated_node); |
2303f994 | 1824 | |
cb41b195 | 1825 | pr_debug("subflow=%p\n", ctx); |
2303f994 PK |
1826 | |
1827 | ctx->tcp_sock = sk; | |
a7cfe776 | 1828 | WRITE_ONCE(ctx->local_id, -1); |
2303f994 PK |
1829 | |
1830 | return ctx; | |
1831 | } | |
1832 | ||
648ef4b8 MM |
1833 | static void __subflow_state_change(struct sock *sk) |
1834 | { | |
1835 | struct socket_wq *wq; | |
1836 | ||
1837 | rcu_read_lock(); | |
1838 | wq = rcu_dereference(sk->sk_wq); | |
1839 | if (skwq_has_sleeper(wq)) | |
1840 | wake_up_interruptible_all(&wq->wait); | |
1841 | rcu_read_unlock(); | |
1842 | } | |
1843 | ||
648ef4b8 MM |
1844 | static void subflow_state_change(struct sock *sk) |
1845 | { | |
1846 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
dc093db5 | 1847 | struct sock *parent = subflow->conn; |
81c1d029 | 1848 | struct mptcp_sock *msk; |
648ef4b8 MM |
1849 | |
1850 | __subflow_state_change(sk); | |
1851 | ||
81c1d029 | 1852 | msk = mptcp_sk(parent); |
8fd73804 DC |
1853 | if (subflow_simultaneous_connect(sk)) { |
1854 | mptcp_do_fallback(sk); | |
81c1d029 | 1855 | pr_fallback(msk); |
8fd73804 | 1856 | subflow->conn_finished = 1; |
e4a0fa47 | 1857 | mptcp_propagate_state(parent, sk, subflow, NULL); |
8fd73804 DC |
1858 | } |
1859 | ||
648ef4b8 MM |
1860 | /* as recvmsg() does not acquire the subflow socket for ssk selection, | |
1861 | * a FIN packet carrying a DSS can go unnoticed if we don't trigger | |
1862 | * the data available machinery here. | |
1863 | */ | |
e1ff9e82 | 1864 | if (mptcp_subflow_data_available(sk)) |
2e52213c | 1865 | mptcp_data_ready(parent, sk); |
499ada50 PA |
1866 | else if (unlikely(sk->sk_err)) |
1867 | subflow_error_report(sk); | |
648ef4b8 | 1868 | |
40947e13 | 1869 | subflow_sched_work_if_closed(mptcp_sk(parent), sk); |
648ef4b8 MM |
1870 | } |
1871 | ||
2a6a870e PA |
1872 | void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk) |
1873 | { | |
1874 | struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue; | |
511b90e3 PA |
1875 | struct request_sock *req, *head, *tail; |
1876 | struct mptcp_subflow_context *subflow; | |
1877 | struct sock *sk, *ssk; | |
2a6a870e | 1878 | |
511b90e3 PA |
1879 | /* Due to lock dependencies, no relevant lock can be acquired under rskq_lock. | |
1880 | * Splice the req list, so that accept() cannot reach the pending ssk after | |
1881 | * the listener socket is released below. | |
1882 | */ | |
2a6a870e | 1883 | spin_lock_bh(&queue->rskq_lock); |
511b90e3 PA |
1884 | head = queue->rskq_accept_head; |
1885 | tail = queue->rskq_accept_tail; | |
1886 | queue->rskq_accept_head = NULL; | |
1887 | queue->rskq_accept_tail = NULL; | |
1888 | spin_unlock_bh(&queue->rskq_lock); | |
1889 | ||
1890 | if (!head) | |
1891 | return; | |
1892 | ||
1893 | /* can't acquire the msk socket lock under the subflow one, | |
1894 | * or will cause ABBA deadlock | |
1895 | */ | |
1896 | release_sock(listener_ssk); | |
2a6a870e | 1897 | |
511b90e3 PA |
1898 | for (req = head; req; req = req->dl_next) { |
1899 | ssk = req->sk; | |
2a6a870e PA |
1900 | if (!sk_is_mptcp(ssk)) |
1901 | continue; | |
1902 | ||
1903 | subflow = mptcp_subflow_ctx(ssk); | |
1904 | if (!subflow || !subflow->conn) | |
1905 | continue; | |
1906 | ||
63740448 | 1907 | sk = subflow->conn; |
63740448 | 1908 | sock_hold(sk); |
2a6a870e PA |
1909 | |
1910 | lock_sock_nested(sk, SINGLE_DEPTH_NESTING); | |
63740448 | 1911 | __mptcp_unaccepted_force_close(sk); |
2a6a870e PA |
1912 | release_sock(sk); |
1913 | ||
1914 | /* lockdep will report a false positive ABBA deadlock | |
1915 | * between cancel_work_sync and the listener socket. | |
1916 | * The involved locks belong to different sockets WRT | |
1917 | * the existing AB chain. | |
1918 | * Using a per socket key is problematic as key | |
1919 | * deregistration requires process context and must be | |
1920 | * performed at socket disposal time, in atomic | |
1921 | * context. | |
1922 | * Just tell lockdep to consider the listener socket | |
1923 | * released here. | |
1924 | */ | |
1925 | mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_); | |
1926 | mptcp_cancel_work(sk); | |
1927 | mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_); | |
1928 | ||
1929 | sock_put(sk); | |
1930 | } | |
1931 | ||
1932 | /* we are still under the listener msk socket lock */ | |
1933 | lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING); | |
511b90e3 PA |
1934 | |
1935 | /* restore the listener queue, to let the TCP code clean it up */ | |
1936 | spin_lock_bh(&queue->rskq_lock); | |
1937 | WARN_ON_ONCE(queue->rskq_accept_head); | |
1938 | queue->rskq_accept_head = head; | |
1939 | queue->rskq_accept_tail = tail; | |
1940 | spin_unlock_bh(&queue->rskq_lock); | |
2a6a870e PA |
1941 | } |
1942 | ||
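/*
 * Editor's note: the queue-splice trick above in miniature - detach the
 * whole pending list while holding the lock, then walk it with the lock
 * dropped. A pthread mutex stands in for rskq_lock and the request type is
 * invented. Unlike this sketch, the kernel code re-attaches the spliced list
 * afterwards so regular TCP cleanup still sees it.
 */
#include <pthread.h>
#include <stdio.h>

struct req {
	int id;
	struct req *dl_next;
};

static pthread_mutex_t rskq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *rskq_accept_head;

static void queue_clean(void)
{
	struct req *req, *head;

	pthread_mutex_lock(&rskq_lock);
	head = rskq_accept_head;	/* splice: steal the entire list */
	rskq_accept_head = NULL;
	pthread_mutex_unlock(&rskq_lock);

	/* no lock held: safe to take other locks per entry */
	for (req = head; req; req = req->dl_next)
		printf("force-closing pending req %d\n", req->id);
}

int main(void)
{
	struct req b = { .id = 2, .dl_next = NULL };
	struct req a = { .id = 1, .dl_next = &b };

	rskq_accept_head = &a;
	queue_clean();
	return 0;
}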
2303f994 PK |
1943 | static int subflow_ulp_init(struct sock *sk) |
1944 | { | |
cec37a6e | 1945 | struct inet_connection_sock *icsk = inet_csk(sk); |
2303f994 PK |
1946 | struct mptcp_subflow_context *ctx; |
1947 | struct tcp_sock *tp = tcp_sk(sk); | |
1948 | int err = 0; | |
1949 | ||
1950 | /* disallow attaching ULP to a socket unless it has been | |
1951 | * created with sock_create_kern() | |
1952 | */ | |
1953 | if (!sk->sk_kern_sock) { | |
1954 | err = -EOPNOTSUPP; | |
1955 | goto out; | |
1956 | } | |
1957 | ||
1958 | ctx = subflow_create_ctx(sk, GFP_KERNEL); | |
1959 | if (!ctx) { | |
1960 | err = -ENOMEM; | |
1961 | goto out; | |
1962 | } | |
1963 | ||
cb41b195 | 1964 | pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family); |
2303f994 PK |
1965 | |
1966 | tp->is_mptcp = 1; | |
cec37a6e PK |
1967 | ctx->icsk_af_ops = icsk->icsk_af_ops; |
1968 | icsk->icsk_af_ops = subflow_default_af_ops(sk); | |
648ef4b8 | 1969 | ctx->tcp_state_change = sk->sk_state_change; |
15cc1045 | 1970 | ctx->tcp_error_report = sk->sk_error_report; |
952382c6 FW |
1971 | |
1972 | WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable); | |
1973 | WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space); | |
1974 | ||
648ef4b8 MM |
1975 | sk->sk_data_ready = subflow_data_ready; |
1976 | sk->sk_write_space = subflow_write_space; | |
1977 | sk->sk_state_change = subflow_state_change; | |
15cc1045 | 1978 | sk->sk_error_report = subflow_error_report; |
2303f994 PK |
1979 | out: |
1980 | return err; | |
1981 | } | |
1982 | ||
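/*
 * Editor's note: the save-and-replace callback installation performed by
 * subflow_ulp_init(), reduced to a hypothetical standalone sketch: stash the
 * original handler in the context so it can be chained or restored later,
 * then install the MPTCP-aware one. All types here are invented.
 */
#include <stdio.h>

struct fake_sock;

struct fake_ctx {
	void (*tcp_state_change)(struct fake_sock *);	/* saved original */
};

struct fake_sock {
	struct fake_ctx *ctx;
	void (*sk_state_change)(struct fake_sock *);
};

static void default_state_change(struct fake_sock *sk)
{
	printf("default wakeup\n");
}

static void subflow_state_change_hook(struct fake_sock *sk)
{
	printf("MPTCP bookkeeping first\n");
	sk->ctx->tcp_state_change(sk);	/* chain to the stashed original */
}

int main(void)
{
	struct fake_ctx ctx;
	struct fake_sock sk = { .ctx = &ctx,
				.sk_state_change = default_state_change };

	/* ulp init: remember the original, install the override */
	ctx.tcp_state_change = sk.sk_state_change;
	sk.sk_state_change = subflow_state_change_hook;

	sk.sk_state_change(&sk);
	return 0;
}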
e16163b6 | 1983 | static void subflow_ulp_release(struct sock *ssk) |
2303f994 | 1984 | { |
e16163b6 PA |
1985 | struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); |
1986 | bool release = true; | |
1987 | struct sock *sk; | |
2303f994 PK |
1988 | |
1989 | if (!ctx) | |
1990 | return; | |
1991 | ||
e16163b6 PA |
1992 | sk = ctx->conn; |
1993 | if (sk) { | |
1994 | /* if the msk has been orphaned, keep the ctx | |
0597d0f8 PA |
1995 | * alive; it will be freed by __mptcp_close_ssk() | |
1996 | * when the subflow is still unaccepted | |
e16163b6 | 1997 | */ |
0597d0f8 | 1998 | release = ctx->disposable || list_empty(&ctx->node); |
b6985b9b PA |
1999 | |
2000 | /* inet_child_forget() does not call sk_state_change(), | |
2001 | * explicitly trigger the socket close machinery | |
2002 | */ | |
2003 | if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, | |
2004 | &mptcp_sk(sk)->flags)) | |
2005 | mptcp_schedule_work(sk); | |
e16163b6 PA |
2006 | sock_put(sk); |
2007 | } | |
79c0949e | 2008 | |
b19bc294 | 2009 | mptcp_subflow_ops_undo_override(ssk); |
e16163b6 PA |
2010 | if (release) |
2011 | kfree_rcu(ctx, rcu); | |
2303f994 PK |
2012 | } |
2013 | ||
cec37a6e PK |
2014 | static void subflow_ulp_clone(const struct request_sock *req, |
2015 | struct sock *newsk, | |
2016 | const gfp_t priority) | |
2017 | { | |
2018 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); | |
2019 | struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk); | |
2020 | struct mptcp_subflow_context *new_ctx; | |
2021 | ||
f296234c PK |
2022 | if (!tcp_rsk(req)->is_mptcp || |
2023 | (!subflow_req->mp_capable && !subflow_req->mp_join)) { | |
648ef4b8 | 2024 | subflow_ulp_fallback(newsk, old_ctx); |
cec37a6e PK |
2025 | return; |
2026 | } | |
2027 | ||
2028 | new_ctx = subflow_create_ctx(newsk, priority); | |
edc7e489 | 2029 | if (!new_ctx) { |
648ef4b8 | 2030 | subflow_ulp_fallback(newsk, old_ctx); |
cec37a6e PK |
2031 | return; |
2032 | } | |
2033 | ||
2034 | new_ctx->conn_finished = 1; | |
2035 | new_ctx->icsk_af_ops = old_ctx->icsk_af_ops; | |
648ef4b8 | 2036 | new_ctx->tcp_state_change = old_ctx->tcp_state_change; |
15cc1045 | 2037 | new_ctx->tcp_error_report = old_ctx->tcp_error_report; |
58b09919 | 2038 | new_ctx->rel_write_seq = 1; |
58b09919 | 2039 | |
f296234c PK |
2040 | if (subflow_req->mp_capable) { |
2041 | /* see comments in subflow_syn_recv_sock(): the MPTCP connection | |
2042 | * is fully established only after we receive the remote key | |
2043 | */ | |
2044 | new_ctx->mp_capable = 1; | |
f296234c PK |
2045 | new_ctx->local_key = subflow_req->local_key; |
2046 | new_ctx->token = subflow_req->token; | |
2047 | new_ctx->ssn_offset = subflow_req->ssn_offset; | |
2048 | new_ctx->idsn = subflow_req->idsn; | |
4cf86ae8 PA |
2049 | |
2050 | /* this is the first subflow, id is always 0 */ | |
a7cfe776 | 2051 | subflow_set_local_id(new_ctx, 0); |
f296234c | 2052 | } else if (subflow_req->mp_join) { |
ec3edaa7 | 2053 | new_ctx->ssn_offset = subflow_req->ssn_offset; |
f296234c | 2054 | new_ctx->mp_join = 1; |
581c8cbf | 2055 | WRITE_ONCE(new_ctx->fully_established, true); |
b3ea6b27 | 2056 | new_ctx->remote_key_valid = 1; |
f296234c | 2057 | new_ctx->backup = subflow_req->backup; |
efd340bf | 2058 | new_ctx->request_bkup = subflow_req->request_bkup; |
967d3c27 | 2059 | WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id); |
f296234c PK |
2060 | new_ctx->token = subflow_req->token; |
2061 | new_ctx->thmac = subflow_req->thmac; | |
4cf86ae8 PA |
2062 | |
2063 | /* the subflow req id is valid, fetched via subflow_check_req() | |
2064 | * and subflow_token_join_request() | |
2065 | */ | |
2066 | subflow_set_local_id(new_ctx, subflow_req->local_id); | |
f296234c | 2067 | } |
cec37a6e PK |
2068 | } |
2069 | ||
b19bc294 PA |
2070 | static void tcp_release_cb_override(struct sock *ssk) |
2071 | { | |
2072 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
a5efdbce | 2073 | long status; |
b19bc294 | 2074 | |
a5efdbce PA |
2075 | /* process and clear all the pending actions, but leave the subflow in | |
2076 | * the napi queue. To respect locking, only the same CPU that originated | |
2077 | * the action can touch the list. mptcp_napi_poll will take care of it. | |
2078 | */ | |
2079 | status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0); | |
2080 | if (status) | |
2081 | mptcp_subflow_process_delegated(ssk, status); | |
b19bc294 PA |
2082 | |
2083 | tcp_release_cb(ssk); | |
2084 | } | |
2085 | ||
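/*
 * Editor's note: the atomic grab-and-clear step of tcp_release_cb_override()
 * (set_mask_bits() in the kernel), approximated with C11 atomics: read the
 * pending delegated-action bits and clear them in a single atomic operation.
 * The bit names are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DELEGATE_SEND	(1L << 0)
#define DELEGATE_ACK	(1L << 1)
#define ACTIONS_MASK	(DELEGATE_SEND | DELEGATE_ACK)

int main(void)
{
	atomic_long delegated_status = DELEGATE_SEND | DELEGATE_ACK;

	/* fetch the old value while clearing the masked bits atomically */
	long status = atomic_fetch_and(&delegated_status, ~ACTIONS_MASK) &
		      ACTIONS_MASK;

	if (status & DELEGATE_SEND)
		printf("processing delegated send\n");
	if (status & DELEGATE_ACK)
		printf("processing delegated ack\n");
	return 0;
}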
4c028829 PA |
2086 | static int tcp_abort_override(struct sock *ssk, int err) |
2087 | { | |
2088 | /* closing a listener subflow requires a great deal of care. | |
2089 | * keep it simple and just prevent such an operation | |
2090 | */ | |
2091 | if (inet_sk_state_load(ssk) == TCP_LISTEN) | |
2092 | return -EINVAL; | |
2093 | ||
2094 | return tcp_abort(ssk, err); | |
2095 | } | |
2096 | ||
2303f994 PK |
2097 | static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = { |
2098 | .name = "mptcp", | |
2099 | .owner = THIS_MODULE, | |
2100 | .init = subflow_ulp_init, | |
2101 | .release = subflow_ulp_release, | |
cec37a6e | 2102 | .clone = subflow_ulp_clone, |
2303f994 PK |
2103 | }; |
2104 | ||
cec37a6e PK |
2105 | static int subflow_ops_init(struct request_sock_ops *subflow_ops) |
2106 | { | |
2107 | subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock); | |
cec37a6e PK |
2108 | |
2109 | subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name, | |
2110 | subflow_ops->obj_size, 0, | |
2111 | SLAB_ACCOUNT | | |
2112 | SLAB_TYPESAFE_BY_RCU, | |
2113 | NULL); | |
2114 | if (!subflow_ops->slab) | |
2115 | return -ENOMEM; | |
2116 | ||
2117 | return 0; | |
2118 | } | |
2119 | ||
d39dceca | 2120 | void __init mptcp_subflow_init(void) |
2303f994 | 2121 | { |
34b21d1d MB |
2122 | mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops; |
2123 | mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4"; | |
d3295fee MB |
2124 | mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor; |
2125 | ||
34b21d1d MB |
2126 | if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0) |
2127 | panic("MPTCP: failed to init subflow v4 request sock ops\n"); | |
cec37a6e PK |
2128 | |
2129 | subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops; | |
7ea851d1 | 2130 | subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req; |
36b122ba | 2131 | subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack; |
cec37a6e PK |
2132 | |
2133 | subflow_specific = ipv4_specific; | |
2134 | subflow_specific.conn_request = subflow_v4_conn_request; | |
2135 | subflow_specific.syn_recv_sock = subflow_syn_recv_sock; | |
2136 | subflow_specific.sk_rx_dst_set = subflow_finish_connect; | |
4cf86ae8 | 2137 | subflow_specific.rebuild_header = subflow_rebuild_header; |
cec37a6e | 2138 | |
b19bc294 PA |
2139 | tcp_prot_override = tcp_prot; |
2140 | tcp_prot_override.release_cb = tcp_release_cb_override; | |
4c028829 | 2141 | tcp_prot_override.diag_destroy = tcp_abort_override; |
b19bc294 | 2142 | |
cec37a6e | 2143 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
34b21d1d MB |
2144 | /* In struct mptcp_subflow_request_sock, we assume the TCP request sock |
2145 | * structures for v4 and v6 have the same size. This should not change in | |
2146 | * the future, but better to be warned if it is no longer | |
2147 | * the case. | |
2148 | */ | |
2149 | BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock)); | |
2150 | ||
2151 | mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops; | |
2152 | mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6"; | |
d3295fee MB |
2153 | mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor; |
2154 | ||
34b21d1d MB |
2155 | if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0) |
2156 | panic("MPTCP: failed to init subflow v6 request sock ops\n"); | |
2157 | ||
cec37a6e | 2158 | subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops; |
7ea851d1 | 2159 | subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req; |
36b122ba | 2160 | subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack; |
cec37a6e PK |
2161 | |
2162 | subflow_v6_specific = ipv6_specific; | |
2163 | subflow_v6_specific.conn_request = subflow_v6_conn_request; | |
2164 | subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock; | |
2165 | subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect; | |
4cf86ae8 | 2166 | subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header; |
cec37a6e PK |
2167 | |
2168 | subflow_v6m_specific = subflow_v6_specific; | |
2169 | subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit; | |
2170 | subflow_v6m_specific.send_check = ipv4_specific.send_check; | |
2171 | subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len; | |
2172 | subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced; | |
4cf86ae8 | 2173 | subflow_v6m_specific.rebuild_header = subflow_rebuild_header; |
b19bc294 PA |
2174 | |
2175 | tcpv6_prot_override = tcpv6_prot; | |
2176 | tcpv6_prot_override.release_cb = tcp_release_cb_override; | |
4c028829 | 2177 | tcpv6_prot_override.diag_destroy = tcp_abort_override; |
cec37a6e PK |
2178 | #endif |
2179 | ||
5147dfb5 DC |
2180 | mptcp_diag_subflow_init(&subflow_ulp_ops); |
2181 | ||
2303f994 PK |
2182 | if (tcp_register_ulp(&subflow_ulp_ops) != 0) |
2183 | panic("MPTCP: failed to register subflows to ULP\n"); | |
2184 | } |