Commit | Line | Data |
---|---|---|
f870fa0b MM |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Multipath TCP | |
3 | * | |
4 | * Copyright (c) 2017 - 2019, Intel Corporation. | |
5 | */ | |
6 | ||
7 | #define pr_fmt(fmt) "MPTCP: " fmt | |
8 | ||
9 | #include <linux/kernel.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/netdevice.h> | |
7a6a6cbc PA |
12 | #include <linux/sched/signal.h> |
13 | #include <linux/atomic.h> | |
f870fa0b MM |
14 | #include <net/sock.h> |
15 | #include <net/inet_common.h> | |
16 | #include <net/inet_hashtables.h> | |
17 | #include <net/protocol.h> | |
18 | #include <net/tcp.h> | |
3721b9b6 | 19 | #include <net/tcp_states.h> |
cf7da0d6 PK |
20 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
21 | #include <net/transp_v6.h> | |
22 | #endif | |
f870fa0b MM |
23 | #include <net/mptcp.h> |
24 | #include "protocol.h" | |
fc518953 | 25 | #include "mib.h" |
f870fa0b | 26 | |
b0519de8 FW |
27 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
28 | struct mptcp6_sock { | |
29 | struct mptcp_sock msk; | |
30 | struct ipv6_pinfo np; | |
31 | }; | |
32 | #endif | |
33 | ||
6771bfd9 | 34 | struct mptcp_skb_cb { |
ab174ad8 PA |
35 | u64 map_seq; |
36 | u64 end_seq; | |
6771bfd9 FW |
37 | u32 offset; |
38 | }; | |
39 | ||
40 | #define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0])) | |
41 | ||
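The MPTCP_SKB_CB() macro above overlays a private struct on the generic 48-byte skb->cb[] scratch area, so every queued segment carries its MPTCP-level map_seq, end_seq and read offset without extra allocations. Below is a minimal userspace sketch of the same overlay pattern; the fake_skb/fake_skb_cb names are illustrative stand-ins, not kernel APIs, and the only real constraint shown is that the private struct must fit in the scratch area.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for struct sk_buff, keeping only what the sketch needs */
struct fake_skb {
	unsigned int len;
	char cb[48];		/* per-layer scratch area, like skb->cb[] */
};

/* private per-segment state, overlaid on the scratch area */
struct fake_skb_cb {
	uint64_t map_seq;	/* MPTCP-level sequence of the first byte */
	uint64_t end_seq;	/* map_seq + amount of data carried */
	uint32_t offset;	/* bytes already consumed from this buffer */
};

#define FAKE_SKB_CB(__skb) ((struct fake_skb_cb *)&((__skb)->cb[0]))

int main(void)
{
	struct fake_skb skb = { .len = 100 };

	/* the overlay must fit inside the scratch area */
	assert(sizeof(struct fake_skb_cb) <= sizeof(skb.cb));

	FAKE_SKB_CB(&skb)->map_seq = 1000;
	FAKE_SKB_CB(&skb)->end_seq = 1000 + skb.len;
	FAKE_SKB_CB(&skb)->offset = 0;

	printf("seq %llu..%llu offset %u\n",
	       (unsigned long long)FAKE_SKB_CB(&skb)->map_seq,
	       (unsigned long long)FAKE_SKB_CB(&skb)->end_seq,
	       FAKE_SKB_CB(&skb)->offset);
	return 0;
}
```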
d027236c PA |
42 | static struct percpu_counter mptcp_sockets_allocated; |
43 | ||
2303f994 PK |
44 | /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not |
45 | * completed yet or has failed, return the subflow socket. | |
46 | * Otherwise return NULL. | |
47 | */ | |
48 | static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk) | |
49 | { | |
d22f4988 | 50 | if (!msk->subflow || READ_ONCE(msk->can_ack)) |
2303f994 PK |
51 | return NULL; |
52 | ||
53 | return msk->subflow; | |
54 | } | |
55 | ||
d2f77c53 | 56 | static bool mptcp_is_tcpsk(struct sock *sk) |
0b4f33de FW |
57 | { |
58 | struct socket *sock = sk->sk_socket; | |
59 | ||
0b4f33de FW |
60 | if (unlikely(sk->sk_prot == &tcp_prot)) { |
61 | /* we are being invoked after mptcp_accept() has | |
62 | * accepted a non-mp-capable flow: sk is a tcp_sk, | |
63 | * not an mptcp one. | |
64 | * | |
65 | * Hand the socket over to tcp so all further socket ops | |
66 | * bypass mptcp. | |
67 | */ | |
68 | sock->ops = &inet_stream_ops; | |
d2f77c53 | 69 | return true; |
0b4f33de FW |
70 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
71 | } else if (unlikely(sk->sk_prot == &tcpv6_prot)) { | |
72 | sock->ops = &inet6_stream_ops; | |
d2f77c53 | 73 | return true; |
0b4f33de FW |
74 | #endif |
75 | } | |
76 | ||
d2f77c53 | 77 | return false; |
0b4f33de FW |
78 | } |
79 | ||
76660afb | 80 | static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk) |
cec37a6e | 81 | { |
cec37a6e PK |
82 | sock_owned_by_me((const struct sock *)msk); |
83 | ||
e1ff9e82 | 84 | if (likely(!__mptcp_check_fallback(msk))) |
cec37a6e PK |
85 | return NULL; |
86 | ||
76660afb | 87 | return msk->first; |
cec37a6e PK |
88 | } |
89 | ||
fa68018d | 90 | static int __mptcp_socket_create(struct mptcp_sock *msk) |
2303f994 PK |
91 | { |
92 | struct mptcp_subflow_context *subflow; | |
93 | struct sock *sk = (struct sock *)msk; | |
94 | struct socket *ssock; | |
95 | int err; | |
96 | ||
2303f994 PK |
97 | err = mptcp_subflow_create_socket(sk, &ssock); |
98 | if (err) | |
fa68018d | 99 | return err; |
2303f994 | 100 | |
8ab183de | 101 | msk->first = ssock->sk; |
2303f994 PK |
102 | msk->subflow = ssock; |
103 | subflow = mptcp_subflow_ctx(ssock->sk); | |
cec37a6e | 104 | list_add(&subflow->node, &msk->conn_list); |
2303f994 PK |
105 | subflow->request_mptcp = 1; |
106 | ||
e1ff9e82 DC |
107 | /* accept() will wait on first subflow sk_wq, and we always wake up |
108 | * via msk->sk_socket | |
109 | */ | |
110 | RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq); | |
111 | ||
fa68018d | 112 | return 0; |
2303f994 PK |
113 | } |
114 | ||
ab174ad8 PA |
115 | static void mptcp_drop(struct sock *sk, struct sk_buff *skb) |
116 | { | |
117 | sk_drops_add(sk, skb); | |
118 | __kfree_skb(skb); | |
119 | } | |
120 | ||
8268ed4c PA |
121 | static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, |
122 | struct sk_buff *from) | |
123 | { | |
124 | bool fragstolen; | |
125 | int delta; | |
126 | ||
127 | if (MPTCP_SKB_CB(from)->offset || | |
128 | !skb_try_coalesce(to, from, &fragstolen, &delta)) | |
129 | return false; | |
130 | ||
06242e44 PA |
131 | pr_debug("colesced seq %llx into %llx new len %d new end seq %llx", |
132 | MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, | |
133 | to->len, MPTCP_SKB_CB(from)->end_seq); | |
ab174ad8 | 134 | MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; |
8268ed4c PA |
135 | kfree_skb_partial(from, fragstolen); |
136 | atomic_add(delta, &sk->sk_rmem_alloc); | |
137 | sk_mem_charge(sk, delta); | |
138 | return true; | |
139 | } | |
140 | ||
ab174ad8 PA |
141 | static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, |
142 | struct sk_buff *from) | |
143 | { | |
144 | if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq) | |
145 | return false; | |
146 | ||
147 | return mptcp_try_coalesce((struct sock *)msk, to, from); | |
148 | } | |
149 | ||
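mptcp_ooo_try_coalesce() above only hands a segment to mptcp_try_coalesce() when the new segment starts exactly where the previous one ends at the MPTCP sequence level; anything else would either leave a hole or duplicate bytes. A small self-contained sketch of that adjacency test and merge, using illustrative names rather than the kernel structures:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint64_t map_seq;	/* MPTCP sequence of the first byte */
	uint64_t end_seq;	/* one past the last byte */
};

/* only segments that start exactly at the tail's end are merge candidates */
static bool can_coalesce(const struct seg *to, const struct seg *from)
{
	return from->map_seq == to->end_seq;
}

/* merge 'from' into 'to' by extending the covered range */
static void coalesce(struct seg *to, const struct seg *from)
{
	to->end_seq = from->end_seq;
}

int main(void)
{
	struct seg tail = { .map_seq = 100, .end_seq = 200 };
	struct seg next = { .map_seq = 200, .end_seq = 260 };
	struct seg gap  = { .map_seq = 300, .end_seq = 350 };

	if (can_coalesce(&tail, &next))
		coalesce(&tail, &next);	/* tail now covers 100..260 */

	printf("tail %llu..%llu, gap mergeable: %d\n",
	       (unsigned long long)tail.map_seq,
	       (unsigned long long)tail.end_seq,
	       can_coalesce(&tail, &gap));	/* 0: hole at 260..300 */
	return 0;
}
```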
150 | /* "inspired" by tcp_data_queue_ofo(), main differences: | |
151 | * - use mptcp seqs | |
152 | * - don't cope with sacks | |
153 | */ | |
154 | static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) | |
155 | { | |
156 | struct sock *sk = (struct sock *)msk; | |
157 | struct rb_node **p, *parent; | |
158 | u64 seq, end_seq, max_seq; | |
159 | struct sk_buff *skb1; | |
c2ec6bc0 | 160 | int space; |
ab174ad8 PA |
161 | |
162 | seq = MPTCP_SKB_CB(skb)->map_seq; | |
163 | end_seq = MPTCP_SKB_CB(skb)->end_seq; | |
c2ec6bc0 YB |
164 | space = tcp_space(sk); |
165 | max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq; | |
ab174ad8 | 166 | |
06242e44 PA |
167 | pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq, |
168 | RB_EMPTY_ROOT(&msk->out_of_order_queue)); | |
ab174ad8 PA |
169 | if (after64(seq, max_seq)) { |
170 | /* out of window */ | |
171 | mptcp_drop(sk, skb); | |
06242e44 | 172 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW); |
ab174ad8 PA |
173 | return; |
174 | } | |
175 | ||
176 | p = &msk->out_of_order_queue.rb_node; | |
06242e44 | 177 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE); |
ab174ad8 PA |
178 | if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { |
179 | rb_link_node(&skb->rbnode, NULL, p); | |
180 | rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); | |
181 | msk->ooo_last_skb = skb; | |
182 | goto end; | |
183 | } | |
184 | ||
185 | /* with 2 subflows, adding at the end of the ooo queue is quite likely. | |
186 | * Use of ooo_last_skb avoids the O(log N) rbtree lookup. | |
187 | */ | |
06242e44 PA |
188 | if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { |
189 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); | |
190 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); | |
ab174ad8 | 191 | return; |
06242e44 | 192 | } |
ab174ad8 PA |
193 | |
194 | /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ | |
195 | if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { | |
06242e44 | 196 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); |
ab174ad8 PA |
197 | parent = &msk->ooo_last_skb->rbnode; |
198 | p = &parent->rb_right; | |
199 | goto insert; | |
200 | } | |
201 | ||
202 | /* Find place to insert this segment. Handle overlaps on the way. */ | |
203 | parent = NULL; | |
204 | while (*p) { | |
205 | parent = *p; | |
206 | skb1 = rb_to_skb(parent); | |
207 | if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { | |
208 | p = &parent->rb_left; | |
209 | continue; | |
210 | } | |
211 | if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { | |
212 | if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) { | |
213 | /* All the bits are present. Drop. */ | |
214 | mptcp_drop(sk, skb); | |
06242e44 | 215 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
216 | return; |
217 | } | |
218 | if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { | |
219 | /* partial overlap: | |
220 | * | skb | | |
221 | * | skb1 | | |
222 | * continue traversing | |
223 | */ | |
224 | } else { | |
225 | /* skb's seq == skb1's seq and skb covers skb1. | |
226 | * Replace skb1 with skb. | |
227 | */ | |
228 | rb_replace_node(&skb1->rbnode, &skb->rbnode, | |
229 | &msk->out_of_order_queue); | |
230 | mptcp_drop(sk, skb1); | |
06242e44 | 231 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
232 | goto merge_right; |
233 | } | |
234 | } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { | |
06242e44 | 235 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); |
ab174ad8 PA |
236 | return; |
237 | } | |
238 | p = &parent->rb_right; | |
239 | } | |
06242e44 | 240 | |
ab174ad8 PA |
241 | insert: |
242 | /* Insert segment into RB tree. */ | |
243 | rb_link_node(&skb->rbnode, parent, p); | |
244 | rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); | |
245 | ||
246 | merge_right: | |
247 | /* Remove other segments covered by skb. */ | |
248 | while ((skb1 = skb_rb_next(skb)) != NULL) { | |
249 | if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) | |
250 | break; | |
251 | rb_erase(&skb1->rbnode, &msk->out_of_order_queue); | |
252 | mptcp_drop(sk, skb1); | |
06242e44 | 253 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
254 | } |
255 | /* If there is no skb after us, we are the last_skb ! */ | |
256 | if (!skb1) | |
257 | msk->ooo_last_skb = skb; | |
258 | ||
259 | end: | |
260 | skb_condense(skb); | |
261 | skb_set_owner_r(skb, sk); | |
262 | } | |
263 | ||
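Throughout the out-of-order handling above, 64-bit data sequence numbers are compared with before64()/after64() rather than plain relational operators, so the ordering stays correct even if the sequence space wraps. The helpers live in the MPTCP headers; the sketch below is an illustrative re-implementation of the usual signed-difference idiom, not the kernel's code.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* wraparound-safe 64-bit sequence comparison: interpret the unsigned
 * difference as signed, so values that wrapped past 2^64 still compare
 * as "later"
 */
static bool before64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1)	before64(seq1, seq2)

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 10;	/* just before wraparound */
	uint64_t wrapped = 5;			/* 16 bytes later, after the wrap */

	printf("%d\n", after64(wrapped, near_wrap));	/* 1: still "after" */
	printf("%d\n", before64(100, 200));		/* 1 */
	printf("%d\n", before64(200, 100));		/* 0 */
	return 0;
}
```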
264 | static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, | |
265 | struct sk_buff *skb, unsigned int offset, | |
266 | size_t copy_len) | |
6771bfd9 | 267 | { |
ab174ad8 | 268 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
6771bfd9 | 269 | struct sock *sk = (struct sock *)msk; |
4e637c70 | 270 | struct sk_buff *tail; |
6771bfd9 FW |
271 | |
272 | __skb_unlink(skb, &ssk->sk_receive_queue); | |
6771bfd9 | 273 | |
4e637c70 FW |
274 | skb_ext_reset(skb); |
275 | skb_orphan(skb); | |
ab174ad8 | 276 | |
9c3f94e1 PA |
277 | /* try to fetch required memory from subflow */ |
278 | if (!sk_rmem_schedule(sk, skb, skb->truesize)) { | |
279 | if (ssk->sk_forward_alloc < skb->truesize) | |
280 | goto drop; | |
281 | __sk_mem_reclaim(ssk, skb->truesize); | |
282 | if (!sk_rmem_schedule(sk, skb, skb->truesize)) | |
283 | goto drop; | |
284 | } | |
285 | ||
ab174ad8 PA |
286 | /* the skb map_seq accounts for the skb offset: |
287 | * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq | |
288 | * value | |
289 | */ | |
290 | MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); | |
291 | MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; | |
8268ed4c | 292 | MPTCP_SKB_CB(skb)->offset = offset; |
4e637c70 | 293 | |
ab174ad8 PA |
294 | if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { |
295 | /* in sequence */ | |
8b0308fe | 296 | WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); |
ab174ad8 PA |
297 | tail = skb_peek_tail(&sk->sk_receive_queue); |
298 | if (tail && mptcp_try_coalesce(sk, tail, skb)) | |
299 | return true; | |
4e637c70 | 300 | |
ab174ad8 PA |
301 | skb_set_owner_r(skb, sk); |
302 | __skb_queue_tail(&sk->sk_receive_queue, skb); | |
303 | return true; | |
304 | } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { | |
305 | mptcp_data_queue_ofo(msk, skb); | |
306 | return false; | |
307 | } | |
308 | ||
309 | /* old data, keep it simple and drop the whole pkt, sender | |
310 | * will retransmit as needed. |
311 | */ | |
06242e44 | 312 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
9c3f94e1 | 313 | drop: |
ab174ad8 PA |
314 | mptcp_drop(sk, skb); |
315 | return false; | |
6771bfd9 FW |
316 | } |
317 | ||
16a9a9da MM |
318 | static void mptcp_stop_timer(struct sock *sk) |
319 | { | |
320 | struct inet_connection_sock *icsk = inet_csk(sk); | |
321 | ||
322 | sk_stop_timer(sk, &icsk->icsk_retransmit_timer); | |
323 | mptcp_sk(sk)->timer_ival = 0; | |
324 | } | |
325 | ||
16a9a9da MM |
326 | static void mptcp_check_data_fin_ack(struct sock *sk) |
327 | { | |
328 | struct mptcp_sock *msk = mptcp_sk(sk); | |
329 | ||
330 | if (__mptcp_check_fallback(msk)) | |
331 | return; | |
332 | ||
333 | /* Look for an acknowledged DATA_FIN */ | |
334 | if (((1 << sk->sk_state) & | |
335 | (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && | |
336 | msk->write_seq == atomic64_read(&msk->snd_una)) { | |
337 | mptcp_stop_timer(sk); | |
338 | ||
339 | WRITE_ONCE(msk->snd_data_fin_enable, 0); | |
340 | ||
341 | switch (sk->sk_state) { | |
342 | case TCP_FIN_WAIT1: | |
343 | inet_sk_state_store(sk, TCP_FIN_WAIT2); | |
344 | sk->sk_state_change(sk); | |
345 | break; | |
346 | case TCP_CLOSING: | |
16a9a9da MM |
347 | case TCP_LAST_ACK: |
348 | inet_sk_state_store(sk, TCP_CLOSE); | |
349 | sk->sk_state_change(sk); | |
350 | break; | |
351 | } | |
352 | ||
353 | if (sk->sk_shutdown == SHUTDOWN_MASK || | |
354 | sk->sk_state == TCP_CLOSE) | |
355 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); | |
356 | else | |
357 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | |
358 | } | |
359 | } | |
360 | ||
3721b9b6 MM |
361 | static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) |
362 | { | |
363 | struct mptcp_sock *msk = mptcp_sk(sk); | |
364 | ||
365 | if (READ_ONCE(msk->rcv_data_fin) && | |
366 | ((1 << sk->sk_state) & | |
367 | (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { | |
368 | u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); | |
369 | ||
370 | if (msk->ack_seq == rcv_data_fin_seq) { | |
371 | if (seq) | |
372 | *seq = rcv_data_fin_seq; | |
373 | ||
374 | return true; | |
375 | } | |
376 | } | |
377 | ||
378 | return false; | |
379 | } | |
380 | ||
381 | static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk) | |
382 | { | |
383 | long tout = ssk && inet_csk(ssk)->icsk_pending ? | |
384 | inet_csk(ssk)->icsk_timeout - jiffies : 0; | |
385 | ||
386 | if (tout <= 0) | |
387 | tout = mptcp_sk(sk)->timer_ival; | |
388 | mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; | |
389 | } | |
390 | ||
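mptcp_set_timeout() derives the MPTCP-level retransmit interval from whatever time is still left on the subflow's retransmit timer, keeps the previously cached interval when the subflow timer is idle, and never goes below TCP_RTO_MIN. A small sketch of that selection in plain milliseconds; the names and the 200 ms floor are illustrative stand-ins, not kernel values.

```c
#include <stdio.h>

#define RTO_MIN_MS	200	/* illustrative stand-in for TCP_RTO_MIN */

/* pick the next retransmit interval: time left on the subflow timer if it
 * is running, else the cached interval, else the minimum RTO
 */
static long next_timeout_ms(long subflow_expiry_ms, long now_ms, long cached_ms)
{
	long tout = subflow_expiry_ms ? subflow_expiry_ms - now_ms : 0;

	if (tout <= 0)
		tout = cached_ms;
	return tout > 0 ? tout : RTO_MIN_MS;
}

int main(void)
{
	printf("%ld\n", next_timeout_ms(1500, 1000, 0));	/* 500: follow the subflow timer */
	printf("%ld\n", next_timeout_ms(0, 1000, 320));		/* 320: keep the cached value */
	printf("%ld\n", next_timeout_ms(0, 1000, 0));		/* 200: RTO_MIN floor */
	return 0;
}
```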
391 | static void mptcp_check_data_fin(struct sock *sk) | |
392 | { | |
393 | struct mptcp_sock *msk = mptcp_sk(sk); | |
394 | u64 rcv_data_fin_seq; | |
395 | ||
396 | if (__mptcp_check_fallback(msk) || !msk->first) | |
397 | return; | |
398 | ||
399 | /* Need to ack a DATA_FIN received from a peer while this side | |
400 | * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. | |
401 | * msk->rcv_data_fin was set when parsing the incoming options | |
402 | * at the subflow level and the msk lock was not held, so this | |
403 | * is the first opportunity to act on the DATA_FIN and change | |
404 | * the msk state. | |
405 | * | |
406 | * If we are caught up to the sequence number of the incoming | |
407 | * DATA_FIN, send the DATA_ACK now and do state transition. If | |
408 | * not caught up, do nothing and let the recv code send DATA_ACK | |
409 | * when catching up. | |
410 | */ | |
411 | ||
412 | if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { | |
413 | struct mptcp_subflow_context *subflow; | |
414 | ||
917944da | 415 | WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); |
3721b9b6 MM |
416 | WRITE_ONCE(msk->rcv_data_fin, 0); |
417 | ||
418 | sk->sk_shutdown |= RCV_SHUTDOWN; | |
16a9a9da MM |
419 | smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ |
420 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
3721b9b6 MM |
421 | |
422 | switch (sk->sk_state) { | |
423 | case TCP_ESTABLISHED: | |
424 | inet_sk_state_store(sk, TCP_CLOSE_WAIT); | |
425 | break; | |
426 | case TCP_FIN_WAIT1: | |
427 | inet_sk_state_store(sk, TCP_CLOSING); | |
428 | break; | |
429 | case TCP_FIN_WAIT2: | |
430 | inet_sk_state_store(sk, TCP_CLOSE); | |
431 | // @@ Close subflows now? | |
432 | break; | |
433 | default: | |
434 | /* Other states not expected */ | |
435 | WARN_ON_ONCE(1); | |
436 | break; | |
437 | } | |
438 | ||
439 | mptcp_set_timeout(sk, NULL); | |
440 | mptcp_for_each_subflow(msk, subflow) { | |
441 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
442 | ||
443 | lock_sock(ssk); | |
444 | tcp_send_ack(ssk); | |
445 | release_sock(ssk); | |
446 | } | |
447 | ||
448 | sk->sk_state_change(sk); | |
449 | ||
450 | if (sk->sk_shutdown == SHUTDOWN_MASK || | |
451 | sk->sk_state == TCP_CLOSE) | |
452 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); | |
453 | else | |
454 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | |
455 | } | |
456 | } | |
457 | ||
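mptcp_check_data_fin() above is where the connection-level state machine reacts to a DATA_FIN that can now be acked: ESTABLISHED moves to CLOSE_WAIT, FIN_WAIT1 to CLOSING, and FIN_WAIT2 straight to CLOSE. The sketch below captures just that transition table as a standalone function; it is a simplification and leaves out the ack_seq bookkeeping, the per-subflow acks and the wakeups performed by the kernel code.

```c
#include <stdio.h>

enum conn_state { ESTABLISHED, FIN_WAIT1, FIN_WAIT2, CLOSE_WAIT, CLOSING, CLOSE };

/* state reached after acking the peer's DATA_FIN, mirroring the switch in
 * mptcp_check_data_fin(); other states are not expected to see a DATA_FIN
 */
static enum conn_state on_data_fin(enum conn_state s)
{
	switch (s) {
	case ESTABLISHED:
		return CLOSE_WAIT;	/* peer closed first */
	case FIN_WAIT1:
		return CLOSING;		/* simultaneous close */
	case FIN_WAIT2:
		return CLOSE;		/* our own FIN was already acked */
	default:
		return s;		/* unexpected, leave unchanged */
	}
}

int main(void)
{
	printf("%d %d %d\n", on_data_fin(ESTABLISHED),
	       on_data_fin(FIN_WAIT1), on_data_fin(FIN_WAIT2));
	return 0;
}
```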
6771bfd9 FW |
458 | static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, |
459 | struct sock *ssk, | |
460 | unsigned int *bytes) | |
461 | { | |
462 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
600911ff | 463 | struct sock *sk = (struct sock *)msk; |
6771bfd9 FW |
464 | unsigned int moved = 0; |
465 | bool more_data_avail; | |
466 | struct tcp_sock *tp; | |
717f2034 | 467 | u32 old_copied_seq; |
6771bfd9 | 468 | bool done = false; |
13c7ba0c FW |
469 | int sk_rbuf; |
470 | ||
471 | sk_rbuf = READ_ONCE(sk->sk_rcvbuf); | |
472 | ||
473 | if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { | |
474 | int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); | |
475 | ||
476 | if (unlikely(ssk_rbuf > sk_rbuf)) { | |
477 | WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); | |
478 | sk_rbuf = ssk_rbuf; | |
479 | } | |
480 | } | |
600911ff | 481 | |
ab174ad8 | 482 | pr_debug("msk=%p ssk=%p", msk, ssk); |
6771bfd9 | 483 | tp = tcp_sk(ssk); |
717f2034 | 484 | old_copied_seq = tp->copied_seq; |
6771bfd9 FW |
485 | do { |
486 | u32 map_remaining, offset; | |
487 | u32 seq = tp->copied_seq; | |
488 | struct sk_buff *skb; | |
489 | bool fin; | |
490 | ||
491 | /* try to move as much data as available */ | |
492 | map_remaining = subflow->map_data_len - | |
493 | mptcp_subflow_get_map_offset(subflow); | |
494 | ||
495 | skb = skb_peek(&ssk->sk_receive_queue); | |
d9fb8c50 PA |
496 | if (!skb) { |
497 | /* if no data is found, a racing workqueue/recvmsg | |
498 | * already processed the new data, stop here or we | |
499 | * can enter an infinite loop | |
500 | */ | |
501 | if (!moved) | |
502 | done = true; | |
6771bfd9 | 503 | break; |
d9fb8c50 | 504 | } |
6771bfd9 | 505 | |
e1ff9e82 DC |
506 | if (__mptcp_check_fallback(msk)) { |
507 | /* if we are running under the workqueue, TCP could have | |
508 | * collapsed skbs between dummy map creation and now | |
509 | * be sure to adjust the size | |
510 | */ | |
511 | map_remaining = skb->len; | |
512 | subflow->map_data_len = skb->len; | |
513 | } | |
514 | ||
6771bfd9 FW |
515 | offset = seq - TCP_SKB_CB(skb)->seq; |
516 | fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; | |
517 | if (fin) { | |
518 | done = true; | |
519 | seq++; | |
520 | } | |
521 | ||
522 | if (offset < skb->len) { | |
523 | size_t len = skb->len - offset; | |
524 | ||
525 | if (tp->urg_data) | |
526 | done = true; | |
527 | ||
ab174ad8 PA |
528 | if (__mptcp_move_skb(msk, ssk, skb, offset, len)) |
529 | moved += len; | |
6771bfd9 | 530 | seq += len; |
6771bfd9 FW |
531 | |
532 | if (WARN_ON_ONCE(map_remaining < len)) | |
533 | break; | |
534 | } else { | |
535 | WARN_ON_ONCE(!fin); | |
536 | sk_eat_skb(ssk, skb); | |
537 | done = true; | |
538 | } | |
539 | ||
540 | WRITE_ONCE(tp->copied_seq, seq); | |
541 | more_data_avail = mptcp_subflow_data_available(ssk); | |
600911ff | 542 | |
13c7ba0c | 543 | if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) { |
600911ff FW |
544 | done = true; |
545 | break; | |
546 | } | |
6771bfd9 FW |
547 | } while (more_data_avail); |
548 | ||
6719331c | 549 | *bytes += moved; |
717f2034 PA |
550 | if (tp->copied_seq != old_copied_seq) |
551 | tcp_cleanup_rbuf(ssk, 1); | |
6771bfd9 FW |
552 | |
553 | return done; | |
554 | } | |
555 | ||
ab174ad8 PA |
556 | static bool mptcp_ofo_queue(struct mptcp_sock *msk) |
557 | { | |
558 | struct sock *sk = (struct sock *)msk; | |
559 | struct sk_buff *skb, *tail; | |
560 | bool moved = false; | |
561 | struct rb_node *p; | |
562 | u64 end_seq; | |
563 | ||
564 | p = rb_first(&msk->out_of_order_queue); | |
06242e44 | 565 | pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); |
ab174ad8 PA |
566 | while (p) { |
567 | skb = rb_to_skb(p); | |
568 | if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) | |
569 | break; | |
570 | ||
571 | p = rb_next(p); | |
572 | rb_erase(&skb->rbnode, &msk->out_of_order_queue); | |
573 | ||
574 | if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq, | |
575 | msk->ack_seq))) { | |
576 | mptcp_drop(sk, skb); | |
06242e44 | 577 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
578 | continue; |
579 | } | |
580 | ||
581 | end_seq = MPTCP_SKB_CB(skb)->end_seq; | |
582 | tail = skb_peek_tail(&sk->sk_receive_queue); | |
583 | if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { | |
584 | int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; | |
585 | ||
586 | /* skip overlapping data, if any */ | |
06242e44 PA |
587 | pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d", |
588 | MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, | |
589 | delta); | |
ab174ad8 PA |
590 | MPTCP_SKB_CB(skb)->offset += delta; |
591 | __skb_queue_tail(&sk->sk_receive_queue, skb); | |
592 | } | |
593 | msk->ack_seq = end_seq; | |
594 | moved = true; | |
595 | } | |
596 | return moved; | |
597 | } | |
598 | ||
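When mptcp_ofo_queue() finally queues a stored segment, part of it may already have been delivered in order; the code then bumps the per-skb offset by ack_seq - map_seq so the reader skips the duplicate prefix, and advances ack_seq to the segment's end. A minimal sketch of that bookkeeping follows, using plain comparisons instead of the wraparound-safe helpers for brevity; the names are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint64_t map_seq;	/* MPTCP sequence of the first byte */
	uint64_t end_seq;	/* one past the last byte */
	uint32_t offset;	/* bytes the reader should skip */
};

/* queue a segment that may partially overlap data already delivered up to
 * ack_seq: skip the duplicate prefix instead of delivering it twice
 */
static uint64_t deliver(struct seg *s, uint64_t ack_seq)
{
	if (s->map_seq < ack_seq)
		s->offset += ack_seq - s->map_seq;	/* skip the overlap */
	return s->end_seq;				/* new ack_seq */
}

int main(void)
{
	struct seg s = { .map_seq = 90, .end_seq = 150, .offset = 0 };
	uint64_t ack_seq = 100;		/* bytes up to 100 already in order */

	ack_seq = deliver(&s, ack_seq);
	printf("offset=%u ack_seq=%llu\n", s.offset,
	       (unsigned long long)ack_seq);	/* offset=10 ack_seq=150 */
	return 0;
}
```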
2e52213c FW |
599 | /* In most cases we will be able to lock the mptcp socket. If it's already |
600 | * owned, we need to defer to the work queue to avoid ABBA deadlock. | |
601 | */ | |
602 | static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) | |
603 | { | |
604 | struct sock *sk = (struct sock *)msk; | |
605 | unsigned int moved = 0; | |
606 | ||
607 | if (READ_ONCE(sk->sk_lock.owned)) | |
608 | return false; | |
609 | ||
610 | if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock))) | |
611 | return false; | |
612 | ||
613 | /* must re-check after taking the lock */ | |
ab174ad8 | 614 | if (!READ_ONCE(sk->sk_lock.owned)) { |
2e52213c | 615 | __mptcp_move_skbs_from_subflow(msk, ssk, &moved); |
ab174ad8 PA |
616 | mptcp_ofo_queue(msk); |
617 | ||
618 | /* If the moves have caught up with the DATA_FIN sequence number | |
619 | * it's time to ack the DATA_FIN and change socket state, but | |
620 | * this is not a good place to change state. Let the workqueue | |
621 | * do it. | |
622 | */ | |
ba8f48f7 PA |
623 | if (mptcp_pending_data_fin(sk, NULL)) |
624 | mptcp_schedule_work(sk); | |
ab174ad8 | 625 | } |
2e52213c FW |
626 | |
627 | spin_unlock_bh(&sk->sk_lock.slock); | |
628 | ||
629 | return moved > 0; | |
630 | } | |
631 | ||
632 | void mptcp_data_ready(struct sock *sk, struct sock *ssk) | |
101f6f85 | 633 | { |
6719331c | 634 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
101f6f85 | 635 | struct mptcp_sock *msk = mptcp_sk(sk); |
13c7ba0c | 636 | int sk_rbuf, ssk_rbuf; |
6719331c | 637 | bool wake; |
101f6f85 | 638 | |
6719331c PA |
639 | /* move_skbs_to_msk below can legitimately clear the data_avail flag, |
640 | * but we will later need to properly wake the reader, so cache its | |
641 | * value | |
642 | */ | |
643 | wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL; | |
644 | if (wake) | |
645 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
6771bfd9 | 646 | |
13c7ba0c FW |
647 | ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); |
648 | sk_rbuf = READ_ONCE(sk->sk_rcvbuf); | |
649 | if (unlikely(ssk_rbuf > sk_rbuf)) | |
650 | sk_rbuf = ssk_rbuf; | |
651 | ||
652 | /* over limit? can't append more skbs to msk */ | |
653 | if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) | |
2e52213c FW |
654 | goto wake; |
655 | ||
13c7ba0c | 656 | if (move_skbs_to_msk(msk, ssk)) |
600911ff FW |
657 | goto wake; |
658 | ||
14c441b5 PA |
659 | /* mptcp socket is owned, release_cb should retry */ |
660 | if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, | |
661 | &sk->sk_tsq_flags)) { | |
662 | sock_hold(sk); | |
6771bfd9 | 663 | |
14c441b5 PA |
664 | /* need to try again, it's possible release_cb() has already |
665 | * been called after the test_and_set_bit() above. | |
666 | */ | |
667 | move_skbs_to_msk(msk, ssk); | |
668 | } | |
600911ff | 669 | wake: |
6719331c PA |
670 | if (wake) |
671 | sk->sk_data_ready(sk); | |
101f6f85 FW |
672 | } |
673 | ||
ec3edaa7 PK |
674 | static void __mptcp_flush_join_list(struct mptcp_sock *msk) |
675 | { | |
676 | if (likely(list_empty(&msk->join_list))) | |
677 | return; | |
678 | ||
679 | spin_lock_bh(&msk->join_list_lock); | |
680 | list_splice_tail_init(&msk->join_list, &msk->conn_list); | |
681 | spin_unlock_bh(&msk->join_list_lock); | |
682 | } | |
683 | ||
b51f9b80 PA |
684 | static bool mptcp_timer_pending(struct sock *sk) |
685 | { | |
686 | return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); | |
687 | } | |
688 | ||
689 | static void mptcp_reset_timer(struct sock *sk) | |
690 | { | |
691 | struct inet_connection_sock *icsk = inet_csk(sk); | |
692 | unsigned long tout; | |
693 | ||
694 | /* should never be called with mptcp level timer cleared */ | |
695 | tout = READ_ONCE(mptcp_sk(sk)->timer_ival); | |
696 | if (WARN_ON_ONCE(!tout)) | |
697 | tout = TCP_RTO_MIN; | |
698 | sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout); | |
699 | } | |
700 | ||
ba8f48f7 PA |
701 | bool mptcp_schedule_work(struct sock *sk) |
702 | { | |
703 | if (inet_sk_state_load(sk) != TCP_CLOSE && | |
704 | schedule_work(&mptcp_sk(sk)->work)) { | |
705 | /* each subflow already holds a reference to the sk, and the | |
706 | * workqueue is invoked by a subflow, so sk can't go away here. | |
707 | */ | |
708 | sock_hold(sk); | |
709 | return true; | |
710 | } | |
711 | return false; | |
712 | } | |
713 | ||
b51f9b80 PA |
714 | void mptcp_data_acked(struct sock *sk) |
715 | { | |
716 | mptcp_reset_timer(sk); | |
3b1d6210 | 717 | |
63561a40 | 718 | if ((!test_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags) || |
ba8f48f7 PA |
719 | (inet_sk_state_load(sk) != TCP_ESTABLISHED))) |
720 | mptcp_schedule_work(sk); | |
b51f9b80 PA |
721 | } |
722 | ||
59832e24 FW |
723 | void mptcp_subflow_eof(struct sock *sk) |
724 | { | |
ba8f48f7 PA |
725 | if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags)) |
726 | mptcp_schedule_work(sk); | |
59832e24 FW |
727 | } |
728 | ||
5969856a PA |
729 | static void mptcp_check_for_eof(struct mptcp_sock *msk) |
730 | { | |
731 | struct mptcp_subflow_context *subflow; | |
732 | struct sock *sk = (struct sock *)msk; | |
733 | int receivers = 0; | |
734 | ||
735 | mptcp_for_each_subflow(msk, subflow) | |
736 | receivers += !subflow->rx_eof; | |
737 | ||
738 | if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) { | |
739 | /* hopefully temporary hack: propagate shutdown status | |
740 | * to msk, when all subflows agree on it | |
741 | */ | |
742 | sk->sk_shutdown |= RCV_SHUTDOWN; | |
743 | ||
744 | smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ | |
745 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
746 | sk->sk_data_ready(sk); | |
747 | } | |
748 | } | |
749 | ||
6d0060f6 MM |
750 | static bool mptcp_ext_cache_refill(struct mptcp_sock *msk) |
751 | { | |
4930f483 FW |
752 | const struct sock *sk = (const struct sock *)msk; |
753 | ||
6d0060f6 | 754 | if (!msk->cached_ext) |
4930f483 | 755 | msk->cached_ext = __skb_ext_alloc(sk->sk_allocation); |
6d0060f6 MM |
756 | |
757 | return !!msk->cached_ext; | |
758 | } | |
759 | ||
7a6a6cbc PA |
760 | static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk) |
761 | { | |
762 | struct mptcp_subflow_context *subflow; | |
763 | struct sock *sk = (struct sock *)msk; | |
764 | ||
765 | sock_owned_by_me(sk); | |
766 | ||
767 | mptcp_for_each_subflow(msk, subflow) { | |
768 | if (subflow->data_avail) | |
769 | return mptcp_subflow_tcp_sock(subflow); | |
770 | } | |
771 | ||
772 | return NULL; | |
773 | } | |
774 | ||
3f8e0aae PA |
775 | static bool mptcp_skb_can_collapse_to(u64 write_seq, |
776 | const struct sk_buff *skb, | |
777 | const struct mptcp_ext *mpext) | |
57040755 PA |
778 | { |
779 | if (!tcp_skb_can_collapse_to(skb)) | |
780 | return false; | |
781 | ||
5a369ca6 PA |
782 | /* can collapse only if MPTCP level sequence is in order and this |
783 | * mapping has not been xmitted yet | |
784 | */ | |
785 | return mpext && mpext->data_seq + mpext->data_len == write_seq && | |
786 | !mpext->frozen; | |
57040755 PA |
787 | } |
788 | ||
18b683bf PA |
789 | static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, |
790 | const struct page_frag *pfrag, | |
791 | const struct mptcp_data_frag *df) | |
792 | { | |
793 | return df && pfrag->page == df->page && | |
794 | df->data_seq + df->data_len == msk->write_seq; | |
795 | } | |
796 | ||
d027236c PA |
797 | static void dfrag_uncharge(struct sock *sk, int len) |
798 | { | |
799 | sk_mem_uncharge(sk, len); | |
7948f6cc | 800 | sk_wmem_queued_add(sk, -len); |
d027236c PA |
801 | } |
802 | ||
803 | static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) | |
18b683bf | 804 | { |
d027236c PA |
805 | int len = dfrag->data_len + dfrag->overhead; |
806 | ||
18b683bf | 807 | list_del(&dfrag->list); |
d027236c | 808 | dfrag_uncharge(sk, len); |
18b683bf PA |
809 | put_page(dfrag->page); |
810 | } | |
811 | ||
63561a40 PA |
812 | static bool mptcp_is_writeable(struct mptcp_sock *msk) |
813 | { | |
814 | struct mptcp_subflow_context *subflow; | |
815 | ||
816 | if (!sk_stream_is_writeable((struct sock *)msk)) | |
817 | return false; | |
818 | ||
819 | mptcp_for_each_subflow(msk, subflow) { | |
820 | if (sk_stream_is_writeable(subflow->tcp_sock)) | |
821 | return true; | |
822 | } | |
823 | return false; | |
824 | } | |
825 | ||
18b683bf PA |
826 | static void mptcp_clean_una(struct sock *sk) |
827 | { | |
828 | struct mptcp_sock *msk = mptcp_sk(sk); | |
829 | struct mptcp_data_frag *dtmp, *dfrag; | |
d027236c | 830 | bool cleaned = false; |
e1ff9e82 DC |
831 | u64 snd_una; |
832 | ||
833 | /* on fallback we just need to ignore snd_una, as this is really | |
834 | * plain TCP | |
835 | */ | |
836 | if (__mptcp_check_fallback(msk)) | |
837 | atomic64_set(&msk->snd_una, msk->write_seq); | |
838 | snd_una = atomic64_read(&msk->snd_una); | |
18b683bf PA |
839 | |
840 | list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { | |
841 | if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) | |
842 | break; | |
843 | ||
d027236c PA |
844 | dfrag_clear(sk, dfrag); |
845 | cleaned = true; | |
846 | } | |
847 | ||
7948f6cc FW |
848 | dfrag = mptcp_rtx_head(sk); |
849 | if (dfrag && after64(snd_una, dfrag->data_seq)) { | |
53eb4c38 PA |
850 | u64 delta = snd_una - dfrag->data_seq; |
851 | ||
852 | if (WARN_ON_ONCE(delta > dfrag->data_len)) | |
853 | goto out; | |
7948f6cc FW |
854 | |
855 | dfrag->data_seq += delta; | |
53eb4c38 | 856 | dfrag->offset += delta; |
7948f6cc FW |
857 | dfrag->data_len -= delta; |
858 | ||
859 | dfrag_uncharge(sk, delta); | |
860 | cleaned = true; | |
861 | } | |
862 | ||
53eb4c38 | 863 | out: |
95ed690e | 864 | if (cleaned) |
d027236c | 865 | sk_mem_reclaim_partial(sk); |
95ed690e | 866 | } |
7948f6cc | 867 | |
95ed690e FW |
868 | static void mptcp_clean_una_wakeup(struct sock *sk) |
869 | { | |
870 | struct mptcp_sock *msk = mptcp_sk(sk); | |
63561a40 | 871 | |
95ed690e FW |
872 | mptcp_clean_una(sk); |
873 | ||
874 | /* Only wake up writers if a subflow is ready */ | |
875 | if (mptcp_is_writeable(msk)) { | |
876 | set_bit(MPTCP_SEND_SPACE, &msk->flags); | |
877 | smp_mb__after_atomic(); | |
878 | ||
879 | /* set SEND_SPACE before sk_stream_write_space clears | |
880 | * NOSPACE | |
881 | */ | |
882 | sk_stream_write_space(sk); | |
18b683bf PA |
883 | } |
884 | } | |
885 | ||
886 | /* ensure we get enough memory for the frag hdr, beyond some minimal amount of | |
887 | * data | |
888 | */ | |
889 | static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) | |
890 | { | |
891 | if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag), | |
892 | pfrag, sk->sk_allocation))) | |
893 | return true; | |
894 | ||
895 | sk->sk_prot->enter_memory_pressure(sk); | |
896 | sk_stream_moderate_sndbuf(sk); | |
897 | return false; | |
898 | } | |
899 | ||
900 | static struct mptcp_data_frag * | |
901 | mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, | |
902 | int orig_offset) | |
903 | { | |
904 | int offset = ALIGN(orig_offset, sizeof(long)); | |
905 | struct mptcp_data_frag *dfrag; | |
906 | ||
907 | dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset); | |
908 | dfrag->data_len = 0; | |
909 | dfrag->data_seq = msk->write_seq; | |
910 | dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag); | |
911 | dfrag->offset = offset + sizeof(struct mptcp_data_frag); | |
912 | dfrag->page = pfrag->page; | |
913 | ||
914 | return dfrag; | |
915 | } | |
916 | ||
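mptcp_carve_data_frag() places the retransmit bookkeeping in the same page fragment that will hold the payload: the current write offset is rounded up to a long boundary, the struct lives there, the data starts right after it, and the alignment padding plus header size is what later gets charged as "overhead". A userspace sketch of the same carving arithmetic over a plain malloc'd buffer follows; the names are illustrative and there is no real page-frag allocator here.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* retransmit bookkeeping carved from the same buffer that holds the data */
struct data_frag {
	uint64_t data_seq;	/* MPTCP sequence of the first payload byte */
	int data_len;		/* payload bytes stored so far */
	int offset;		/* where the payload starts inside the page */
	int overhead;		/* alignment padding + sizeof(metadata) */
};

static struct data_frag *carve(char *page, int orig_offset, uint64_t write_seq)
{
	int offset = ALIGN_UP(orig_offset, (int)sizeof(long));
	struct data_frag *df = (struct data_frag *)(page + offset);

	df->data_len = 0;
	df->data_seq = write_seq;
	df->overhead = offset - orig_offset + (int)sizeof(*df);
	df->offset = offset + (int)sizeof(*df);	/* payload follows the header */
	return df;
}

int main(void)
{
	char *page = malloc(4096);
	struct data_frag *df = carve(page, 13, 1000);

	/* 13 rounds up to 16, so 3 bytes of padding plus the header itself */
	printf("offset=%d overhead=%d\n", df->offset, df->overhead);
	free(page);
	return 0;
}
```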
caf971df PA |
917 | struct mptcp_sendmsg_info { |
918 | int mss_now; | |
919 | int size_goal; | |
920 | }; | |
921 | ||
6d0060f6 | 922 | static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, |
3f8e0aae | 923 | struct msghdr *msg, struct mptcp_data_frag *dfrag, |
caf971df | 924 | struct mptcp_sendmsg_info *info) |
6d0060f6 | 925 | { |
caf971df | 926 | int avail_size, offset, ret, frag_truesize = 0; |
18b683bf | 927 | bool dfrag_collapsed, can_collapse = false; |
6d0060f6 MM |
928 | struct mptcp_sock *msk = mptcp_sk(sk); |
929 | struct mptcp_ext *mpext = NULL; | |
3f8e0aae | 930 | bool retransmission = !!dfrag; |
57040755 | 931 | struct sk_buff *skb, *tail; |
6d0060f6 | 932 | struct page_frag *pfrag; |
3f8e0aae PA |
933 | struct page *page; |
934 | u64 *write_seq; | |
6d0060f6 MM |
935 | size_t psize; |
936 | ||
937 | /* use the mptcp page cache so that we can easily move the data | |
938 | * from one substream to another, but do per subflow memory accounting | |
3f8e0aae PA |
939 | * Note: pfrag is used only when !retransmission, but the compiler is |
940 | * fooled into a warning if we don't init here | |
6d0060f6 MM |
941 | */ |
942 | pfrag = sk_page_frag(sk); | |
3f8e0aae PA |
943 | if (!retransmission) { |
944 | write_seq = &msk->write_seq; | |
945 | page = pfrag->page; | |
946 | } else { | |
947 | write_seq = &dfrag->data_seq; | |
948 | page = dfrag->page; | |
949 | } | |
6d0060f6 MM |
950 | |
951 | /* compute copy limit */ | |
caf971df PA |
952 | info->mss_now = tcp_send_mss(ssk, &info->size_goal, msg->msg_flags); |
953 | avail_size = info->size_goal; | |
57040755 PA |
954 | skb = tcp_write_queue_tail(ssk); |
955 | if (skb) { | |
956 | mpext = skb_ext_find(skb, SKB_EXT_MPTCP); | |
957 | ||
958 | /* Limit the write to the size available in the | |
959 | * current skb, if any, so that we create at most one new skb. | |
960 | * Explicitly tells TCP internals to avoid collapsing on later | |
961 | * queue management operation, to avoid breaking the ext <-> | |
962 | * SSN association set here | |
963 | */ | |
caf971df | 964 | can_collapse = (info->size_goal - skb->len > 0) && |
3f8e0aae | 965 | mptcp_skb_can_collapse_to(*write_seq, skb, mpext); |
57040755 PA |
966 | if (!can_collapse) |
967 | TCP_SKB_CB(skb)->eor = 1; | |
968 | else | |
caf971df | 969 | avail_size = info->size_goal - skb->len; |
57040755 | 970 | } |
18b683bf | 971 | |
3f8e0aae PA |
972 | if (!retransmission) { |
973 | /* reuse tail pfrag, if possible, or carve a new one from the | |
974 | * page allocator | |
975 | */ | |
976 | dfrag = mptcp_rtx_tail(sk); | |
977 | offset = pfrag->offset; | |
978 | dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); | |
979 | if (!dfrag_collapsed) { | |
980 | dfrag = mptcp_carve_data_frag(msk, pfrag, offset); | |
981 | offset = dfrag->offset; | |
982 | frag_truesize = dfrag->overhead; | |
983 | } | |
984 | psize = min_t(size_t, pfrag->size - offset, avail_size); | |
985 | ||
986 | /* Copy to page */ | |
987 | pr_debug("left=%zu", msg_data_left(msg)); | |
988 | psize = copy_page_from_iter(pfrag->page, offset, | |
989 | min_t(size_t, msg_data_left(msg), | |
990 | psize), | |
991 | &msg->msg_iter); | |
992 | pr_debug("left=%zu", msg_data_left(msg)); | |
993 | if (!psize) | |
994 | return -EINVAL; | |
995 | ||
35759383 FW |
996 | if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) { |
997 | iov_iter_revert(&msg->msg_iter, psize); | |
3f8e0aae | 998 | return -ENOMEM; |
35759383 | 999 | } |
3f8e0aae | 1000 | } else { |
18b683bf | 1001 | offset = dfrag->offset; |
3f8e0aae | 1002 | psize = min_t(size_t, dfrag->data_len, avail_size); |
18b683bf | 1003 | } |
d027236c | 1004 | |
e2223995 PA |
1005 | tail = tcp_build_frag(ssk, psize, msg->msg_flags, page, offset, &psize); |
1006 | if (!tail) { | |
1007 | tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk)); | |
1008 | return -ENOMEM; | |
35759383 | 1009 | } |
18b683bf | 1010 | |
e2223995 | 1011 | ret = psize; |
18b683bf | 1012 | frag_truesize += ret; |
3f8e0aae PA |
1013 | if (!retransmission) { |
1014 | if (unlikely(ret < psize)) | |
1015 | iov_iter_revert(&msg->msg_iter, psize - ret); | |
6d0060f6 | 1016 | |
3f8e0aae PA |
1017 | /* send successful, keep track of sent data for mptcp-level |
1018 | * retransmission | |
1019 | */ | |
1020 | dfrag->data_len += ret; | |
1021 | if (!dfrag_collapsed) { | |
1022 | get_page(dfrag->page); | |
1023 | list_add_tail(&dfrag->list, &msk->rtx_queue); | |
1024 | sk_wmem_queued_add(sk, frag_truesize); | |
1025 | } else { | |
1026 | sk_wmem_queued_add(sk, ret); | |
1027 | } | |
18b683bf | 1028 | |
3f8e0aae PA |
1029 | /* charge data on mptcp rtx queue to the master socket |
1030 | * Note: we charge such data both to sk and ssk | |
1031 | */ | |
1032 | sk->sk_forward_alloc -= frag_truesize; | |
1033 | } | |
d027236c | 1034 | |
e2223995 | 1035 | /* if the tail skb is still the cached one, collapsing really happened. |
57040755 | 1036 | */ |
e2223995 | 1037 | if (skb == tail) { |
57040755 PA |
1038 | WARN_ON_ONCE(!can_collapse); |
1039 | mpext->data_len += ret; | |
1040 | goto out; | |
1041 | } | |
1042 | ||
e2223995 | 1043 | mpext = __skb_ext_set(tail, SKB_EXT_MPTCP, msk->cached_ext); |
6d0060f6 MM |
1044 | msk->cached_ext = NULL; |
1045 | ||
1046 | memset(mpext, 0, sizeof(*mpext)); | |
3f8e0aae | 1047 | mpext->data_seq = *write_seq; |
6d0060f6 MM |
1048 | mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; |
1049 | mpext->data_len = ret; | |
1050 | mpext->use_map = 1; | |
1051 | mpext->dsn64 = 1; | |
1052 | ||
1053 | pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d", | |
1054 | mpext->data_seq, mpext->subflow_seq, mpext->data_len, | |
1055 | mpext->dsn64); | |
1056 | ||
57040755 | 1057 | out: |
3f8e0aae PA |
1058 | if (!retransmission) |
1059 | pfrag->offset += frag_truesize; | |
721e9089 | 1060 | WRITE_ONCE(*write_seq, *write_seq + ret); |
6d0060f6 MM |
1061 | mptcp_subflow_ctx(ssk)->rel_write_seq += ret; |
1062 | ||
6d0060f6 MM |
1063 | return ret; |
1064 | } | |
1065 | ||
63561a40 | 1066 | static void mptcp_nospace(struct mptcp_sock *msk) |
a0e17064 | 1067 | { |
63561a40 PA |
1068 | struct mptcp_subflow_context *subflow; |
1069 | ||
a0e17064 FW |
1070 | clear_bit(MPTCP_SEND_SPACE, &msk->flags); |
1071 | smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */ | |
1072 | ||
63561a40 PA |
1073 | mptcp_for_each_subflow(msk, subflow) { |
1074 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
1075 | struct socket *sock = READ_ONCE(ssk->sk_socket); | |
1076 | ||
1077 | /* enables ssk->write_space() callbacks */ | |
1078 | if (sock) | |
1079 | set_bit(SOCK_NOSPACE, &sock->flags); | |
1080 | } | |
a0e17064 FW |
1081 | } |
1082 | ||
d5f49190 PA |
1083 | static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) |
1084 | { | |
1085 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
1086 | ||
1087 | /* can't send if the JOIN hasn't completed yet (i.e. the subflow is not yet usable for mptcp) */ |
1088 | if (subflow->request_join && !subflow->fully_established) | |
1089 | return false; | |
1090 | ||
1091 | /* only send if our side has not closed yet */ | |
1092 | return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)); | |
1093 | } | |
1094 | ||
1095 | #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ | |
1096 | sizeof(struct tcphdr) - \ | |
1097 | MAX_TCP_OPTION_SPACE - \ | |
1098 | sizeof(struct ipv6hdr) - \ | |
1099 | sizeof(struct frag_hdr)) | |
1100 | ||
1101 | struct subflow_send_info { | |
1102 | struct sock *ssk; | |
1103 | u64 ratio; | |
1104 | }; | |
1105 | ||
da51aef5 PA |
1106 | static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk, |
1107 | u32 *sndbuf) | |
f296234c | 1108 | { |
d5f49190 | 1109 | struct subflow_send_info send_info[2]; |
f296234c | 1110 | struct mptcp_subflow_context *subflow; |
d5f49190 PA |
1111 | int i, nr_active = 0; |
1112 | struct sock *ssk; | |
1113 | u64 ratio; | |
1114 | u32 pace; | |
f296234c | 1115 | |
d5f49190 | 1116 | sock_owned_by_me((struct sock *)msk); |
f296234c | 1117 | |
da51aef5 | 1118 | *sndbuf = 0; |
149f7c71 FW |
1119 | if (!mptcp_ext_cache_refill(msk)) |
1120 | return NULL; | |
1121 | ||
d5f49190 PA |
1122 | if (__mptcp_check_fallback(msk)) { |
1123 | if (!msk->first) | |
f296234c | 1124 | return NULL; |
d5f49190 PA |
1125 | *sndbuf = msk->first->sk_sndbuf; |
1126 | return sk_stream_memory_free(msk->first) ? msk->first : NULL; | |
1127 | } | |
1128 | ||
1129 | /* re-use last subflow, if the burst allows that */ |
1130 | if (msk->last_snd && msk->snd_burst > 0 && | |
1131 | sk_stream_memory_free(msk->last_snd) && | |
1132 | mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) { | |
1133 | mptcp_for_each_subflow(msk, subflow) { | |
1134 | ssk = mptcp_subflow_tcp_sock(subflow); | |
1135 | *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf); | |
f296234c | 1136 | } |
d5f49190 PA |
1137 | return msk->last_snd; |
1138 | } | |
f296234c | 1139 | |
d5f49190 PA |
1140 | /* pick the subflow with the lower wmem/wspace ratio */ |
1141 | for (i = 0; i < 2; ++i) { | |
1142 | send_info[i].ssk = NULL; | |
1143 | send_info[i].ratio = -1; | |
1144 | } | |
1145 | mptcp_for_each_subflow(msk, subflow) { | |
1146 | ssk = mptcp_subflow_tcp_sock(subflow); | |
1147 | if (!mptcp_subflow_active(subflow)) | |
1148 | continue; | |
1149 | ||
1150 | nr_active += !subflow->backup; | |
da51aef5 | 1151 | *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf); |
d5f49190 PA |
1152 | if (!sk_stream_memory_free(subflow->tcp_sock)) |
1153 | continue; | |
f296234c | 1154 | |
d5f49190 PA |
1155 | pace = READ_ONCE(ssk->sk_pacing_rate); |
1156 | if (!pace) | |
f296234c | 1157 | continue; |
f296234c | 1158 | |
d5f49190 PA |
1159 | ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, |
1160 | pace); | |
1161 | if (ratio < send_info[subflow->backup].ratio) { | |
1162 | send_info[subflow->backup].ssk = ssk; | |
1163 | send_info[subflow->backup].ratio = ratio; | |
1164 | } | |
f296234c PK |
1165 | } |
1166 | ||
d5f49190 PA |
1167 | pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld", |
1168 | msk, nr_active, send_info[0].ssk, send_info[0].ratio, | |
1169 | send_info[1].ssk, send_info[1].ratio); | |
1170 | ||
1171 | /* pick the best backup if no other subflow is active */ | |
1172 | if (!nr_active) | |
1173 | send_info[0].ssk = send_info[1].ssk; | |
1174 | ||
1175 | if (send_info[0].ssk) { | |
1176 | msk->last_snd = send_info[0].ssk; | |
1177 | msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE, | |
1178 | sk_stream_wspace(msk->last_snd)); | |
1179 | return msk->last_snd; | |
1180 | } | |
1181 | return NULL; | |
f296234c PK |
1182 | } |
1183 | ||
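mptcp_subflow_get_send() above scores each usable subflow by a fixed-point ratio of queued-but-unsent bytes over the pacing rate, tracks the best candidate separately for regular and backup subflows, and falls back to the best backup only when no regular subflow is active. The sketch below reproduces that selection arithmetic with illustrative structures; it deliberately omits the burst reuse, sndbuf tracking and memory-pressure checks of the real function.

```c
#include <stdint.h>
#include <stdio.h>

struct subflow {
	const char *name;
	int backup;		/* 1 if this path is a last-resort backup */
	int active;		/* established and still usable for sending */
	uint64_t wmem;		/* queued-but-unsent bytes on this subflow */
	uint64_t pacing_rate;	/* bytes per second the stack will pace at */
};

/* pick the active subflow with the lowest queued/pacing-rate ratio,
 * preferring non-backup paths whenever at least one of them is active
 */
static const struct subflow *pick_subflow(const struct subflow *sf, int n)
{
	const struct subflow *best[2] = { NULL, NULL };	/* [0] normal, [1] backup */
	uint64_t best_ratio[2] = { UINT64_MAX, UINT64_MAX };
	int nr_active = 0, i;

	for (i = 0; i < n; i++) {
		uint64_t ratio;

		if (!sf[i].active || !sf[i].pacing_rate)
			continue;

		nr_active += !sf[i].backup;
		ratio = (sf[i].wmem << 32) / sf[i].pacing_rate;
		if (ratio < best_ratio[sf[i].backup]) {
			best_ratio[sf[i].backup] = ratio;
			best[sf[i].backup] = &sf[i];
		}
	}

	/* fall back to the best backup only when no regular subflow is usable */
	return nr_active ? best[0] : best[1];
}

int main(void)
{
	struct subflow sf[] = {
		{ "wifi", 0, 1, 64 * 1024, 10 * 1000 * 1000 },
		{ "lte",  0, 1, 16 * 1024,  2 * 1000 * 1000 },
		{ "dsl",  1, 1, 0,          1 * 1000 * 1000 },
	};

	/* wifi wins: its queued data drains fastest relative to its pacing rate */
	printf("picked %s\n", pick_subflow(sf, 3)->name);
	return 0;
}
```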
63561a40 | 1184 | static void ssk_check_wmem(struct mptcp_sock *msk) |
1891c4a0 | 1185 | { |
63561a40 PA |
1186 | if (unlikely(!mptcp_is_writeable(msk))) |
1187 | mptcp_nospace(msk); | |
1891c4a0 FW |
1188 | } |
1189 | ||
f870fa0b MM |
1190 | static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
1191 | { | |
1192 | struct mptcp_sock *msk = mptcp_sk(sk); | |
caf971df PA |
1193 | struct mptcp_sendmsg_info info = { |
1194 | .mss_now = 0, | |
1195 | .size_goal = 0, | |
1196 | }; | |
17091708 | 1197 | struct page_frag *pfrag; |
6d0060f6 | 1198 | size_t copied = 0; |
cec37a6e | 1199 | struct sock *ssk; |
caf971df | 1200 | int ret = 0; |
da51aef5 | 1201 | u32 sndbuf; |
72511aab | 1202 | bool tx_ok; |
6d0060f6 | 1203 | long timeo; |
f870fa0b MM |
1204 | |
1205 | if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) | |
1206 | return -EOPNOTSUPP; | |
1207 | ||
cec37a6e | 1208 | lock_sock(sk); |
1954b860 MM |
1209 | |
1210 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | |
1211 | ||
1212 | if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { | |
1213 | ret = sk_stream_wait_connect(sk, &timeo); | |
1214 | if (ret) | |
1215 | goto out; | |
1216 | } | |
1217 | ||
17091708 | 1218 | pfrag = sk_page_frag(sk); |
72511aab | 1219 | restart: |
18b683bf PA |
1220 | mptcp_clean_una(sk); |
1221 | ||
57baaf28 MM |
1222 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { |
1223 | ret = -EPIPE; | |
1224 | goto out; | |
1225 | } | |
1226 | ||
ec3edaa7 | 1227 | __mptcp_flush_join_list(msk); |
da51aef5 | 1228 | ssk = mptcp_subflow_get_send(msk, &sndbuf); |
17091708 FW |
1229 | while (!sk_stream_memory_free(sk) || |
1230 | !ssk || | |
1231 | !mptcp_page_frag_refill(ssk, pfrag)) { | |
fb529e62 FW |
1232 | if (ssk) { |
1233 | /* make sure retransmit timer is | |
1234 | * running before we wait for memory. | |
1235 | * | |
1236 | * The retransmit timer might be needed | |
1237 | * to make the peer send an up-to-date | |
1238 | * MPTCP Ack. | |
1239 | */ | |
1240 | mptcp_set_timeout(sk, ssk); | |
1241 | if (!mptcp_timer_pending(sk)) | |
1242 | mptcp_reset_timer(sk); | |
1243 | } | |
1244 | ||
63561a40 | 1245 | mptcp_nospace(msk); |
f296234c PK |
1246 | ret = sk_stream_wait_memory(sk, &timeo); |
1247 | if (ret) | |
1248 | goto out; | |
1249 | ||
18b683bf PA |
1250 | mptcp_clean_una(sk); |
1251 | ||
da51aef5 | 1252 | ssk = mptcp_subflow_get_send(msk, &sndbuf); |
f296234c PK |
1253 | if (list_empty(&msk->conn_list)) { |
1254 | ret = -ENOTCONN; | |
1255 | goto out; | |
1256 | } | |
cec37a6e PK |
1257 | } |
1258 | ||
da51aef5 PA |
1259 | /* do auto tuning */ |
1260 | if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) && | |
1261 | sndbuf > READ_ONCE(sk->sk_sndbuf)) | |
1262 | WRITE_ONCE(sk->sk_sndbuf, sndbuf); | |
1263 | ||
6d0060f6 | 1264 | pr_debug("conn_list->subflow=%p", ssk); |
cec37a6e | 1265 | |
6d0060f6 | 1266 | lock_sock(ssk); |
72511aab FW |
1267 | tx_ok = msg_data_left(msg); |
1268 | while (tx_ok) { | |
caf971df | 1269 | ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &info); |
72511aab FW |
1270 | if (ret < 0) { |
1271 | if (ret == -EAGAIN && timeo > 0) { | |
1272 | mptcp_set_timeout(sk, ssk); | |
1273 | release_sock(ssk); | |
1274 | goto restart; | |
1275 | } | |
6d0060f6 | 1276 | break; |
72511aab | 1277 | } |
6d0060f6 | 1278 | |
d5f49190 PA |
1279 | /* burst can be negative, we will try to move to the next subflow |
1280 | * at selection time, if possible. | |
1281 | */ | |
1282 | msk->snd_burst -= ret; | |
6d0060f6 | 1283 | copied += ret; |
fb529e62 | 1284 | |
72511aab FW |
1285 | tx_ok = msg_data_left(msg); |
1286 | if (!tx_ok) | |
1287 | break; | |
1288 | ||
149f7c71 | 1289 | if (!sk_stream_memory_free(ssk) || |
17091708 | 1290 | !mptcp_page_frag_refill(ssk, pfrag) || |
149f7c71 | 1291 | !mptcp_ext_cache_refill(msk)) { |
caf971df PA |
1292 | tcp_push(ssk, msg->msg_flags, info.mss_now, |
1293 | tcp_sk(ssk)->nonagle, info.size_goal); | |
72511aab FW |
1294 | mptcp_set_timeout(sk, ssk); |
1295 | release_sock(ssk); | |
1296 | goto restart; | |
1297 | } | |
1298 | ||
fb529e62 FW |
1299 | /* memory is charged to mptcp level socket as well, i.e. |
1300 | * if msg is very large, mptcp socket may run out of buffer | |
1301 | * space. mptcp_clean_una() will release data that has | |
1302 | * been acked at mptcp level in the meantime, so there is | |
1303 | * a good chance we can continue sending data right away. | |
72511aab FW |
1304 | * |
1305 | * Normally, when the tcp subflow can accept more data, then | |
1306 | * so can the MPTCP socket. However, we need to cope with | |
1307 | * peers that might lag behind in their MPTCP-level | |
1308 | * acknowledgements, i.e. data might have been acked at | |
1309 | * tcp level only. So, we must also check the MPTCP socket | |
1310 | * limits before we send more data. | |
fb529e62 FW |
1311 | */ |
1312 | if (unlikely(!sk_stream_memory_free(sk))) { | |
caf971df PA |
1313 | tcp_push(ssk, msg->msg_flags, info.mss_now, |
1314 | tcp_sk(ssk)->nonagle, info.size_goal); | |
fb529e62 FW |
1315 | mptcp_clean_una(sk); |
1316 | if (!sk_stream_memory_free(sk)) { | |
1317 | /* can't send more for now, need to wait for | |
1318 | * MPTCP-level ACKs from peer. | |
1319 | * | |
1320 | * Wakeup will happen via mptcp_clean_una(). | |
1321 | */ | |
1322 | mptcp_set_timeout(sk, ssk); | |
1323 | release_sock(ssk); | |
1cec170d | 1324 | goto restart; |
fb529e62 FW |
1325 | } |
1326 | } | |
6d0060f6 MM |
1327 | } |
1328 | ||
b51f9b80 | 1329 | mptcp_set_timeout(sk, ssk); |
57040755 | 1330 | if (copied) { |
caf971df PA |
1331 | tcp_push(ssk, msg->msg_flags, info.mss_now, |
1332 | tcp_sk(ssk)->nonagle, info.size_goal); | |
b51f9b80 PA |
1333 | |
1334 | /* start the timer, if it's not pending */ | |
1335 | if (!mptcp_timer_pending(sk)) | |
1336 | mptcp_reset_timer(sk); | |
57040755 | 1337 | } |
6d0060f6 MM |
1338 | |
1339 | release_sock(ssk); | |
1954b860 | 1340 | out: |
63561a40 | 1341 | ssk_check_wmem(msk); |
cec37a6e | 1342 | release_sock(sk); |
8555c6bf | 1343 | return copied ? : ret; |
f870fa0b MM |
1344 | } |
1345 | ||
7a6a6cbc PA |
1346 | static void mptcp_wait_data(struct sock *sk, long *timeo) |
1347 | { | |
1348 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | |
1349 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1350 | ||
1351 | add_wait_queue(sk_sleep(sk), &wait); | |
1352 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); | |
1353 | ||
1354 | sk_wait_event(sk, timeo, | |
1355 | test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait); | |
1356 | ||
1357 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); | |
1358 | remove_wait_queue(sk_sleep(sk), &wait); | |
1359 | } | |
1360 | ||
6771bfd9 FW |
1361 | static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, |
1362 | struct msghdr *msg, | |
1363 | size_t len) | |
1364 | { | |
1365 | struct sock *sk = (struct sock *)msk; | |
1366 | struct sk_buff *skb; | |
1367 | int copied = 0; | |
1368 | ||
1369 | while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { | |
1370 | u32 offset = MPTCP_SKB_CB(skb)->offset; | |
1371 | u32 data_len = skb->len - offset; | |
1372 | u32 count = min_t(size_t, len - copied, data_len); | |
1373 | int err; | |
1374 | ||
1375 | err = skb_copy_datagram_msg(skb, offset, msg, count); | |
1376 | if (unlikely(err < 0)) { | |
1377 | if (!copied) | |
1378 | return err; | |
1379 | break; | |
1380 | } | |
1381 | ||
1382 | copied += count; | |
1383 | ||
1384 | if (count < data_len) { | |
1385 | MPTCP_SKB_CB(skb)->offset += count; | |
1386 | break; | |
1387 | } | |
1388 | ||
1389 | __skb_unlink(skb, &sk->sk_receive_queue); | |
1390 | __kfree_skb(skb); | |
1391 | ||
1392 | if (copied >= len) | |
1393 | break; | |
1394 | } | |
1395 | ||
1396 | return copied; | |
1397 | } | |
1398 | ||
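__mptcp_recvmsg_mskq() copies out of the msk-level receive queue and relies on MPTCP_SKB_CB(skb)->offset to remember how much of the head skb a previous partial read already consumed; only fully drained skbs are unlinked and freed. A self-contained sketch of that offset bookkeeping over a plain array of buffers (real skbs sit on a linked list and are unlinked rather than indexed):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rx_buf {
	const char *data;
	uint32_t len;
	uint32_t offset;	/* bytes already handed to the reader */
};

/* copy up to 'len' bytes out of a queue of buffers; a partially consumed
 * buffer keeps its offset so the next read resumes where this one stopped
 */
static size_t recv_from_queue(struct rx_buf *q, int nbufs, char *out, size_t len)
{
	size_t copied = 0;
	int i;

	for (i = 0; i < nbufs && copied < len; i++) {
		struct rx_buf *b = &q[i];
		uint32_t avail = b->len - b->offset;
		uint32_t count = len - copied < avail ? (uint32_t)(len - copied) : avail;

		memcpy(out + copied, b->data + b->offset, count);
		copied += count;

		if (count < avail) {
			b->offset += count;	/* leave the rest for a later read */
			break;
		}
		b->offset = b->len;		/* fully consumed */
	}
	return copied;
}

int main(void)
{
	struct rx_buf q[] = {
		{ "hello ", 6, 0 },
		{ "world",  5, 0 },
	};
	char out[16] = "";
	size_t n = recv_from_queue(q, 2, out, 8);

	printf("read %zu bytes: '%.*s', second buffer offset %u\n",
	       n, (int)n, out, q[1].offset);
	return 0;
}
```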
a6b118fe FW |
1399 | /* receive buffer autotuning. See tcp_rcv_space_adjust for more information. |
1400 | * | |
1401 | * Only difference: Use highest rtt estimate of the subflows in use. | |
1402 | */ | |
1403 | static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) | |
1404 | { | |
1405 | struct mptcp_subflow_context *subflow; | |
1406 | struct sock *sk = (struct sock *)msk; | |
1407 | u32 time, advmss = 1; | |
1408 | u64 rtt_us, mstamp; | |
1409 | ||
1410 | sock_owned_by_me(sk); | |
1411 | ||
1412 | if (copied <= 0) | |
1413 | return; | |
1414 | ||
1415 | msk->rcvq_space.copied += copied; | |
1416 | ||
1417 | mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); | |
1418 | time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); | |
1419 | ||
1420 | rtt_us = msk->rcvq_space.rtt_us; | |
1421 | if (rtt_us && time < (rtt_us >> 3)) | |
1422 | return; | |
1423 | ||
1424 | rtt_us = 0; | |
1425 | mptcp_for_each_subflow(msk, subflow) { | |
1426 | const struct tcp_sock *tp; | |
1427 | u64 sf_rtt_us; | |
1428 | u32 sf_advmss; | |
1429 | ||
1430 | tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); | |
1431 | ||
1432 | sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); | |
1433 | sf_advmss = READ_ONCE(tp->advmss); | |
1434 | ||
1435 | rtt_us = max(sf_rtt_us, rtt_us); | |
1436 | advmss = max(sf_advmss, advmss); | |
1437 | } | |
1438 | ||
1439 | msk->rcvq_space.rtt_us = rtt_us; | |
1440 | if (time < (rtt_us >> 3) || rtt_us == 0) | |
1441 | return; | |
1442 | ||
1443 | if (msk->rcvq_space.copied <= msk->rcvq_space.space) | |
1444 | goto new_measure; | |
1445 | ||
1446 | if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && | |
1447 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { | |
1448 | int rcvmem, rcvbuf; | |
1449 | u64 rcvwin, grow; | |
1450 | ||
1451 | rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; | |
1452 | ||
1453 | grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); | |
1454 | ||
1455 | do_div(grow, msk->rcvq_space.space); | |
1456 | rcvwin += (grow << 1); | |
1457 | ||
1458 | rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER); | |
1459 | while (tcp_win_from_space(sk, rcvmem) < advmss) | |
1460 | rcvmem += 128; | |
1461 | ||
1462 | do_div(rcvwin, advmss); | |
1463 | rcvbuf = min_t(u64, rcvwin * rcvmem, | |
1464 | sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); | |
1465 | ||
1466 | if (rcvbuf > sk->sk_rcvbuf) { | |
1467 | u32 window_clamp; | |
1468 | ||
1469 | window_clamp = tcp_win_from_space(sk, rcvbuf); | |
1470 | WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); | |
1471 | ||
1472 | /* Make subflows follow along. If we do not do this, we | |
1473 | * get drops at subflow level if skbs can't be moved to | |
1474 | * the mptcp rx queue fast enough (announced rcv_win can | |
1475 | * exceed ssk->sk_rcvbuf). | |
1476 | */ | |
1477 | mptcp_for_each_subflow(msk, subflow) { | |
1478 | struct sock *ssk; | |
c76c6956 | 1479 | bool slow; |
a6b118fe FW |
1480 | |
1481 | ssk = mptcp_subflow_tcp_sock(subflow); | |
c76c6956 | 1482 | slow = lock_sock_fast(ssk); |
a6b118fe FW |
1483 | WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); |
1484 | tcp_sk(ssk)->window_clamp = window_clamp; | |
c76c6956 PA |
1485 | tcp_cleanup_rbuf(ssk, 1); |
1486 | unlock_sock_fast(ssk, slow); | |
a6b118fe FW |
1487 | } |
1488 | } | |
1489 | } | |
1490 | ||
1491 | msk->rcvq_space.space = msk->rcvq_space.copied; | |
1492 | new_measure: | |
1493 | msk->rcvq_space.copied = 0; | |
1494 | msk->rcvq_space.time = mstamp; | |
1495 | } | |
1496 | ||
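The autotuning above sizes the next receive buffer from the copy rate seen over roughly one RTT: twice the bytes copied plus a 16*advmss cushion, inflated by the relative growth versus the previous measurement, converted from payload bytes to buffer bytes and clamped to the tcp_rmem maximum. A simplified sketch of just that arithmetic; the per-MSS cost here is a rough constant, whereas the kernel uses SKB_TRUESIZE(advmss + MAX_TCP_HEADER) rounded up in 128-byte steps.

```c
#include <stdint.h>
#include <stdio.h>

/* grow the receive buffer the way the autotuning above does: start from
 * twice the bytes copied in the last RTT plus some slack, then scale by
 * how much the copy rate grew versus the previous measurement
 */
static uint64_t next_rcvbuf(uint64_t copied, uint64_t prev_space,
			    uint32_t advmss, uint64_t rmem_max)
{
	uint64_t rcvwin, grow, rcvbuf;
	uint32_t rcvmem = advmss + 256;	/* rough per-MSS truesize estimate */

	if (prev_space == 0 || copied <= prev_space)
		return 0;	/* no growth measured, keep the current buffer */

	rcvwin = (copied << 1) + 16 * advmss;
	grow = rcvwin * (copied - prev_space) / prev_space;
	rcvwin += grow << 1;

	rcvbuf = rcvwin / advmss * rcvmem;
	return rcvbuf < rmem_max ? rcvbuf : rmem_max;
}

int main(void)
{
	/* copy rate grew from 256 KiB to 384 KiB per RTT with a 1460-byte MSS */
	printf("%llu\n", (unsigned long long)
	       next_rcvbuf(384 * 1024, 256 * 1024, 1460, 6 * 1024 * 1024));
	return 0;
}
```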
6771bfd9 FW |
1497 | static bool __mptcp_move_skbs(struct mptcp_sock *msk) |
1498 | { | |
1499 | unsigned int moved = 0; | |
1500 | bool done; | |
1501 | ||
d5f49190 PA |
1502 | /* avoid looping forever below on racing close */ |
1503 | if (((struct sock *)msk)->sk_state == TCP_CLOSE) | |
1504 | return false; | |
1505 | ||
1506 | __mptcp_flush_join_list(msk); | |
6771bfd9 FW |
1507 | do { |
1508 | struct sock *ssk = mptcp_subflow_recv_lookup(msk); | |
65f49fe7 | 1509 | bool slowpath; |
6771bfd9 FW |
1510 | |
1511 | if (!ssk) | |
1512 | break; | |
1513 | ||
65f49fe7 | 1514 | slowpath = lock_sock_fast(ssk); |
6771bfd9 | 1515 | done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); |
65f49fe7 | 1516 | unlock_sock_fast(ssk, slowpath); |
6771bfd9 FW |
1517 | } while (!done); |
1518 | ||
ab174ad8 PA |
1519 | if (mptcp_ofo_queue(msk) || moved > 0) { |
1520 | mptcp_check_data_fin((struct sock *)msk); | |
1521 | return true; | |
1522 | } | |
1523 | return false; | |
6771bfd9 FW |
1524 | } |
1525 | ||
f870fa0b MM |
1526 | static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, |
1527 | int nonblock, int flags, int *addr_len) | |
1528 | { | |
1529 | struct mptcp_sock *msk = mptcp_sk(sk); | |
cec37a6e | 1530 | int copied = 0; |
7a6a6cbc PA |
1531 | int target; |
1532 | long timeo; | |
f870fa0b MM |
1533 | |
1534 | if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT)) | |
1535 | return -EOPNOTSUPP; | |
1536 | ||
cec37a6e | 1537 | lock_sock(sk); |
7a6a6cbc PA |
1538 | timeo = sock_rcvtimeo(sk, nonblock); |
1539 | ||
1540 | len = min_t(size_t, len, INT_MAX); | |
1541 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | |
ec3edaa7 | 1542 | __mptcp_flush_join_list(msk); |
7a6a6cbc | 1543 | |
6771bfd9 | 1544 | while (len > (size_t)copied) { |
7a6a6cbc PA |
1545 | int bytes_read; |
1546 | ||
6771bfd9 FW |
1547 | bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied); |
1548 | if (unlikely(bytes_read < 0)) { | |
1549 | if (!copied) | |
1550 | copied = bytes_read; | |
1551 | goto out_err; | |
1552 | } | |
7a6a6cbc | 1553 | |
6771bfd9 | 1554 | copied += bytes_read; |
7a6a6cbc | 1555 | |
6771bfd9 FW |
1556 | if (skb_queue_empty(&sk->sk_receive_queue) && |
1557 | __mptcp_move_skbs(msk)) | |
1558 | continue; | |
7a6a6cbc PA |
1559 | |
1560 | /* only the master socket status is relevant here. The exit | |
1561 | * conditions closely mirror tcp_recvmsg() |
1562 | */ | |
1563 | if (copied >= target) | |
1564 | break; | |
1565 | ||
1566 | if (copied) { | |
1567 | if (sk->sk_err || | |
1568 | sk->sk_state == TCP_CLOSE || | |
1569 | (sk->sk_shutdown & RCV_SHUTDOWN) || | |
1570 | !timeo || | |
1571 | signal_pending(current)) | |
1572 | break; | |
1573 | } else { | |
1574 | if (sk->sk_err) { | |
1575 | copied = sock_error(sk); | |
1576 | break; | |
1577 | } | |
1578 | ||
5969856a PA |
1579 | if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) |
1580 | mptcp_check_for_eof(msk); | |
1581 | ||
7a6a6cbc PA |
1582 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
1583 | break; | |
1584 | ||
1585 | if (sk->sk_state == TCP_CLOSE) { | |
1586 | copied = -ENOTCONN; | |
1587 | break; | |
1588 | } | |
1589 | ||
1590 | if (!timeo) { | |
1591 | copied = -EAGAIN; | |
1592 | break; | |
1593 | } | |
1594 | ||
1595 | if (signal_pending(current)) { | |
1596 | copied = sock_intr_errno(timeo); | |
1597 | break; | |
1598 | } | |
1599 | } | |
1600 | ||
1601 | pr_debug("block timeout %ld", timeo); | |
7a6a6cbc | 1602 | mptcp_wait_data(sk, &timeo); |
cec37a6e PK |
1603 | } |
1604 | ||
6771bfd9 FW |
1605 | if (skb_queue_empty(&sk->sk_receive_queue)) { |
1606 | /* entire backlog drained, clear DATA_READY. */ | |
7a6a6cbc | 1607 | clear_bit(MPTCP_DATA_READY, &msk->flags); |
cec37a6e | 1608 | |
6771bfd9 FW |
1609 | /* .. race-breaker: ssk might have gotten new data |
1610 | * after last __mptcp_move_skbs() returned false. | |
7a6a6cbc | 1611 | */ |
6771bfd9 | 1612 | if (unlikely(__mptcp_move_skbs(msk))) |
7a6a6cbc | 1613 | set_bit(MPTCP_DATA_READY, &msk->flags); |
6771bfd9 FW |
1614 | } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) { |
1615 | /* data to read but mptcp_wait_data() cleared DATA_READY */ | |
1616 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
7a6a6cbc | 1617 | } |
6771bfd9 | 1618 | out_err: |
6719331c PA |
1619 | pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d", |
1620 | msk, test_bit(MPTCP_DATA_READY, &msk->flags), | |
1621 | skb_queue_empty(&sk->sk_receive_queue), copied); | |
a6b118fe FW |
1622 | mptcp_rcv_space_adjust(msk, copied); |
1623 | ||
7a6a6cbc | 1624 | release_sock(sk); |
cec37a6e PK |
1625 | return copied; |
1626 | } | |
1627 | ||
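A minimal userspace sketch of the flag restriction enforced at the top of mptcp_recvmsg() above: only MSG_WAITALL and MSG_DONTWAIT are honoured, any other flag (MSG_PEEK, MSG_TRUNC, ...) is refused with EOPNOTSUPP. The descriptor fd is an assumption for illustration, not part of this file.

    #include <errno.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Sketch only: fd is assumed to be a connected IPPROTO_MPTCP socket. */
    static ssize_t mptcp_read_nonblock(int fd, void *buf, size_t len)
    {
            ssize_t n = recv(fd, buf, len, MSG_DONTWAIT);   /* accepted flag */

            if (n < 0 && errno == EAGAIN)
                    return 0;       /* nothing queued at the MPTCP level yet */

            /* recv(fd, buf, len, MSG_PEEK) would instead fail with EOPNOTSUPP */
            return n;
    }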
b51f9b80 PA |
1628 | static void mptcp_retransmit_handler(struct sock *sk) |
1629 | { | |
1630 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1631 | ||
c7529392 | 1632 | if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) { |
b51f9b80 | 1633 | mptcp_stop_timer(sk); |
3b1d6210 PA |
1634 | } else { |
1635 | set_bit(MPTCP_WORK_RTX, &msk->flags); | |
ba8f48f7 | 1636 | mptcp_schedule_work(sk); |
3b1d6210 | 1637 | } |
b51f9b80 PA |
1638 | } |
1639 | ||
1640 | static void mptcp_retransmit_timer(struct timer_list *t) | |
1641 | { | |
1642 | struct inet_connection_sock *icsk = from_timer(icsk, t, | |
1643 | icsk_retransmit_timer); | |
1644 | struct sock *sk = &icsk->icsk_inet.sk; | |
1645 | ||
1646 | bh_lock_sock(sk); | |
1647 | if (!sock_owned_by_user(sk)) { | |
1648 | mptcp_retransmit_handler(sk); | |
1649 | } else { | |
1650 | /* delegate our work to tcp_release_cb() */ | |
1651 | if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, | |
1652 | &sk->sk_tsq_flags)) | |
1653 | sock_hold(sk); | |
1654 | } | |
1655 | bh_unlock_sock(sk); | |
1656 | sock_put(sk); | |
1657 | } | |
1658 | ||
3b1d6210 PA |
1659 | /* Find an idle subflow. Return NULL if there is unacked data at tcp |
1660 | * level. | |
1661 | * | |
1662 | * A backup subflow is returned only if that is the only kind available. | |
1663 | */ | |
1664 | static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk) | |
1665 | { | |
1666 | struct mptcp_subflow_context *subflow; | |
1667 | struct sock *backup = NULL; | |
1668 | ||
1669 | sock_owned_by_me((const struct sock *)msk); | |
1670 | ||
d5f49190 PA |
1671 | if (__mptcp_check_fallback(msk)) |
1672 | return msk->first; | |
1673 | ||
3b1d6210 PA |
1674 | mptcp_for_each_subflow(msk, subflow) { |
1675 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
1676 | ||
d5f49190 PA |
1677 | if (!mptcp_subflow_active(subflow)) |
1678 | continue; | |
1679 | ||
3b1d6210 PA |
1680 | /* still data outstanding at TCP level? Don't retransmit. */ |
1681 | if (!tcp_write_queue_empty(ssk)) | |
1682 | return NULL; | |
1683 | ||
1684 | if (subflow->backup) { | |
1685 | if (!backup) | |
1686 | backup = ssk; | |
1687 | continue; | |
1688 | } | |
1689 | ||
1690 | return ssk; | |
1691 | } | |
1692 | ||
1693 | return backup; | |
1694 | } | |
1695 | ||
cec37a6e PK |
1696 | /* subflow sockets can be either outgoing (connect) or incoming |
1697 | * (accept). | |
1698 | * | |
1699 | * Outgoing subflows use in-kernel sockets. | |
1700 | * Incoming subflows do not have their own 'struct socket' allocated, | |
1701 | * so we need to use tcp_close() after detaching them from the mptcp | |
1702 | * parent socket. | |
1703 | */ | |
d0876b22 GT |
1704 | void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, |
1705 | struct mptcp_subflow_context *subflow, | |
1706 | long timeout) | |
cec37a6e PK |
1707 | { |
1708 | struct socket *sock = READ_ONCE(ssk->sk_socket); | |
1709 | ||
1710 | list_del(&subflow->node); | |
1711 | ||
1712 | if (sock && sock != sk->sk_socket) { | |
1713 | /* outgoing subflow */ | |
1714 | sock_release(sock); | |
1715 | } else { | |
1716 | /* incoming subflow */ | |
1717 | tcp_close(ssk, timeout); | |
1718 | } | |
f870fa0b MM |
1719 | } |
1720 | ||
dc24f8b4 PA |
1721 | static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) |
1722 | { | |
1723 | return 0; | |
1724 | } | |
1725 | ||
b416268b FW |
1726 | static void pm_work(struct mptcp_sock *msk) |
1727 | { | |
1728 | struct mptcp_pm_data *pm = &msk->pm; | |
1729 | ||
1730 | spin_lock_bh(&msk->pm.lock); | |
1731 | ||
1732 | pr_debug("msk=%p status=%x", msk, pm->status); | |
1733 | if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { | |
1734 | pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); | |
1735 | mptcp_pm_nl_add_addr_received(msk); | |
1736 | } | |
d0876b22 GT |
1737 | if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { |
1738 | pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); | |
1739 | mptcp_pm_nl_rm_addr_received(msk); | |
1740 | } | |
b416268b FW |
1741 | if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { |
1742 | pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); | |
1743 | mptcp_pm_nl_fully_established(msk); | |
1744 | } | |
1745 | if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { | |
1746 | pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); | |
1747 | mptcp_pm_nl_subflow_established(msk); | |
1748 | } | |
1749 | ||
1750 | spin_unlock_bh(&msk->pm.lock); | |
1751 | } | |
1752 | ||
0e4f35d7 PA |
1753 | static void __mptcp_close_subflow(struct mptcp_sock *msk) |
1754 | { | |
1755 | struct mptcp_subflow_context *subflow, *tmp; | |
1756 | ||
1757 | list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { | |
1758 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
1759 | ||
1760 | if (inet_sk_state_load(ssk) != TCP_CLOSE) | |
1761 | continue; | |
1762 | ||
1763 | __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0); | |
1764 | } | |
1765 | } | |
1766 | ||
80992017 PA |
1767 | static void mptcp_worker(struct work_struct *work) |
1768 | { | |
1769 | struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); | |
3b1d6210 | 1770 | struct sock *ssk, *sk = &msk->sk.icsk_inet.sk; |
caf971df | 1771 | struct mptcp_sendmsg_info info = {}; |
3b1d6210 | 1772 | struct mptcp_data_frag *dfrag; |
caf971df | 1773 | int orig_len, orig_offset; |
3b1d6210 PA |
1774 | u64 orig_write_seq; |
1775 | size_t copied = 0; | |
b3b2854d FW |
1776 | struct msghdr msg = { |
1777 | .msg_flags = MSG_DONTWAIT, | |
1778 | }; | |
caf971df | 1779 | int ret; |
80992017 PA |
1780 | |
1781 | lock_sock(sk); | |
95ed690e | 1782 | mptcp_clean_una_wakeup(sk); |
43b54c6e | 1783 | mptcp_check_data_fin_ack(sk); |
ec3edaa7 | 1784 | __mptcp_flush_join_list(msk); |
0e4f35d7 PA |
1785 | if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) |
1786 | __mptcp_close_subflow(msk); | |
1787 | ||
6771bfd9 | 1788 | __mptcp_move_skbs(msk); |
3b1d6210 | 1789 | |
b416268b FW |
1790 | if (msk->pm.status) |
1791 | pm_work(msk); | |
1792 | ||
59832e24 FW |
1793 | if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) |
1794 | mptcp_check_for_eof(msk); | |
1795 | ||
43b54c6e MM |
1796 | mptcp_check_data_fin(sk); |
1797 | ||
3b1d6210 PA |
1798 | if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) |
1799 | goto unlock; | |
1800 | ||
1801 | dfrag = mptcp_rtx_head(sk); | |
1802 | if (!dfrag) | |
1803 | goto unlock; | |
1804 | ||
149f7c71 FW |
1805 | if (!mptcp_ext_cache_refill(msk)) |
1806 | goto reset_unlock; | |
1807 | ||
3b1d6210 PA |
1808 | ssk = mptcp_subflow_get_retrans(msk); |
1809 | if (!ssk) | |
1810 | goto reset_unlock; | |
1811 | ||
1812 | lock_sock(ssk); | |
1813 | ||
3b1d6210 PA |
1814 | orig_len = dfrag->data_len; |
1815 | orig_offset = dfrag->offset; | |
1816 | orig_write_seq = dfrag->data_seq; | |
1817 | while (dfrag->data_len > 0) { | |
caf971df | 1818 | ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &info); |
3b1d6210 PA |
1819 | if (ret < 0) |
1820 | break; | |
1821 | ||
fc518953 | 1822 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); |
3b1d6210 PA |
1823 | copied += ret; |
1824 | dfrag->data_len -= ret; | |
1825 | dfrag->offset += ret; | |
149f7c71 FW |
1826 | |
1827 | if (!mptcp_ext_cache_refill(msk)) | |
1828 | break; | |
3b1d6210 PA |
1829 | } |
1830 | if (copied) | |
caf971df PA |
1831 | tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, |
1832 | info.size_goal); | |
3b1d6210 PA |
1833 | |
1834 | dfrag->data_seq = orig_write_seq; | |
1835 | dfrag->offset = orig_offset; | |
1836 | dfrag->data_len = orig_len; | |
1837 | ||
1838 | mptcp_set_timeout(sk, ssk); | |
1839 | release_sock(ssk); | |
1840 | ||
1841 | reset_unlock: | |
1842 | if (!mptcp_timer_pending(sk)) | |
1843 | mptcp_reset_timer(sk); | |
1844 | ||
1845 | unlock: | |
80992017 PA |
1846 | release_sock(sk); |
1847 | sock_put(sk); | |
1848 | } | |
1849 | ||
784325e9 | 1850 | static int __mptcp_init_sock(struct sock *sk) |
f870fa0b | 1851 | { |
cec37a6e PK |
1852 | struct mptcp_sock *msk = mptcp_sk(sk); |
1853 | ||
ec3edaa7 PK |
1854 | spin_lock_init(&msk->join_list_lock); |
1855 | ||
cec37a6e | 1856 | INIT_LIST_HEAD(&msk->conn_list); |
ec3edaa7 | 1857 | INIT_LIST_HEAD(&msk->join_list); |
18b683bf | 1858 | INIT_LIST_HEAD(&msk->rtx_queue); |
1891c4a0 | 1859 | __set_bit(MPTCP_SEND_SPACE, &msk->flags); |
80992017 | 1860 | INIT_WORK(&msk->work, mptcp_worker); |
ab174ad8 | 1861 | msk->out_of_order_queue = RB_ROOT; |
f0e6a4cf | 1862 | msk->first_pending = NULL; |
cec37a6e | 1863 | |
8ab183de | 1864 | msk->first = NULL; |
dc24f8b4 | 1865 | inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; |
8ab183de | 1866 | |
1b1c7a0e PK |
1867 | mptcp_pm_data_init(msk); |
1868 | ||
b51f9b80 PA |
1869 | /* re-use the csk retrans timer for MPTCP-level retrans */ |
1870 | timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); | |
1871 | ||
f870fa0b MM |
1872 | return 0; |
1873 | } | |
1874 | ||
784325e9 MB |
1875 | static int mptcp_init_sock(struct sock *sk) |
1876 | { | |
fc518953 FW |
1877 | struct net *net = sock_net(sk); |
1878 | int ret; | |
18b683bf | 1879 | |
b6c08380 GT |
1880 | ret = __mptcp_init_sock(sk); |
1881 | if (ret) | |
1882 | return ret; | |
1883 | ||
fc518953 FW |
1884 | if (!mptcp_is_enabled(net)) |
1885 | return -ENOPROTOOPT; | |
1886 | ||
1887 | if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) | |
1888 | return -ENOMEM; | |
1889 | ||
fa68018d PA |
1890 | ret = __mptcp_socket_create(mptcp_sk(sk)); |
1891 | if (ret) | |
1892 | return ret; | |
1893 | ||
d027236c | 1894 | sk_sockets_allocated_inc(sk); |
a6b118fe | 1895 | sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; |
da51aef5 | 1896 | sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1]; |
d027236c | 1897 | |
18b683bf PA |
1898 | return 0; |
1899 | } | |
1900 | ||
1901 | static void __mptcp_clear_xmit(struct sock *sk) | |
1902 | { | |
1903 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1904 | struct mptcp_data_frag *dtmp, *dfrag; | |
1905 | ||
b51f9b80 PA |
1906 | sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer); |
1907 | ||
18b683bf | 1908 | list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) |
d027236c | 1909 | dfrag_clear(sk, dfrag); |
784325e9 MB |
1910 | } |
1911 | ||
80992017 PA |
1912 | static void mptcp_cancel_work(struct sock *sk) |
1913 | { | |
1914 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1915 | ||
1916 | if (cancel_work_sync(&msk->work)) | |
1917 | sock_put(sk); | |
1918 | } | |
1919 | ||
d0876b22 | 1920 | void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) |
21498490 PK |
1921 | { |
1922 | lock_sock(ssk); | |
1923 | ||
1924 | switch (ssk->sk_state) { | |
1925 | case TCP_LISTEN: | |
1926 | if (!(how & RCV_SHUTDOWN)) | |
1927 | break; | |
df561f66 | 1928 | fallthrough; |
21498490 PK |
1929 | case TCP_SYN_SENT: |
1930 | tcp_disconnect(ssk, O_NONBLOCK); | |
1931 | break; | |
1932 | default: | |
43b54c6e MM |
1933 | if (__mptcp_check_fallback(mptcp_sk(sk))) { |
1934 | pr_debug("Fallback"); | |
1935 | ssk->sk_shutdown |= how; | |
1936 | tcp_shutdown(ssk, how); | |
1937 | } else { | |
1938 | pr_debug("Sending DATA_FIN on subflow %p", ssk); | |
1939 | mptcp_set_timeout(sk, ssk); | |
1940 | tcp_send_ack(ssk); | |
1941 | } | |
21498490 PK |
1942 | break; |
1943 | } | |
1944 | ||
21498490 PK |
1945 | release_sock(ssk); |
1946 | } | |
1947 | ||
6920b851 MM |
1948 | static const unsigned char new_state[16] = { |
1949 | /* current state: new state: action: */ | |
1950 | [0 /* (Invalid) */] = TCP_CLOSE, | |
1951 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1952 | [TCP_SYN_SENT] = TCP_CLOSE, | |
1953 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1954 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, | |
1955 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, | |
1956 | [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ | |
1957 | [TCP_CLOSE] = TCP_CLOSE, | |
1958 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, | |
1959 | [TCP_LAST_ACK] = TCP_LAST_ACK, | |
1960 | [TCP_LISTEN] = TCP_CLOSE, | |
1961 | [TCP_CLOSING] = TCP_CLOSING, | |
1962 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ | |
1963 | }; | |
1964 | ||
1965 | static int mptcp_close_state(struct sock *sk) | |
1966 | { | |
1967 | int next = (int)new_state[sk->sk_state]; | |
1968 | int ns = next & TCP_STATE_MASK; | |
1969 | ||
1970 | inet_sk_state_store(sk, ns); | |
1971 | ||
1972 | return next & TCP_ACTION_FIN; | |
1973 | } | |
1974 | ||
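A short worked example of the table above, mirroring the masking done by mptcp_close_state() (annotation only, not kernel code):

    /*
     * new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN
     *   next & TCP_STATE_MASK  -> TCP_FIN_WAIT1  (state stored on the msk)
     *   next & TCP_ACTION_FIN  -> non-zero       (caller must send a DATA_FIN)
     *
     * new_state[TCP_SYN_SENT]  == TCP_CLOSE
     *   next & TCP_STATE_MASK  -> TCP_CLOSE      (no FIN action required)
     */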
2c22c06c | 1975 | static void mptcp_close(struct sock *sk, long timeout) |
f870fa0b | 1976 | { |
cec37a6e | 1977 | struct mptcp_subflow_context *subflow, *tmp; |
f870fa0b | 1978 | struct mptcp_sock *msk = mptcp_sk(sk); |
b2c5b614 | 1979 | LIST_HEAD(conn_list); |
f870fa0b | 1980 | |
2c22c06c | 1981 | lock_sock(sk); |
43b54c6e MM |
1982 | sk->sk_shutdown = SHUTDOWN_MASK; |
1983 | ||
1984 | if (sk->sk_state == TCP_LISTEN) { | |
1985 | inet_sk_state_store(sk, TCP_CLOSE); | |
1986 | goto cleanup; | |
1987 | } else if (sk->sk_state == TCP_CLOSE) { | |
1988 | goto cleanup; | |
1989 | } | |
1990 | ||
1991 | if (__mptcp_check_fallback(msk)) { | |
1992 | goto update_state; | |
1993 | } else if (mptcp_close_state(sk)) { | |
1994 | pr_debug("Sending DATA_FIN sk=%p", sk); | |
1995 | WRITE_ONCE(msk->write_seq, msk->write_seq + 1); | |
1996 | WRITE_ONCE(msk->snd_data_fin_enable, 1); | |
1997 | ||
1998 | mptcp_for_each_subflow(msk, subflow) { | |
1999 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
2000 | ||
2001 | mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK); | |
2002 | } | |
2003 | } | |
2c22c06c | 2004 | |
43b54c6e MM |
2005 | sk_stream_wait_close(sk, timeout); |
2006 | ||
2007 | update_state: | |
f870fa0b MM |
2008 | inet_sk_state_store(sk, TCP_CLOSE); |
2009 | ||
43b54c6e | 2010 | cleanup: |
10f6d46c PA |
2011 | /* be sure to always acquire the join list lock, to sync vs |
2012 | * mptcp_finish_join(). | |
2013 | */ | |
2014 | spin_lock_bh(&msk->join_list_lock); | |
2015 | list_splice_tail_init(&msk->join_list, &msk->conn_list); | |
2016 | spin_unlock_bh(&msk->join_list_lock); | |
b2c5b614 FW |
2017 | list_splice_init(&msk->conn_list, &conn_list); |
2018 | ||
18b683bf PA |
2019 | __mptcp_clear_xmit(sk); |
2020 | ||
b2c5b614 FW |
2021 | release_sock(sk); |
2022 | ||
2023 | list_for_each_entry_safe(subflow, tmp, &conn_list, node) { | |
cec37a6e | 2024 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
cec37a6e | 2025 | __mptcp_close_ssk(sk, ssk, subflow, timeout); |
f870fa0b MM |
2026 | } |
2027 | ||
80992017 PA |
2028 | mptcp_cancel_work(sk); |
2029 | ||
6771bfd9 FW |
2030 | __skb_queue_purge(&sk->sk_receive_queue); |
2031 | ||
cec37a6e | 2032 | sk_common_release(sk); |
f870fa0b MM |
2033 | } |
2034 | ||
cf7da0d6 PK |
2035 | static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) |
2036 | { | |
2037 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
2038 | const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); | |
2039 | struct ipv6_pinfo *msk6 = inet6_sk(msk); | |
2040 | ||
2041 | msk->sk_v6_daddr = ssk->sk_v6_daddr; | |
2042 | msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; | |
2043 | ||
2044 | if (msk6 && ssk6) { | |
2045 | msk6->saddr = ssk6->saddr; | |
2046 | msk6->flow_label = ssk6->flow_label; | |
2047 | } | |
2048 | #endif | |
2049 | ||
2050 | inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; | |
2051 | inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; | |
2052 | inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; | |
2053 | inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; | |
2054 | inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; | |
2055 | inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; | |
2056 | } | |
2057 | ||
18b683bf PA |
2058 | static int mptcp_disconnect(struct sock *sk, int flags) |
2059 | { | |
42c556fe FW |
2060 | /* Should never be called. |
2061 | * inet_stream_connect() calls ->disconnect, but that | |
2062 | * refers to the subflow socket, not the mptcp one. | |
2063 | */ | |
2064 | WARN_ON_ONCE(1); | |
2065 | return 0; | |
18b683bf PA |
2066 | } |
2067 | ||
b0519de8 FW |
2068 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
2069 | static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) | |
2070 | { | |
2071 | unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); | |
2072 | ||
2073 | return (struct ipv6_pinfo *)(((u8 *)sk) + offset); | |
2074 | } | |
2075 | #endif | |
2076 | ||
fca5c82c | 2077 | struct sock *mptcp_sk_clone(const struct sock *sk, |
cfde141e | 2078 | const struct mptcp_options_received *mp_opt, |
fca5c82c | 2079 | struct request_sock *req) |
b0519de8 | 2080 | { |
58b09919 | 2081 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); |
b0519de8 | 2082 | struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); |
58b09919 PA |
2083 | struct mptcp_sock *msk; |
2084 | u64 ack_seq; | |
b0519de8 FW |
2085 | |
2086 | if (!nsk) | |
2087 | return NULL; | |
2088 | ||
2089 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
2090 | if (nsk->sk_family == AF_INET6) | |
2091 | inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); | |
2092 | #endif | |
2093 | ||
58b09919 PA |
2094 | __mptcp_init_sock(nsk); |
2095 | ||
2096 | msk = mptcp_sk(nsk); | |
2097 | msk->local_key = subflow_req->local_key; | |
2098 | msk->token = subflow_req->token; | |
2099 | msk->subflow = NULL; | |
b93df08c | 2100 | WRITE_ONCE(msk->fully_established, false); |
58b09919 | 2101 | |
58b09919 | 2102 | msk->write_seq = subflow_req->idsn + 1; |
cc9d2566 | 2103 | atomic64_set(&msk->snd_una, msk->write_seq); |
cfde141e | 2104 | if (mp_opt->mp_capable) { |
58b09919 | 2105 | msk->can_ack = true; |
cfde141e | 2106 | msk->remote_key = mp_opt->sndr_key; |
58b09919 PA |
2107 | mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq); |
2108 | ack_seq++; | |
917944da | 2109 | WRITE_ONCE(msk->ack_seq, ack_seq); |
58b09919 | 2110 | } |
7f20d5fc | 2111 | |
5e20087d | 2112 | sock_reset_flag(nsk, SOCK_RCU_FREE); |
7f20d5fc PA |
2113 | /* will be fully established after successful MPC subflow creation */ |
2114 | inet_sk_state_store(nsk, TCP_SYN_RECV); | |
58b09919 PA |
2115 | bh_unlock_sock(nsk); |
2116 | ||
2117 | /* keep a single reference */ | |
2118 | __sock_put(nsk); | |
b0519de8 FW |
2119 | return nsk; |
2120 | } | |
2121 | ||
a6b118fe FW |
2122 | void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) |
2123 | { | |
2124 | const struct tcp_sock *tp = tcp_sk(ssk); | |
2125 | ||
2126 | msk->rcvq_space.copied = 0; | |
2127 | msk->rcvq_space.rtt_us = 0; | |
2128 | ||
2129 | msk->rcvq_space.time = tp->tcp_mstamp; | |
2130 | ||
2131 | /* initial rcv_space offering made to peer */ | |
2132 | msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, | |
2133 | TCP_INIT_CWND * tp->advmss); | |
2134 | if (msk->rcvq_space.space == 0) | |
2135 | msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; | |
2136 | } | |
2137 | ||
cf7da0d6 PK |
2138 | static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, |
2139 | bool kern) | |
2140 | { | |
2141 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2142 | struct socket *listener; | |
2143 | struct sock *newsk; | |
2144 | ||
2145 | listener = __mptcp_nmpc_socket(msk); | |
2146 | if (WARN_ON_ONCE(!listener)) { | |
2147 | *err = -EINVAL; | |
2148 | return NULL; | |
2149 | } | |
2150 | ||
2151 | pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk)); | |
2152 | newsk = inet_csk_accept(listener->sk, flags, err, kern); | |
2153 | if (!newsk) | |
2154 | return NULL; | |
2155 | ||
2156 | pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk)); | |
cf7da0d6 PK |
2157 | if (sk_is_mptcp(newsk)) { |
2158 | struct mptcp_subflow_context *subflow; | |
2159 | struct sock *new_mptcp_sock; | |
2160 | struct sock *ssk = newsk; | |
2161 | ||
2162 | subflow = mptcp_subflow_ctx(newsk); | |
58b09919 | 2163 | new_mptcp_sock = subflow->conn; |
cf7da0d6 | 2164 | |
58b09919 PA |
2165 | /* is_mptcp should be false if subflow->conn is missing, see |
2166 | * subflow_syn_recv_sock() | |
2167 | */ | |
2168 | if (WARN_ON_ONCE(!new_mptcp_sock)) { | |
2169 | tcp_sk(newsk)->is_mptcp = 0; | |
2170 | return newsk; | |
cf7da0d6 PK |
2171 | } |
2172 | ||
58b09919 PA |
2173 | /* acquire the 2nd reference for the owning socket */ |
2174 | sock_hold(new_mptcp_sock); | |
cf7da0d6 | 2175 | |
58b09919 PA |
2176 | local_bh_disable(); |
2177 | bh_lock_sock(new_mptcp_sock); | |
cf7da0d6 | 2178 | msk = mptcp_sk(new_mptcp_sock); |
8ab183de | 2179 | msk->first = newsk; |
cf7da0d6 PK |
2180 | |
2181 | newsk = new_mptcp_sock; | |
2182 | mptcp_copy_inaddrs(newsk, ssk); | |
2183 | list_add(&subflow->node, &msk->conn_list); | |
2184 | ||
a6b118fe | 2185 | mptcp_rcv_space_init(msk, ssk); |
cf7da0d6 | 2186 | bh_unlock_sock(new_mptcp_sock); |
fc518953 FW |
2187 | |
2188 | __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); | |
cf7da0d6 | 2189 | local_bh_enable(); |
fc518953 FW |
2190 | } else { |
2191 | MPTCP_INC_STATS(sock_net(sk), | |
2192 | MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); | |
cf7da0d6 PK |
2193 | } |
2194 | ||
2195 | return newsk; | |
2196 | } | |
2197 | ||
5c8c1640 GT |
2198 | void mptcp_destroy_common(struct mptcp_sock *msk) |
2199 | { | |
2200 | skb_rbtree_purge(&msk->out_of_order_queue); | |
2201 | mptcp_token_destroy(msk); | |
2202 | mptcp_pm_free_anno_list(msk); | |
2203 | } | |
2204 | ||
79c0949e PK |
2205 | static void mptcp_destroy(struct sock *sk) |
2206 | { | |
c9fd9c5f FW |
2207 | struct mptcp_sock *msk = mptcp_sk(sk); |
2208 | ||
2209 | if (msk->cached_ext) | |
2210 | __skb_ext_put(msk->cached_ext); | |
d027236c | 2211 | |
5c8c1640 | 2212 | mptcp_destroy_common(msk); |
d027236c | 2213 | sk_sockets_allocated_dec(sk); |
79c0949e PK |
2214 | } |
2215 | ||
fd1452d8 | 2216 | static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, |
a7b75c5a | 2217 | sockptr_t optval, unsigned int optlen) |
fd1452d8 FW |
2218 | { |
2219 | struct sock *sk = (struct sock *)msk; | |
2220 | struct socket *ssock; | |
2221 | int ret; | |
2222 | ||
2223 | switch (optname) { | |
2224 | case SO_REUSEPORT: | |
2225 | case SO_REUSEADDR: | |
2226 | lock_sock(sk); | |
2227 | ssock = __mptcp_nmpc_socket(msk); | |
2228 | if (!ssock) { | |
2229 | release_sock(sk); | |
2230 | return -EINVAL; | |
2231 | } | |
2232 | ||
a7b75c5a | 2233 | ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
2234 | if (ret == 0) { |
2235 | if (optname == SO_REUSEPORT) | |
2236 | sk->sk_reuseport = ssock->sk->sk_reuseport; | |
2237 | else if (optname == SO_REUSEADDR) | |
2238 | sk->sk_reuse = ssock->sk->sk_reuse; | |
2239 | } | |
2240 | release_sock(sk); | |
2241 | return ret; | |
2242 | } | |
2243 | ||
a7b75c5a | 2244 | return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
2245 | } |
2246 | ||
c9b95a13 | 2247 | static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, |
a7b75c5a | 2248 | sockptr_t optval, unsigned int optlen) |
c9b95a13 FW |
2249 | { |
2250 | struct sock *sk = (struct sock *)msk; | |
2251 | int ret = -EOPNOTSUPP; | |
2252 | struct socket *ssock; | |
2253 | ||
2254 | switch (optname) { | |
2255 | case IPV6_V6ONLY: | |
2256 | lock_sock(sk); | |
2257 | ssock = __mptcp_nmpc_socket(msk); | |
2258 | if (!ssock) { | |
2259 | release_sock(sk); | |
2260 | return -EINVAL; | |
2261 | } | |
2262 | ||
2263 | ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); | |
2264 | if (ret == 0) | |
2265 | sk->sk_ipv6only = ssock->sk->sk_ipv6only; | |
2266 | ||
2267 | release_sock(sk); | |
2268 | break; | |
2269 | } | |
2270 | ||
2271 | return ret; | |
2272 | } | |
2273 | ||
717e79c8 | 2274 | static int mptcp_setsockopt(struct sock *sk, int level, int optname, |
a7b75c5a | 2275 | sockptr_t optval, unsigned int optlen) |
717e79c8 PK |
2276 | { |
2277 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 2278 | struct sock *ssk; |
717e79c8 PK |
2279 | |
2280 | pr_debug("msk=%p", msk); | |
2281 | ||
83f0c10b | 2282 | if (level == SOL_SOCKET) |
fd1452d8 | 2283 | return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); |
83f0c10b | 2284 | |
717e79c8 | 2285 | /* @@ the meaning of setsockopt() when the socket is connected and |
b6e4a1ae MM |
2286 | * there are multiple subflows is not yet defined. It is up to the |
2287 | * MPTCP-level socket to configure the subflows until the subflow | |
2288 | * is in TCP fallback, when TCP socket options are passed through | |
2289 | * to the one remaining subflow. | |
717e79c8 PK |
2290 | */ |
2291 | lock_sock(sk); | |
76660afb | 2292 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 2293 | release_sock(sk); |
76660afb PA |
2294 | if (ssk) |
2295 | return tcp_setsockopt(ssk, level, optname, optval, optlen); | |
50e741bb | 2296 | |
c9b95a13 FW |
2297 | if (level == SOL_IPV6) |
2298 | return mptcp_setsockopt_v6(msk, optname, optval, optlen); | |
2299 | ||
b6e4a1ae | 2300 | return -EOPNOTSUPP; |
717e79c8 PK |
2301 | } |
2302 | ||
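A hedged userspace sketch of the SOL_SOCKET path above: SO_REUSEADDR (and SO_REUSEPORT) set on an MPTCP socket are applied to the initial subflow socket and the resulting value is mirrored back onto the msk. The descriptor fd is assumed for illustration.

    #include <sys/socket.h>

    /* Sketch: fd is assumed to be an unconnected IPPROTO_MPTCP socket. */
    static int mptcp_enable_reuseaddr(int fd)
    {
            int one = 1;

            /* Handled by mptcp_setsockopt_sol_socket(): forwarded to the first
             * subflow via sock_setsockopt(), sk->sk_reuse copied back on success.
             */
            return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    }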
2303 | static int mptcp_getsockopt(struct sock *sk, int level, int optname, | |
50e741bb | 2304 | char __user *optval, int __user *option) |
717e79c8 PK |
2305 | { |
2306 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 2307 | struct sock *ssk; |
717e79c8 PK |
2308 | |
2309 | pr_debug("msk=%p", msk); | |
2310 | ||
b6e4a1ae MM |
2311 | /* @@ the meaning of getsockopt() when the socket is connected and |
2312 | * there are multiple subflows is not yet defined. It is up to the | |
2313 | * MPTCP-level socket to configure the subflows until the subflow | |
2314 | * is in TCP fallback, when socket options are passed through | |
2315 | * to the one remaining subflow. | |
717e79c8 PK |
2316 | */ |
2317 | lock_sock(sk); | |
76660afb | 2318 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 2319 | release_sock(sk); |
76660afb PA |
2320 | if (ssk) |
2321 | return tcp_getsockopt(ssk, level, optname, optval, option); | |
50e741bb | 2322 | |
b6e4a1ae | 2323 | return -EOPNOTSUPP; |
717e79c8 PK |
2324 | } |
2325 | ||
b51f9b80 PA |
2326 | #define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \ |
2327 | TCPF_WRITE_TIMER_DEFERRED) | |
14c441b5 PA |
2328 | |
2329 | /* this closely mirrors tcp_release_cb(), but we must handle a |
2330 | * different set of events |
2331 | */ | |
2332 | static void mptcp_release_cb(struct sock *sk) | |
2333 | { | |
2334 | unsigned long flags, nflags; | |
2335 | ||
2336 | do { | |
2337 | flags = sk->sk_tsq_flags; | |
2338 | if (!(flags & MPTCP_DEFERRED_ALL)) | |
2339 | return; | |
2340 | nflags = flags & ~MPTCP_DEFERRED_ALL; | |
2341 | } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); | |
2342 | ||
b51f9b80 PA |
2343 | sock_release_ownership(sk); |
2344 | ||
14c441b5 PA |
2345 | if (flags & TCPF_DELACK_TIMER_DEFERRED) { |
2346 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2347 | struct sock *ssk; | |
2348 | ||
2349 | ssk = mptcp_subflow_recv_lookup(msk); | |
ba8f48f7 PA |
2350 | if (!ssk || sk->sk_state == TCP_CLOSE || |
2351 | !schedule_work(&msk->work)) | |
14c441b5 PA |
2352 | __sock_put(sk); |
2353 | } | |
b51f9b80 PA |
2354 | |
2355 | if (flags & TCPF_WRITE_TIMER_DEFERRED) { | |
2356 | mptcp_retransmit_handler(sk); | |
2357 | __sock_put(sk); | |
2358 | } | |
14c441b5 PA |
2359 | } |
2360 | ||
2c5ebd00 PA |
2361 | static int mptcp_hash(struct sock *sk) |
2362 | { | |
2363 | /* should never be called, | |
2364 | * we hash the TCP subflows not the master socket | |
2365 | */ | |
2366 | WARN_ON_ONCE(1); | |
2367 | return 0; | |
2368 | } | |
2369 | ||
2370 | static void mptcp_unhash(struct sock *sk) | |
2371 | { | |
2372 | /* called from sk_common_release(), but nothing to do here */ | |
2373 | } | |
2374 | ||
cec37a6e | 2375 | static int mptcp_get_port(struct sock *sk, unsigned short snum) |
f870fa0b MM |
2376 | { |
2377 | struct mptcp_sock *msk = mptcp_sk(sk); | |
cec37a6e | 2378 | struct socket *ssock; |
f870fa0b | 2379 | |
cec37a6e PK |
2380 | ssock = __mptcp_nmpc_socket(msk); |
2381 | pr_debug("msk=%p, subflow=%p", msk, ssock); | |
2382 | if (WARN_ON_ONCE(!ssock)) | |
2383 | return -EINVAL; | |
f870fa0b | 2384 | |
cec37a6e PK |
2385 | return inet_csk_get_port(ssock->sk, snum); |
2386 | } | |
f870fa0b | 2387 | |
cec37a6e PK |
2388 | void mptcp_finish_connect(struct sock *ssk) |
2389 | { | |
2390 | struct mptcp_subflow_context *subflow; | |
2391 | struct mptcp_sock *msk; | |
2392 | struct sock *sk; | |
6d0060f6 | 2393 | u64 ack_seq; |
f870fa0b | 2394 | |
cec37a6e | 2395 | subflow = mptcp_subflow_ctx(ssk); |
cec37a6e PK |
2396 | sk = subflow->conn; |
2397 | msk = mptcp_sk(sk); | |
2398 | ||
648ef4b8 MM |
2399 | pr_debug("msk=%p, token=%u", sk, subflow->token); |
2400 | ||
6d0060f6 MM |
2401 | mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); |
2402 | ack_seq++; | |
648ef4b8 MM |
2403 | subflow->map_seq = ack_seq; |
2404 | subflow->map_subflow_seq = 1; | |
6d0060f6 | 2405 | |
cec37a6e PK |
2406 | /* the socket is not connected yet, so no msk/subflow ops can race |
2407 | * with us while accessing the fields below |
2408 | */ | |
2409 | WRITE_ONCE(msk->remote_key, subflow->remote_key); | |
2410 | WRITE_ONCE(msk->local_key, subflow->local_key); | |
6d0060f6 MM |
2411 | WRITE_ONCE(msk->write_seq, subflow->idsn + 1); |
2412 | WRITE_ONCE(msk->ack_seq, ack_seq); | |
d22f4988 | 2413 | WRITE_ONCE(msk->can_ack, 1); |
cc9d2566 | 2414 | atomic64_set(&msk->snd_una, msk->write_seq); |
1b1c7a0e PK |
2415 | |
2416 | mptcp_pm_new_connection(msk, 0); | |
a6b118fe FW |
2417 | |
2418 | mptcp_rcv_space_init(msk, ssk); | |
f870fa0b MM |
2419 | } |
2420 | ||
cf7da0d6 PK |
2421 | static void mptcp_sock_graft(struct sock *sk, struct socket *parent) |
2422 | { | |
2423 | write_lock_bh(&sk->sk_callback_lock); | |
2424 | rcu_assign_pointer(sk->sk_wq, &parent->wq); | |
2425 | sk_set_socket(sk, parent); | |
2426 | sk->sk_uid = SOCK_INODE(parent)->i_uid; | |
2427 | write_unlock_bh(&sk->sk_callback_lock); | |
2428 | } | |
2429 | ||
f296234c PK |
2430 | bool mptcp_finish_join(struct sock *sk) |
2431 | { | |
2432 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
2433 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); | |
2434 | struct sock *parent = (void *)msk; | |
2435 | struct socket *parent_sock; | |
ec3edaa7 | 2436 | bool ret; |
f296234c PK |
2437 | |
2438 | pr_debug("msk=%p, subflow=%p", msk, subflow); | |
2439 | ||
2440 | /* mptcp socket already closing? */ | |
b93df08c | 2441 | if (!mptcp_is_fully_established(parent)) |
f296234c PK |
2442 | return false; |
2443 | ||
2444 | if (!msk->pm.server_side) | |
2445 | return true; | |
2446 | ||
10f6d46c PA |
2447 | if (!mptcp_pm_allow_new_subflow(msk)) |
2448 | return false; | |
2449 | ||
2450 | /* active connections are already on conn_list, and we can't acquire | |
2451 | * msk lock here. | |
2452 | * use the join list lock as synchronization point and double-check | |
2453 | * msk status to avoid racing with mptcp_close() | |
2454 | */ | |
2455 | spin_lock_bh(&msk->join_list_lock); | |
2456 | ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; | |
2457 | if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) | |
2458 | list_add_tail(&subflow->node, &msk->join_list); | |
2459 | spin_unlock_bh(&msk->join_list_lock); | |
2460 | if (!ret) | |
2461 | return false; | |
2462 | ||
2463 | /* attach to msk socket only after we are sure it will deal with us |
2464 | * at close time | |
2465 | */ | |
f296234c PK |
2466 | parent_sock = READ_ONCE(parent->sk_socket); |
2467 | if (parent_sock && !sk->sk_socket) | |
2468 | mptcp_sock_graft(sk, parent_sock); | |
917944da | 2469 | subflow->map_seq = READ_ONCE(msk->ack_seq); |
10f6d46c | 2470 | return true; |
f296234c PK |
2471 | } |
2472 | ||
1891c4a0 FW |
2473 | static bool mptcp_memory_free(const struct sock *sk, int wake) |
2474 | { | |
2475 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2476 | ||
2477 | return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true; | |
2478 | } | |
2479 | ||
f870fa0b MM |
2480 | static struct proto mptcp_prot = { |
2481 | .name = "MPTCP", | |
2482 | .owner = THIS_MODULE, | |
2483 | .init = mptcp_init_sock, | |
18b683bf | 2484 | .disconnect = mptcp_disconnect, |
f870fa0b | 2485 | .close = mptcp_close, |
cf7da0d6 | 2486 | .accept = mptcp_accept, |
717e79c8 PK |
2487 | .setsockopt = mptcp_setsockopt, |
2488 | .getsockopt = mptcp_getsockopt, | |
f870fa0b | 2489 | .shutdown = tcp_shutdown, |
79c0949e | 2490 | .destroy = mptcp_destroy, |
f870fa0b MM |
2491 | .sendmsg = mptcp_sendmsg, |
2492 | .recvmsg = mptcp_recvmsg, | |
14c441b5 | 2493 | .release_cb = mptcp_release_cb, |
2c5ebd00 PA |
2494 | .hash = mptcp_hash, |
2495 | .unhash = mptcp_unhash, | |
cec37a6e | 2496 | .get_port = mptcp_get_port, |
d027236c PA |
2497 | .sockets_allocated = &mptcp_sockets_allocated, |
2498 | .memory_allocated = &tcp_memory_allocated, | |
2499 | .memory_pressure = &tcp_memory_pressure, | |
1891c4a0 | 2500 | .stream_memory_free = mptcp_memory_free, |
d027236c | 2501 | .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), |
989ef49b | 2502 | .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), |
d027236c | 2503 | .sysctl_mem = sysctl_tcp_mem, |
f870fa0b | 2504 | .obj_size = sizeof(struct mptcp_sock), |
2c5ebd00 | 2505 | .slab_flags = SLAB_TYPESAFE_BY_RCU, |
f870fa0b MM |
2506 | .no_autobind = true, |
2507 | }; | |
2508 | ||
2303f994 PK |
2509 | static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
2510 | { | |
2511 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2512 | struct socket *ssock; | |
cf7da0d6 | 2513 | int err; |
2303f994 PK |
2514 | |
2515 | lock_sock(sock->sk); | |
fa68018d PA |
2516 | ssock = __mptcp_nmpc_socket(msk); |
2517 | if (!ssock) { | |
2518 | err = -EINVAL; | |
2303f994 PK |
2519 | goto unlock; |
2520 | } | |
2521 | ||
2522 | err = ssock->ops->bind(ssock, uaddr, addr_len); | |
cf7da0d6 PK |
2523 | if (!err) |
2524 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2303f994 PK |
2525 | |
2526 | unlock: | |
2527 | release_sock(sock->sk); | |
2528 | return err; | |
2529 | } | |
2530 | ||
0235d075 PA |
2531 | static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, |
2532 | struct mptcp_subflow_context *subflow) | |
2533 | { | |
2534 | subflow->request_mptcp = 0; | |
2535 | __mptcp_do_fallback(msk); | |
2536 | } | |
2537 | ||
2303f994 PK |
2538 | static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, |
2539 | int addr_len, int flags) | |
2540 | { | |
2541 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2c5ebd00 | 2542 | struct mptcp_subflow_context *subflow; |
2303f994 PK |
2543 | struct socket *ssock; |
2544 | int err; | |
2545 | ||
2546 | lock_sock(sock->sk); | |
41be81a8 PA |
2547 | if (sock->state != SS_UNCONNECTED && msk->subflow) { |
2548 | /* pending connection or invalid state, let existing subflow | |
2549 | * cope with that | |
2550 | */ | |
2551 | ssock = msk->subflow; | |
2552 | goto do_connect; | |
2553 | } | |
2554 | ||
fa68018d PA |
2555 | ssock = __mptcp_nmpc_socket(msk); |
2556 | if (!ssock) { | |
2557 | err = -EINVAL; | |
2303f994 PK |
2558 | goto unlock; |
2559 | } | |
2560 | ||
fa68018d PA |
2561 | mptcp_token_destroy(msk); |
2562 | inet_sk_state_store(sock->sk, TCP_SYN_SENT); | |
2c5ebd00 | 2563 | subflow = mptcp_subflow_ctx(ssock->sk); |
cf7da0d6 PK |
2564 | #ifdef CONFIG_TCP_MD5SIG |
2565 | /* no MPTCP if MD5SIG is enabled on this socket or we may run out of | |
2566 | * TCP option space. | |
2567 | */ | |
2568 | if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) | |
0235d075 | 2569 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 2570 | #endif |
2c5ebd00 | 2571 | if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) |
0235d075 | 2572 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 2573 | |
41be81a8 | 2574 | do_connect: |
2303f994 | 2575 | err = ssock->ops->connect(ssock, uaddr, addr_len, flags); |
41be81a8 PA |
2576 | sock->state = ssock->state; |
2577 | ||
2578 | /* on successful connect, the msk state will be moved to established by | |
2579 | * subflow_finish_connect() | |
2580 | */ | |
367fe04e | 2581 | if (!err || err == -EINPROGRESS) |
41be81a8 PA |
2582 | mptcp_copy_inaddrs(sock->sk, ssock->sk); |
2583 | else | |
2584 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2303f994 PK |
2585 | |
2586 | unlock: | |
2587 | release_sock(sock->sk); | |
2588 | return err; | |
2589 | } | |
2590 | ||
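For reference, a minimal, hypothetical userspace client exercising the connect path above. IPPROTO_MPTCP may need to be defined by hand with older userspace headers, and the address/port are purely illustrative; if the peer does not answer with MP_CAPABLE the connection transparently falls back to plain TCP.

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    static int mptcp_connect_example(void)
    {
            struct sockaddr_in addr;
            int fd;

            fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
            if (fd < 0)
                    return -1;      /* MPTCP disabled or not built in */

            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_port = htons(8080);                            /* illustrative */
            inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr);        /* illustrative */

            /* mptcp_stream_connect() drives the MP_CAPABLE handshake on the
             * initial subflow socket created by __mptcp_socket_create().
             */
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }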
cf7da0d6 PK |
2591 | static int mptcp_listen(struct socket *sock, int backlog) |
2592 | { | |
2593 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2594 | struct socket *ssock; | |
2595 | int err; | |
2596 | ||
2597 | pr_debug("msk=%p", msk); | |
2598 | ||
2599 | lock_sock(sock->sk); | |
fa68018d PA |
2600 | ssock = __mptcp_nmpc_socket(msk); |
2601 | if (!ssock) { | |
2602 | err = -EINVAL; | |
cf7da0d6 PK |
2603 | goto unlock; |
2604 | } | |
2605 | ||
fa68018d PA |
2606 | mptcp_token_destroy(msk); |
2607 | inet_sk_state_store(sock->sk, TCP_LISTEN); | |
5e20087d FW |
2608 | sock_set_flag(sock->sk, SOCK_RCU_FREE); |
2609 | ||
cf7da0d6 PK |
2610 | err = ssock->ops->listen(ssock, backlog); |
2611 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2612 | if (!err) | |
2613 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2614 | ||
2615 | unlock: | |
2616 | release_sock(sock->sk); | |
2617 | return err; | |
2618 | } | |
2619 | ||
cf7da0d6 PK |
2620 | static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, |
2621 | int flags, bool kern) | |
2622 | { | |
2623 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2624 | struct socket *ssock; | |
2625 | int err; | |
2626 | ||
2627 | pr_debug("msk=%p", msk); | |
2628 | ||
2629 | lock_sock(sock->sk); | |
2630 | if (sock->sk->sk_state != TCP_LISTEN) | |
2631 | goto unlock_fail; | |
2632 | ||
2633 | ssock = __mptcp_nmpc_socket(msk); | |
2634 | if (!ssock) | |
2635 | goto unlock_fail; | |
2636 | ||
8a05661b | 2637 | clear_bit(MPTCP_DATA_READY, &msk->flags); |
cf7da0d6 PK |
2638 | sock_hold(ssock->sk); |
2639 | release_sock(sock->sk); | |
2640 | ||
2641 | err = ssock->ops->accept(sock, newsock, flags, kern); | |
d2f77c53 | 2642 | if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { |
cf7da0d6 PK |
2643 | struct mptcp_sock *msk = mptcp_sk(newsock->sk); |
2644 | struct mptcp_subflow_context *subflow; | |
2645 | ||
2646 | /* set ssk->sk_socket of accept()ed flows to the mptcp socket. |
2647 | * This is needed so the NOSPACE flag can be set from the tcp stack. |
2648 | */ | |
ec3edaa7 | 2649 | __mptcp_flush_join_list(msk); |
190f8b06 | 2650 | mptcp_for_each_subflow(msk, subflow) { |
cf7da0d6 PK |
2651 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
2652 | ||
2653 | if (!ssk->sk_socket) | |
2654 | mptcp_sock_graft(ssk, newsock); | |
2655 | } | |
cf7da0d6 PK |
2656 | } |
2657 | ||
8a05661b PA |
2658 | if (inet_csk_listen_poll(ssock->sk)) |
2659 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
cf7da0d6 PK |
2660 | sock_put(ssock->sk); |
2661 | return err; | |
2662 | ||
2663 | unlock_fail: | |
2664 | release_sock(sock->sk); | |
2665 | return -EINVAL; | |
2666 | } | |
2667 | ||
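A matching server-side sketch (assumed port, minimal error handling) for the listen/accept path implemented above; MP_CAPABLE peers are returned as MPTCP sockets, while non-MPTCP peers fall back to plain TCP, as counted by the MIB counters in mptcp_accept().

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    static int mptcp_accept_example(void)
    {
            struct sockaddr_in addr = {
                    .sin_family = AF_INET,
                    .sin_port   = htons(8080),      /* illustrative */
                    .sin_addr   = { .s_addr = htonl(INADDR_ANY) },
            };
            int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

            if (fd < 0)
                    return -1;
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
                listen(fd, 128) < 0) {
                    close(fd);
                    return -1;
            }

            /* mptcp_stream_accept() grafts accepted subflows onto the new
             * MPTCP socket before returning it to userspace.
             */
            return accept(fd, NULL, NULL);
    }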
8a05661b PA |
2668 | static __poll_t mptcp_check_readable(struct mptcp_sock *msk) |
2669 | { | |
2670 | return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : | |
2671 | 0; | |
2672 | } | |
2673 | ||
2303f994 PK |
2674 | static __poll_t mptcp_poll(struct file *file, struct socket *sock, |
2675 | struct poll_table_struct *wait) | |
2676 | { | |
1891c4a0 | 2677 | struct sock *sk = sock->sk; |
8ab183de | 2678 | struct mptcp_sock *msk; |
2303f994 | 2679 | __poll_t mask = 0; |
8a05661b | 2680 | int state; |
2303f994 | 2681 | |
1891c4a0 | 2682 | msk = mptcp_sk(sk); |
1891c4a0 | 2683 | sock_poll_wait(file, sock, wait); |
1891c4a0 | 2684 | |
8a05661b | 2685 | state = inet_sk_state_load(sk); |
6719331c | 2686 | pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); |
8a05661b PA |
2687 | if (state == TCP_LISTEN) |
2688 | return mptcp_check_readable(msk); | |
2689 | ||
2690 | if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { | |
2691 | mask |= mptcp_check_readable(msk); | |
63561a40 | 2692 | if (test_bit(MPTCP_SEND_SPACE, &msk->flags)) |
8a05661b PA |
2693 | mask |= EPOLLOUT | EPOLLWRNORM; |
2694 | } | |
1891c4a0 FW |
2695 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
2696 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; | |
2697 | ||
2303f994 PK |
2698 | return mask; |
2699 | } | |
2700 | ||
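A small hedged sketch of how the mask computed above surfaces through poll(): POLLIN tracks MPTCP_DATA_READY and POLLOUT tracks MPTCP_SEND_SPACE, so a plain poll() loop works just as it would on a TCP socket (fd is assumed).

    #include <poll.h>

    /* Sketch: fd is assumed to be a connected IPPROTO_MPTCP socket. */
    static int mptcp_poll_example(int fd, int timeout_ms)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

            /* POLLIN maps to MPTCP_DATA_READY, POLLOUT to MPTCP_SEND_SPACE
             * (see mptcp_poll() / mptcp_check_readable() above).
             */
            return poll(&pfd, 1, timeout_ms);
    }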
21498490 PK |
2701 | static int mptcp_shutdown(struct socket *sock, int how) |
2702 | { | |
2703 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2704 | struct mptcp_subflow_context *subflow; | |
2705 | int ret = 0; | |
2706 | ||
2707 | pr_debug("sk=%p, how=%d", msk, how); | |
2708 | ||
2709 | lock_sock(sock->sk); | |
21498490 PK |
2710 | |
2711 | how++; | |
21498490 PK |
2712 | if ((how & ~SHUTDOWN_MASK) || !how) { |
2713 | ret = -EINVAL; | |
2714 | goto out_unlock; | |
2715 | } | |
2716 | ||
2717 | if (sock->state == SS_CONNECTING) { | |
2718 | if ((1 << sock->sk->sk_state) & | |
2719 | (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)) | |
2720 | sock->state = SS_DISCONNECTING; | |
2721 | else | |
2722 | sock->state = SS_CONNECTED; | |
2723 | } | |
2724 | ||
43b54c6e MM |
2725 | /* If we've already sent a FIN, or it's a closed state, skip this. */ |
2726 | if (__mptcp_check_fallback(msk)) { | |
2727 | if (how == SHUT_WR || how == SHUT_RDWR) | |
2728 | inet_sk_state_store(sock->sk, TCP_FIN_WAIT1); | |
7279da61 | 2729 | |
43b54c6e MM |
2730 | mptcp_for_each_subflow(msk, subflow) { |
2731 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
21498490 | 2732 | |
43b54c6e MM |
2733 | mptcp_subflow_shutdown(sock->sk, tcp_sk, how); |
2734 | } | |
2735 | } else if ((how & SEND_SHUTDOWN) && | |
2736 | ((1 << sock->sk->sk_state) & | |
2737 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | | |
2738 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) && | |
2739 | mptcp_close_state(sock->sk)) { | |
2740 | __mptcp_flush_join_list(msk); | |
2741 | ||
2742 | WRITE_ONCE(msk->write_seq, msk->write_seq + 1); | |
2743 | WRITE_ONCE(msk->snd_data_fin_enable, 1); | |
2744 | ||
2745 | mptcp_for_each_subflow(msk, subflow) { | |
2746 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
2747 | ||
2748 | mptcp_subflow_shutdown(sock->sk, tcp_sk, how); | |
2749 | } | |
21498490 PK |
2750 | } |
2751 | ||
e1ff9e82 DC |
2752 | /* Wake up anyone sleeping in poll. */ |
2753 | sock->sk->sk_state_change(sock->sk); | |
2754 | ||
21498490 PK |
2755 | out_unlock: |
2756 | release_sock(sock->sk); | |
2757 | ||
2758 | return ret; | |
2759 | } | |
2760 | ||
e42f1ac6 FW |
2761 | static const struct proto_ops mptcp_stream_ops = { |
2762 | .family = PF_INET, | |
2763 | .owner = THIS_MODULE, | |
2764 | .release = inet_release, | |
2765 | .bind = mptcp_bind, | |
2766 | .connect = mptcp_stream_connect, | |
2767 | .socketpair = sock_no_socketpair, | |
2768 | .accept = mptcp_stream_accept, | |
d2f77c53 | 2769 | .getname = inet_getname, |
e42f1ac6 FW |
2770 | .poll = mptcp_poll, |
2771 | .ioctl = inet_ioctl, | |
2772 | .gettstamp = sock_gettstamp, | |
2773 | .listen = mptcp_listen, | |
2774 | .shutdown = mptcp_shutdown, | |
2775 | .setsockopt = sock_common_setsockopt, | |
2776 | .getsockopt = sock_common_getsockopt, | |
2777 | .sendmsg = inet_sendmsg, | |
2778 | .recvmsg = inet_recvmsg, | |
2779 | .mmap = sock_no_mmap, | |
2780 | .sendpage = inet_sendpage, | |
e42f1ac6 | 2781 | }; |
2303f994 | 2782 | |
f870fa0b MM |
2783 | static struct inet_protosw mptcp_protosw = { |
2784 | .type = SOCK_STREAM, | |
2785 | .protocol = IPPROTO_MPTCP, | |
2786 | .prot = &mptcp_prot, | |
2303f994 PK |
2787 | .ops = &mptcp_stream_ops, |
2788 | .flags = INET_PROTOSW_ICSK, | |
f870fa0b MM |
2789 | }; |
2790 | ||
d39dceca | 2791 | void __init mptcp_proto_init(void) |
f870fa0b | 2792 | { |
2303f994 | 2793 | mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; |
2303f994 | 2794 | |
d027236c PA |
2795 | if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) |
2796 | panic("Failed to allocate MPTCP pcpu counter\n"); | |
2797 | ||
2303f994 | 2798 | mptcp_subflow_init(); |
1b1c7a0e | 2799 | mptcp_pm_init(); |
2c5ebd00 | 2800 | mptcp_token_init(); |
2303f994 | 2801 | |
f870fa0b MM |
2802 | if (proto_register(&mptcp_prot, 1) != 0) |
2803 | panic("Failed to register MPTCP proto.\n"); | |
2804 | ||
2805 | inet_register_protosw(&mptcp_protosw); | |
6771bfd9 FW |
2806 | |
2807 | BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); | |
f870fa0b MM |
2808 | } |
2809 | ||
2810 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
e42f1ac6 FW |
2811 | static const struct proto_ops mptcp_v6_stream_ops = { |
2812 | .family = PF_INET6, | |
2813 | .owner = THIS_MODULE, | |
2814 | .release = inet6_release, | |
2815 | .bind = mptcp_bind, | |
2816 | .connect = mptcp_stream_connect, | |
2817 | .socketpair = sock_no_socketpair, | |
2818 | .accept = mptcp_stream_accept, | |
d2f77c53 | 2819 | .getname = inet6_getname, |
e42f1ac6 FW |
2820 | .poll = mptcp_poll, |
2821 | .ioctl = inet6_ioctl, | |
2822 | .gettstamp = sock_gettstamp, | |
2823 | .listen = mptcp_listen, | |
2824 | .shutdown = mptcp_shutdown, | |
2825 | .setsockopt = sock_common_setsockopt, | |
2826 | .getsockopt = sock_common_getsockopt, | |
2827 | .sendmsg = inet6_sendmsg, | |
2828 | .recvmsg = inet6_recvmsg, | |
2829 | .mmap = sock_no_mmap, | |
2830 | .sendpage = inet_sendpage, | |
2831 | #ifdef CONFIG_COMPAT | |
3986912f | 2832 | .compat_ioctl = inet6_compat_ioctl, |
e42f1ac6 FW |
2833 | #endif |
2834 | }; | |
2835 | ||
f870fa0b MM |
2836 | static struct proto mptcp_v6_prot; |
2837 | ||
79c0949e PK |
2838 | static void mptcp_v6_destroy(struct sock *sk) |
2839 | { | |
2840 | mptcp_destroy(sk); | |
2841 | inet6_destroy_sock(sk); | |
2842 | } | |
2843 | ||
f870fa0b MM |
2844 | static struct inet_protosw mptcp_v6_protosw = { |
2845 | .type = SOCK_STREAM, | |
2846 | .protocol = IPPROTO_MPTCP, | |
2847 | .prot = &mptcp_v6_prot, | |
2303f994 | 2848 | .ops = &mptcp_v6_stream_ops, |
f870fa0b MM |
2849 | .flags = INET_PROTOSW_ICSK, |
2850 | }; | |
2851 | ||
d39dceca | 2852 | int __init mptcp_proto_v6_init(void) |
f870fa0b MM |
2853 | { |
2854 | int err; | |
2855 | ||
2856 | mptcp_v6_prot = mptcp_prot; | |
2857 | strcpy(mptcp_v6_prot.name, "MPTCPv6"); | |
2858 | mptcp_v6_prot.slab = NULL; | |
79c0949e | 2859 | mptcp_v6_prot.destroy = mptcp_v6_destroy; |
b0519de8 | 2860 | mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); |
f870fa0b MM |
2861 | |
2862 | err = proto_register(&mptcp_v6_prot, 1); | |
2863 | if (err) | |
2864 | return err; | |
2865 | ||
2866 | err = inet6_register_protosw(&mptcp_v6_protosw); | |
2867 | if (err) | |
2868 | proto_unregister(&mptcp_v6_prot); | |
2869 | ||
2870 | return err; | |
2871 | } | |
2872 | #endif |