Merge tag 'perf-tools-fixes-for-v6.4-1-2023-05-20' of git://git.kernel.org/pub/scm...
[linux-block.git] / net / mptcp / options.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2017 - 2019, Intel Corporation.
5  */
6
7 #define pr_fmt(fmt) "MPTCP: " fmt
8
9 #include <linux/kernel.h>
10 #include <crypto/sha2.h>
11 #include <net/tcp.h>
12 #include <net/mptcp.h>
13 #include "protocol.h"
14 #include "mib.h"
15
16 #include <trace/events/mptcp.h>
17
18 static bool mptcp_cap_flag_sha256(u8 flags)
19 {
20         return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
21 }
22
/* Parse one MPTCP TCP option into @mp_opt.
 *
 * @ptr points at the option payload (the subtype byte, i.e. past the
 * kind and length octets already consumed by mptcp_get_options()),
 * while @opsize is the full option length including those two octets.
 * Each recognized, well-formed suboption sets the matching bit in
 * mp_opt->suboptions and fills the associated fields; options with an
 * unexpected size are silently ignored, except where noted below.
 */
23 static void mptcp_parse_option(const struct sk_buff *skb,
24                                const unsigned char *ptr, int opsize,
25                                struct mptcp_options_received *mp_opt)
26 {
27         u8 subtype = *ptr >> 4;
28         int expected_opsize;
29         u16 subopt;
30         u8 version;
31         u8 flags;
32         u8 i;
33
34         switch (subtype) {
35         case MPTCPOPT_MP_CAPABLE:
36                 /* strict size checking */
37                 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
38                         if (skb->len > tcp_hdr(skb)->doff << 2)
39                                 expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA;
40                         else
41                                 expected_opsize = TCPOLEN_MPTCP_MPC_ACK;
42                         subopt = OPTION_MPTCP_MPC_ACK;
43                 } else {
44                         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) {
45                                 expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK;
46                                 subopt = OPTION_MPTCP_MPC_SYNACK;
47                         } else {
48                                 expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
49                                 subopt = OPTION_MPTCP_MPC_SYN;
50                         }
51                 }
52
53                 /* Cfr RFC 8684 Section 3.3.0:
54                  * If a checksum is present but its use had
55                  * not been negotiated in the MP_CAPABLE handshake, the receiver MUST
56                  * close the subflow with a RST, as it is not behaving as negotiated.
57                  * If a checksum is not present when its use has been negotiated, the
58                  * receiver MUST close the subflow with a RST, as it is considered
59                  * broken
60                  * We parse even option with mismatching csum presence, so that
61                  * later in subflow_data_ready we can trigger the reset.
62                  */
63                 if (opsize != expected_opsize &&
64                     (expected_opsize != TCPOLEN_MPTCP_MPC_ACK_DATA ||
65                      opsize != TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM))
66                         break;
67
68                 /* try to be gentle vs future versions on the initial syn */
69                 version = *ptr++ & MPTCP_VERSION_MASK;
70                 if (opsize != TCPOLEN_MPTCP_MPC_SYN) {
71                         if (version != MPTCP_SUPPORTED_VERSION)
72                                 break;
73                 } else if (version < MPTCP_SUPPORTED_VERSION) {
74                         break;
75                 }
76
77                 flags = *ptr++;
78                 if (!mptcp_cap_flag_sha256(flags) ||
79                     (flags & MPTCP_CAP_EXTENSIBILITY))
80                         break;
81
82                 /* RFC 6824, Section 3.1:
83                  * "For the Checksum Required bit (labeled "A"), if either
84                  * host requires the use of checksums, checksums MUST be used.
85                  * In other words, the only way for checksums not to be used
86                  * is if both hosts in their SYNs set A=0."
87                  */
88                 if (flags & MPTCP_CAP_CHECKSUM_REQD)
89                         mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
90
91                 mp_opt->deny_join_id0 = !!(flags & MPTCP_CAP_DENY_JOIN_ID0);
92
93                 mp_opt->suboptions |= subopt;
94                 if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
95                         mp_opt->sndr_key = get_unaligned_be64(ptr);
96                         ptr += 8;
97                 }
98                 if (opsize >= TCPOLEN_MPTCP_MPC_ACK) {
99                         mp_opt->rcvr_key = get_unaligned_be64(ptr);
100                         ptr += 8;
101                 }
102                 if (opsize >= TCPOLEN_MPTCP_MPC_ACK_DATA) {
103                         /* Section 3.1.:
104                          * "the data parameters in a MP_CAPABLE are semantically
105                          * equivalent to those in a DSS option and can be used
106                          * interchangeably."
107                          */
108                         mp_opt->suboptions |= OPTION_MPTCP_DSS;
109                         mp_opt->use_map = 1;
110                         mp_opt->mpc_map = 1;
111                         mp_opt->data_len = get_unaligned_be16(ptr);
112                         ptr += 2;
113                 }
114                 if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
115                         mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
116                         mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
117                         ptr += 2;
118                 }
119                 pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
120                          version, flags, opsize, mp_opt->sndr_key,
121                          mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
122                 break;
123
124         case MPTCPOPT_MP_JOIN:
125                 mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
126                 if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
127                         mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
128                         mp_opt->join_id = *ptr++;
129                         mp_opt->token = get_unaligned_be32(ptr);
130                         ptr += 4;
131                         mp_opt->nonce = get_unaligned_be32(ptr);
132                         ptr += 4;
133                         pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
134                                  mp_opt->backup, mp_opt->join_id,
135                                  mp_opt->token, mp_opt->nonce);
136                 } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
137                         mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
138                         mp_opt->join_id = *ptr++;
139                         mp_opt->thmac = get_unaligned_be64(ptr);
140                         ptr += 8;
141                         mp_opt->nonce = get_unaligned_be32(ptr);
142                         ptr += 4;
143                         pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
144                                  mp_opt->backup, mp_opt->join_id,
145                                  mp_opt->thmac, mp_opt->nonce);
146                 } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
147                         ptr += 2; /* skip the subtype/reserved octets, hmac follows */
148                         memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
149                         pr_debug("MP_JOIN hmac");
150                 } else {
151                         /* unexpected length: undo the bit set above */
152                         mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
153                 }
154                 break;
155
156         case MPTCPOPT_DSS:
157                 pr_debug("DSS");
158                 ptr++; /* skip the subtype/reserved byte, flags are next */
159
160                 /* we must clear 'mpc_map' be able to detect MP_CAPABLE
161                  * map vs DSS map in mptcp_incoming_options(), and reconstruct
162                  * map info accordingly
163                  */
164                 mp_opt->mpc_map = 0;
165                 flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
166                 mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
167                 mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
168                 mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0;
169                 mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
170                 mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
171
172                 pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
173                          mp_opt->data_fin, mp_opt->dsn64,
174                          mp_opt->use_map, mp_opt->ack64,
175                          mp_opt->use_ack);
176
177                 expected_opsize = TCPOLEN_MPTCP_DSS_BASE;
178
179                 if (mp_opt->use_ack) {
180                         if (mp_opt->ack64)
181                                 expected_opsize += TCPOLEN_MPTCP_DSS_ACK64;
182                         else
183                                 expected_opsize += TCPOLEN_MPTCP_DSS_ACK32;
184                 }
185
186                 if (mp_opt->use_map) {
187                         if (mp_opt->dsn64)
188                                 expected_opsize += TCPOLEN_MPTCP_DSS_MAP64;
189                         else
190                                 expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
191                 }
192
193                 /* Always parse any csum presence combination, we will enforce
194                  * RFC 8684 Section 3.3.0 checks later in subflow_data_ready
195                  */
196                 if (opsize != expected_opsize &&
197                     opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
198                         break;
199
200                 mp_opt->suboptions |= OPTION_MPTCP_DSS;
201                 if (mp_opt->use_ack) {
202                         if (mp_opt->ack64) {
203                                 mp_opt->data_ack = get_unaligned_be64(ptr);
204                                 ptr += 8;
205                         } else {
206                                 mp_opt->data_ack = get_unaligned_be32(ptr);
207                                 ptr += 4;
208                         }
209
210                         pr_debug("data_ack=%llu", mp_opt->data_ack);
211                 }
212
213                 if (mp_opt->use_map) {
214                         if (mp_opt->dsn64) {
215                                 mp_opt->data_seq = get_unaligned_be64(ptr);
216                                 ptr += 8;
217                         } else {
218                                 mp_opt->data_seq = get_unaligned_be32(ptr);
219                                 ptr += 4;
220                         }
221
222                         mp_opt->subflow_seq = get_unaligned_be32(ptr);
223                         ptr += 4;
224
225                         mp_opt->data_len = get_unaligned_be16(ptr);
226                         ptr += 2;
227
228                         if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
229                                 mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
230                                 mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
231                                 ptr += 2;
232                         }
233
234                         pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
235                                  mp_opt->data_seq, mp_opt->subflow_seq,
236                                  mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
237                                  mp_opt->csum);
238                 }
239
240                 break;
241
242         case MPTCPOPT_ADD_ADDR:
243                 mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
244                 if (!mp_opt->echo) {
245                         if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
246                             opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
247                                 mp_opt->addr.family = AF_INET;
248 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
249                         else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
250                                  opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
251                                 mp_opt->addr.family = AF_INET6;
252 #endif
253                         else
254                                 break;
255                 } else {
256                         if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
257                             opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
258                                 mp_opt->addr.family = AF_INET;
259 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
260                         else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
261                                  opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
262                                 mp_opt->addr.family = AF_INET6;
263 #endif
264                         else
265                                 break;
266                 }
267
268                 mp_opt->suboptions |= OPTION_MPTCP_ADD_ADDR;
269                 mp_opt->addr.id = *ptr++;
270                 mp_opt->addr.port = 0;
271                 mp_opt->ahmac = 0;
272                 if (mp_opt->addr.family == AF_INET) {
273                         memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4);
274                         ptr += 4;
275                         if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
276                             opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
277                                 mp_opt->addr.port = htons(get_unaligned_be16(ptr));
278                                 ptr += 2;
279                         }
280                 }
281 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
282                 else {
283                         memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16);
284                         ptr += 16;
285                         if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
286                             opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
287                                 mp_opt->addr.port = htons(get_unaligned_be16(ptr));
288                                 ptr += 2;
289                         }
290                 }
291 #endif
292                 if (!mp_opt->echo) {
293                         mp_opt->ahmac = get_unaligned_be64(ptr);
294                         ptr += 8;
295                 }
296                 pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
297                          (mp_opt->addr.family == AF_INET6) ? "6" : "",
298                          mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
299                 break;
300
301         case MPTCPOPT_RM_ADDR:
302                 if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 ||
303                     opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX)
304                         break;
305
306                 ptr++; /* skip the subtype/reserved byte, address ids follow */
307
308                 mp_opt->suboptions |= OPTION_MPTCP_RM_ADDR;
309                 mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
310                 for (i = 0; i < mp_opt->rm_list.nr; i++)
311                         mp_opt->rm_list.ids[i] = *ptr++;
312                 pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
313                 break;
314
315         case MPTCPOPT_MP_PRIO:
316                 if (opsize != TCPOLEN_MPTCP_PRIO)
317                         break;
318
319                 mp_opt->suboptions |= OPTION_MPTCP_PRIO;
320                 mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
321                 pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
322                 break;
323
324         case MPTCPOPT_MP_FASTCLOSE:
325                 if (opsize != TCPOLEN_MPTCP_FASTCLOSE)
326                         break;
327
328                 ptr += 2; /* skip the subtype/reserved octets, key follows */
329                 mp_opt->rcvr_key = get_unaligned_be64(ptr);
330                 ptr += 8;
331                 mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
332                 pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
333                 break;
334
335         case MPTCPOPT_RST:
336                 if (opsize != TCPOLEN_MPTCP_RST)
337                         break;
338
339                 /* MP_RST is only meaningful on a TCP RST segment */
340                 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
341                         break;
342
343                 mp_opt->suboptions |= OPTION_MPTCP_RST;
344                 flags = *ptr++;
345                 mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
346                 mp_opt->reset_reason = *ptr;
347                 pr_debug("MP_RST: transient=%u reason=%u",
348                          mp_opt->reset_transient, mp_opt->reset_reason);
349                 break;
350
351         case MPTCPOPT_MP_FAIL:
352                 if (opsize != TCPOLEN_MPTCP_FAIL)
353                         break;
354
355                 ptr += 2; /* skip the subtype/reserved octets, sequence follows */
356                 mp_opt->suboptions |= OPTION_MPTCP_FAIL;
357                 mp_opt->fail_seq = get_unaligned_be64(ptr);
358                 pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
359                 break;
360
361         default:
362                 break;
363         }
364 }
363
/* Walk the TCP option space of @skb and feed every MPTCP option found
 * to mptcp_parse_option().  mp_opt->suboptions is cleared first, so on
 * return it describes exactly the MPTCP suboptions present in this
 * packet.  Truncated or malformed option lists stop the scan early.
 */
364 void mptcp_get_options(const struct sk_buff *skb,
365                        struct mptcp_options_received *mp_opt)
366 {
367         const struct tcphdr *th = tcp_hdr(skb);
368         const unsigned char *ptr;
369         int length;
370
371         /* initialize option status */
372         mp_opt->suboptions = 0;
373
374         length = (th->doff * 4) - sizeof(struct tcphdr);
375         ptr = (const unsigned char *)(th + 1);
376
377         while (length > 0) {
378                 int opcode = *ptr++;
379                 int opsize;
380
381                 switch (opcode) {
382                 case TCPOPT_EOL:
383                         return;
384                 case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
385                         length--;
386                         continue;
387                 default:
388                         if (length < 2)
389                                 return;
390                         opsize = *ptr++;
391                         if (opsize < 2) /* "silly options" */
392                                 return;
393                         if (opsize > length)
394                                 return; /* don't parse partial options */
395                         if (opcode == TCPOPT_MPTCP)
396                                 mptcp_parse_option(skb, ptr, opsize, mp_opt);
397                         ptr += opsize - 2; /* kind and len already consumed */
398                         length -= opsize;
399                 }
400         }
401 }
402
403 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
404                        unsigned int *size, struct mptcp_out_options *opts)
405 {
406         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
407
408         /* we will use snd_isn to detect first pkt [re]transmission
409          * in mptcp_established_options_mp()
410          */
411         subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
412         if (subflow->request_mptcp) {
413                 opts->suboptions = OPTION_MPTCP_MPC_SYN;
414                 opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk));
415                 opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
416                 *size = TCPOLEN_MPTCP_MPC_SYN;
417                 return true;
418         } else if (subflow->request_join) {
419                 pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
420                          subflow->local_nonce);
421                 opts->suboptions = OPTION_MPTCP_MPJ_SYN;
422                 opts->join_id = subflow->local_id;
423                 opts->token = subflow->remote_token;
424                 opts->nonce = subflow->local_nonce;
425                 opts->backup = subflow->request_bkup;
426                 *size = TCPOLEN_MPTCP_MPJ_SYN;
427                 return true;
428         }
429         return false;
430 }
431
432 static void clear_3rdack_retransmission(struct sock *sk)
433 {
434         struct inet_connection_sock *icsk = inet_csk(sk);
435
436         sk_stop_timer(sk, &icsk->icsk_delack_timer);
437         icsk->icsk_ack.timeout = 0;
438         icsk->icsk_ack.ato = 0;
439         icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
440 }
441
/* Emit the MP_CAPABLE/MP_JOIN option completing the handshake (the
 * "3rd ack"), if one is still due on this subflow.
 * Returns true and sets @size/@opts when such an option must be
 * written; returns false once the subflow is fully established, when
 * DATA_FIN/TCP shutdown takes precedence, or when no skb is available
 * (the caller then defers sizing to the DSS path, see comment below).
 */
442 static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
443                                          bool snd_data_fin_enable,
444                                          unsigned int *size,
445                                          struct mptcp_out_options *opts)
446 {
447         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
448         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
449         struct mptcp_ext *mpext;
450         unsigned int data_len;
451         u8 len;
452
453         /* When skb is not available, we better over-estimate the emitted
454          * options len. A full DSS option (28 bytes) is longer than
455          * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
456          * tell the caller to defer the estimate to
457          * mptcp_established_options_dss(), which will reserve enough space.
458          */
459         if (!skb)
460                 return false;
461
462         /* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
463         if (subflow->fully_established || snd_data_fin_enable ||
464             subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
465             sk->sk_state != TCP_ESTABLISHED)
466                 return false;
467
468         if (subflow->mp_capable) {
469                 mpext = mptcp_get_ext(skb);
470                 data_len = mpext ? mpext->data_len : 0;
471
472                 /* we will check ops->data_len in mptcp_write_options() to
473                  * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
474                  * TCPOLEN_MPTCP_MPC_ACK
475                  */
476                 opts->data_len = data_len;
477                 opts->suboptions = OPTION_MPTCP_MPC_ACK;
478                 opts->sndr_key = subflow->local_key;
479                 opts->rcvr_key = subflow->remote_key;
480                 opts->csum_reqd = READ_ONCE(msk->csum_enabled);
481                 opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
482
483                 /* Section 3.1.
484                  * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
485                  * packets that start the first subflow of an MPTCP connection,
486                  * as well as the first packet that carries data
487                  */
488                 if (data_len > 0) {
489                         len = TCPOLEN_MPTCP_MPC_ACK_DATA;
490                         if (opts->csum_reqd) {
491                                 /* we need to propagate more info to csum the pseudo hdr */
492                                 opts->data_seq = mpext->data_seq;
493                                 opts->subflow_seq = mpext->subflow_seq;
494                                 opts->csum = mpext->csum;
495                                 len += TCPOLEN_MPTCP_DSS_CHECKSUM;
496                         }
497                         *size = ALIGN(len, 4);
498                 } else {
499                         *size = TCPOLEN_MPTCP_MPC_ACK;
500                 }
501
502                 pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
503                          subflow, subflow->local_key, subflow->remote_key,
504                          data_len);
505
506                 return true;
507         } else if (subflow->mp_join) {
508                 opts->suboptions = OPTION_MPTCP_MPJ_ACK;
509                 memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
510                 *size = TCPOLEN_MPTCP_MPJ_ACK;
511                 pr_debug("subflow=%p", subflow);
512
513                 /* we can use the full delegate action helper only from BH context
514                  * If we are in process context - sk is flushing the backlog at
515                  * socket lock release time - just set the appropriate flag, will
516                  * be handled by the release callback
517                  */
518                 if (sock_owned_by_user(sk))
519                         set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
520                 else
521                         mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
522                 return true;
523         }
524         return false;
525 }
526
/* Stamp a DATA_FIN into the DSS mapping @ext that will be written for
 * @skb.  If the skb carries no data mapping, emit the RFC 6824 "empty"
 * mapping (data_len == 1, subflow_seq == 0); if it carries the final
 * mapping, extend it by the one byte of sequence space the DATA_FIN
 * consumes.  Non-final mappings are left untouched.
 */
527 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
528                                  struct sk_buff *skb, struct mptcp_ext *ext)
529 {
530         /* The write_seq value has already been incremented, so the actual
531          * sequence number for the DATA_FIN is one less.
532          */
533         u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;
534
535         if (!ext->use_map || !skb->len) {
536                 /* RFC6824 requires a DSS mapping with specific values
537                  * if DATA_FIN is set but no data payload is mapped
538                  */
539                 ext->data_fin = 1;
540                 ext->use_map = 1;
541                 ext->dsn64 = 1;
542                 ext->data_seq = data_fin_tx_seq;
543                 ext->subflow_seq = 0;
544                 ext->data_len = 1;
545         } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
546                 /* If there's an existing DSS mapping and it is the
547                  * final mapping, DATA_FIN consumes 1 additional byte of
548                  * mapping space.
549                  */
550                 ext->data_fin = 1;
551                 ext->data_len++;
552         }
553 }
554
/* Build the DSS option for @skb: the data mapping (copied from the
 * skb's mptcp extension), an optional DATA_FIN, and - once the peer's
 * key is known (msk->can_ack) - a DATA_ACK.
 * Returns true when a DSS option must be emitted; @size is set to the
 * 4-byte-aligned option length in all cases.
 */
555 static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
556                                           bool snd_data_fin_enable,
557                                           unsigned int *size,
558                                           struct mptcp_out_options *opts)
559 {
560         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
561         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
562         unsigned int dss_size = 0;
563         struct mptcp_ext *mpext;
564         unsigned int ack_size;
565         bool ret = false;
566         u64 ack_seq;
567
568         opts->csum_reqd = READ_ONCE(msk->csum_enabled);
569         mpext = skb ? mptcp_get_ext(skb) : NULL;
570
571         if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
572                 unsigned int map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;
573
574                 if (mpext) {
575                         if (opts->csum_reqd)
576                                 map_size += TCPOLEN_MPTCP_DSS_CHECKSUM;
577
578                         opts->ext_copy = *mpext;
579                 }
580
581                 dss_size = map_size;
582                 if (skb && snd_data_fin_enable)
583                         mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
584                 opts->suboptions = OPTION_MPTCP_DSS;
585                 ret = true;
586         }
587
588         /* passive sockets msk will set the 'can_ack' after accept(), even
589          * if the first subflow may have the already the remote key handy
590          */
591         opts->ext_copy.use_ack = 0;
592         if (!READ_ONCE(msk->can_ack)) {
593                 *size = ALIGN(dss_size, 4);
594                 return ret;
595         }
596
597         ack_seq = READ_ONCE(msk->ack_seq);
598         if (READ_ONCE(msk->use_64bit_ack)) {
599                 ack_size = TCPOLEN_MPTCP_DSS_ACK64;
600                 opts->ext_copy.data_ack = ack_seq;
601                 opts->ext_copy.ack64 = 1;
602         } else {
603                 ack_size = TCPOLEN_MPTCP_DSS_ACK32;
604                 opts->ext_copy.data_ack32 = (uint32_t)ack_seq;
605                 opts->ext_copy.ack64 = 0;
606         }
607         opts->ext_copy.use_ack = 1;
608         opts->suboptions = OPTION_MPTCP_DSS;
609         WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
610
611         /* Add kind/length/subtype/flag overhead if mapping is not populated */
612         if (dss_size == 0)
613                 ack_size += TCPOLEN_MPTCP_DSS_BASE;
614
615         dss_size += ack_size;
616
617         *size = ALIGN(dss_size, 4);
618         return true;
619 }
620
621 static u64 add_addr_generate_hmac(u64 key1, u64 key2,
622                                   struct mptcp_addr_info *addr)
623 {
624         u16 port = ntohs(addr->port);
625         u8 hmac[SHA256_DIGEST_SIZE];
626         u8 msg[19];
627         int i = 0;
628
629         msg[i++] = addr->id;
630         if (addr->family == AF_INET) {
631                 memcpy(&msg[i], &addr->addr.s_addr, 4);
632                 i += 4;
633         }
634 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
635         else if (addr->family == AF_INET6) {
636                 memcpy(&msg[i], &addr->addr6.s6_addr, 16);
637                 i += 16;
638         }
639 #endif
640         msg[i++] = port >> 8;
641         msg[i++] = port & 0xFF;
642
643         mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac);
644
645         return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
646 }
647
/* Try to append an ADD_ADDR (or ADD_ADDR echo) advertisement supplied
 * by the path manager.  The PM may require dropping the suboptions
 * already built into @opts, in which case their space is reclaimed and
 * added back to @remaining.  Never combined with MPC/MPJ ACK options,
 * as those must not be stripped.  Returns true when @opts/@size were
 * updated with an ADD_ADDR.
 */
648 static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb,
649                                                unsigned int *size,
650                                                unsigned int remaining,
651                                                struct mptcp_out_options *opts)
652 {
653         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
654         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
655         bool drop_other_suboptions = false;
656         unsigned int opt_size = *size;
657         bool echo;
658         int len;
659
660         /* add addr will strip the existing options, be sure to avoid breaking
661          * MPC/MPJ handshakes
662          */
663         if (!mptcp_pm_should_add_signal(msk) ||
664             (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) ||
665             !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr,
666                     &echo, &drop_other_suboptions))
667                 return false;
668
669         if (drop_other_suboptions)
670                 remaining += opt_size;
671         len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
672         if (remaining < len)
673                 return false;
674
675         *size = len;
676         if (drop_other_suboptions) {
677                 pr_debug("drop other suboptions");
678                 opts->suboptions = 0;
679
680                 /* note that e.g. DSS could have written into the memory
681                  * aliased by ahmac, we must reset the field here
682                  * to avoid appending the hmac even for ADD_ADDR echo
683                  * options
684                  */
685                 opts->ahmac = 0;
686                 *size -= opt_size;
687         }
688         opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
689         if (!echo) {
690                 opts->ahmac = add_addr_generate_hmac(msk->local_key,
691                                                      msk->remote_key,
692                                                      &opts->addr);
693         }
694         pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
695                  opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
696
697         return true;
698 }
699
700 static bool mptcp_established_options_rm_addr(struct sock *sk,
701                                               unsigned int *size,
702                                               unsigned int remaining,
703                                               struct mptcp_out_options *opts)
704 {
705         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
706         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
707         struct mptcp_rm_list rm_list;
708         int i, len;
709
710         if (!mptcp_pm_should_rm_signal(msk) ||
711             !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list)))
712                 return false;
713
714         len = mptcp_rm_addr_len(&rm_list);
715         if (len < 0)
716                 return false;
717         if (remaining < len)
718                 return false;
719
720         *size = len;
721         opts->suboptions |= OPTION_MPTCP_RM_ADDR;
722         opts->rm_list = rm_list;
723
724         for (i = 0; i < opts->rm_list.nr; i++)
725                 pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
726
727         return true;
728 }
729
730 static bool mptcp_established_options_mp_prio(struct sock *sk,
731                                               unsigned int *size,
732                                               unsigned int remaining,
733                                               struct mptcp_out_options *opts)
734 {
735         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
736
737         /* can't send MP_PRIO with MPC, as they share the same option space:
738          * 'backup'. Also it makes no sense at all
739          */
740         if (!subflow->send_mp_prio || (opts->suboptions & OPTIONS_MPTCP_MPC))
741                 return false;
742
743         /* account for the trailing 'nop' option */
744         if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN)
745                 return false;
746
747         *size = TCPOLEN_MPTCP_PRIO_ALIGN;
748         opts->suboptions |= OPTION_MPTCP_PRIO;
749         opts->backup = subflow->request_bkup;
750
751         pr_debug("prio=%d", opts->backup);
752
753         return true;
754 }
755
756 static noinline bool mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb,
757                                                    unsigned int *size,
758                                                    unsigned int remaining,
759                                                    struct mptcp_out_options *opts)
760 {
761         const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
762
763         if (remaining < TCPOLEN_MPTCP_RST)
764                 return false;
765
766         *size = TCPOLEN_MPTCP_RST;
767         opts->suboptions |= OPTION_MPTCP_RST;
768         opts->reset_transient = subflow->reset_transient;
769         opts->reset_reason = subflow->reset_reason;
770         MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTTX);
771
772         return true;
773 }
774
775 static bool mptcp_established_options_fastclose(struct sock *sk,
776                                                 unsigned int *size,
777                                                 unsigned int remaining,
778                                                 struct mptcp_out_options *opts)
779 {
780         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
781         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
782
783         if (likely(!subflow->send_fastclose))
784                 return false;
785
786         if (remaining < TCPOLEN_MPTCP_FASTCLOSE)
787                 return false;
788
789         *size = TCPOLEN_MPTCP_FASTCLOSE;
790         opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
791         opts->rcvr_key = msk->remote_key;
792
793         pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
794         MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
795         return true;
796 }
797
798 static bool mptcp_established_options_mp_fail(struct sock *sk,
799                                               unsigned int *size,
800                                               unsigned int remaining,
801                                               struct mptcp_out_options *opts)
802 {
803         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
804
805         if (likely(!subflow->send_mp_fail))
806                 return false;
807
808         if (remaining < TCPOLEN_MPTCP_FAIL)
809                 return false;
810
811         *size = TCPOLEN_MPTCP_FAIL;
812         opts->suboptions |= OPTION_MPTCP_FAIL;
813         opts->fail_seq = subflow->map_seq;
814
815         pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
816         MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
817
818         return true;
819 }
820
821 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
822                                unsigned int *size, unsigned int remaining,
823                                struct mptcp_out_options *opts)
824 {
825         struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
826         struct mptcp_sock *msk = mptcp_sk(subflow->conn);
827         unsigned int opt_size = 0;
828         bool snd_data_fin;
829         bool ret = false;
830
831         opts->suboptions = 0;
832
833         if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
834                 return false;
835
836         if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
837                 if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
838                     mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) {
839                         *size += opt_size;
840                         remaining -= opt_size;
841                 }
842                 /* MP_RST can be used with MP_FASTCLOSE and MP_FAIL if there is room */
843                 if (mptcp_established_options_rst(sk, skb, &opt_size, remaining, opts)) {
844                         *size += opt_size;
845                         remaining -= opt_size;
846                 }
847                 return true;
848         }
849
850         snd_data_fin = mptcp_data_fin_enabled(msk);
851         if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, opts))
852                 ret = true;
853         else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, opts)) {
854                 unsigned int mp_fail_size;
855
856                 ret = true;
857                 if (mptcp_established_options_mp_fail(sk, &mp_fail_size,
858                                                       remaining - opt_size, opts)) {
859                         *size += opt_size + mp_fail_size;
860                         remaining -= opt_size - mp_fail_size;
861                         return true;
862                 }
863         }
864
865         /* we reserved enough space for the above options, and exceeding the
866          * TCP option space would be fatal
867          */
868         if (WARN_ON_ONCE(opt_size > remaining))
869                 return false;
870
871         *size += opt_size;
872         remaining -= opt_size;
873         if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) {
874                 *size += opt_size;
875                 remaining -= opt_size;
876                 ret = true;
877         } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
878                 *size += opt_size;
879                 remaining -= opt_size;
880                 ret = true;
881         }
882
883         if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) {
884                 *size += opt_size;
885                 remaining -= opt_size;
886                 ret = true;
887         }
888
889         return ret;
890 }
891
892 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
893                           struct mptcp_out_options *opts)
894 {
895         struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
896
897         if (subflow_req->mp_capable) {
898                 opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
899                 opts->sndr_key = subflow_req->local_key;
900                 opts->csum_reqd = subflow_req->csum_reqd;
901                 opts->allow_join_id0 = subflow_req->allow_join_id0;
902                 *size = TCPOLEN_MPTCP_MPC_SYNACK;
903                 pr_debug("subflow_req=%p, local_key=%llu",
904                          subflow_req, subflow_req->local_key);
905                 return true;
906         } else if (subflow_req->mp_join) {
907                 opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
908                 opts->backup = subflow_req->backup;
909                 opts->join_id = subflow_req->local_id;
910                 opts->thmac = subflow_req->thmac;
911                 opts->nonce = subflow_req->local_nonce;
912                 pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
913                          subflow_req, opts->backup, opts->join_id,
914                          opts->thmac, opts->nonce);
915                 *size = TCPOLEN_MPTCP_MPJ_SYNACK;
916                 return true;
917         }
918         return false;
919 }
920
/* Drive a subflow towards the fully-established state based on the MPTCP
 * options carried by the incoming skb. Returns true when regular option
 * processing can continue, false on fallback or after resetting the subflow.
 */
static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
				    struct mptcp_subflow_context *subflow,
				    struct sk_buff *skb,
				    struct mptcp_options_received *mp_opt)
{
	/* here we can process OoO, in-window pkts, only in-sequence 4th ack
	 * will make the subflow fully established
	 */
	if (likely(subflow->fully_established)) {
		/* on passive sockets, check for 3rd ack retransmission
		 * note that msk is always set by subflow_syn_recv_sock()
		 * for mp_join subflows
		 */
		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
		    subflow->mp_join && (mp_opt->suboptions & OPTIONS_MPTCP_MPJ) &&
		    !subflow->request_join)
			tcp_send_ack(ssk);
		goto check_notify;
	}

	/* we must process OoO packets before the first subflow is fully
	 * established. OoO packets are instead a protocol violation
	 * for MP_JOIN subflows as the peer must not send any data
	 * before receiving the forth ack - cfr. RFC 8684 section 3.2.
	 */
	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
		if (subflow->mp_join)
			goto reset;
		/* an OoO MPC ACK still completes the handshake for is_mptfo
		 * subflows (presumably MPTCP fastopen - confirm with subflow.c)
		 */
		if (subflow->is_mptfo && mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
			goto set_fully_established;
		return subflow->mp_capable;
	}

	if (subflow->remote_key_valid &&
	    (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
	     ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) {
		/* subflows are fully established as soon as we get any
		 * additional ack, including ADD_ADDR.
		 */
		subflow->fully_established = 1;
		WRITE_ONCE(msk->fully_established, true);
		goto check_notify;
	}

	/* If the first established packet does not contain MP_CAPABLE + data
	 * then fallback to TCP. Fallback scenarios requires a reset for
	 * MP_JOIN subflows.
	 */
	if (!(mp_opt->suboptions & OPTIONS_MPTCP_MPC)) {
		if (subflow->mp_join)
			goto reset;
		subflow->mp_capable = 0;
		pr_fallback(msk);
		mptcp_do_fallback(ssk);
		return false;
	}

	if (mp_opt->deny_join_id0)
		WRITE_ONCE(msk->pm.remote_deny_join_id0, true);

set_fully_established:
	/* only a passive (server-side) socket should reach this point with a
	 * pending MPC option
	 */
	if (unlikely(!READ_ONCE(msk->pm.server_side)))
		pr_warn_once("bogus mpc option on established client sk");
	mptcp_subflow_fully_established(subflow, mp_opt);

check_notify:
	/* if the subflow is not already linked into the conn_list, we can't
	 * notify the PM: this subflow is still on the listener queue
	 * and the PM possibly acquiring the subflow lock could race with
	 * the listener close
	 */
	if (likely(subflow->pm_notified) || list_empty(&subflow->node))
		return true;

	subflow->pm_notified = 1;
	if (subflow->mp_join) {
		clear_3rdack_retransmission(ssk);
		mptcp_pm_subflow_established(msk);
	} else {
		mptcp_pm_fully_established(msk, ssk);
	}
	return true;

reset:
	mptcp_subflow_reset(ssk);
	return false;
}
1009
1010 u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
1011 {
1012         u32 old_seq32, cur_seq32;
1013
1014         old_seq32 = (u32)old_seq;
1015         cur_seq32 = (u32)cur_seq;
1016         cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32;
1017         if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32)))
1018                 return cur_seq + (1LL << 32);
1019
1020         /* reverse wrap could happen, too */
1021         if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32)))
1022                 return cur_seq - (1LL << 32);
1023         return cur_seq;
1024 }
1025
/* Propagate the DATA_ACK carried by @mp_opt to the MPTCP-level send state:
 * advance snd_una, grow the send window and wake writers under the msk
 * data lock.
 */
static void ack_update_msk(struct mptcp_sock *msk,
			   struct sock *ssk,
			   struct mptcp_options_received *mp_opt)
{
	u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
	struct sock *sk = (struct sock *)msk;
	u64 old_snd_una;

	mptcp_data_lock(sk);

	/* avoid ack expansion on update conflict, to reduce the risk of
	 * wrongly expanding to a future ack sequence number, which is way
	 * more dangerous than missing an ack
	 */
	old_snd_una = msk->snd_una;
	new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);

	/* ACK for data not even sent yet? Ignore.*/
	if (unlikely(after64(new_snd_una, snd_nxt)))
		new_snd_una = old_snd_una;

	new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;

	/* the window right edge only moves forward */
	if (after64(new_wnd_end, msk->wnd_end))
		msk->wnd_end = new_wnd_end;

	/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
	if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
		__mptcp_check_push(sk, ssk);

	/* only a forward-moving ack updates snd_una and wakes writers */
	if (after64(new_snd_una, old_snd_una)) {
		msk->snd_una = new_snd_una;
		__mptcp_data_acked(sk);
	}
	mptcp_data_unlock(sk);

	trace_ack_update_msk(mp_opt->data_ack,
			     old_snd_una, new_snd_una,
			     new_wnd_end, msk->wnd_end);
}
1066
/* Record an incoming DATA_FIN sequence number at the msk level.
 * Returns true only for the first DATA_FIN notification.
 */
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
{
	/* Skip if DATA_FIN was already received.
	 * If updating simultaneously with the recvmsg loop, values
	 * should match. If they mismatch, the peer is misbehaving and
	 * we will prefer the most recent information.
	 */
	if (READ_ONCE(msk->rcv_data_fin))
		return false;

	/* expand a possibly 32-bit DATA_FIN seq to 64 bits before storing;
	 * publish the seq before setting the rcv_data_fin flag
	 */
	WRITE_ONCE(msk->rcv_data_fin_seq,
		   mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
	WRITE_ONCE(msk->rcv_data_fin, 1);

	return true;
}
1083
1084 static bool add_addr_hmac_valid(struct mptcp_sock *msk,
1085                                 struct mptcp_options_received *mp_opt)
1086 {
1087         u64 hmac = 0;
1088
1089         if (mp_opt->echo)
1090                 return true;
1091
1092         hmac = add_addr_generate_hmac(msk->remote_key,
1093                                       msk->local_key,
1094                                       &mp_opt->addr);
1095
1096         pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
1097                  msk, hmac, mp_opt->ahmac);
1098
1099         return hmac == mp_opt->ahmac;
1100 }
1101
/* Return false if a subflow has been reset, else return true */
bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_options_received mp_opt;
	struct mptcp_ext *mpext;

	/* after a fallback no MPTCP option parsing is needed */
	if (__mptcp_check_fallback(msk)) {
		/* Keep it simple and unconditionally trigger send data cleanup and
		 * pending queue spooling. We will need to acquire the data lock
		 * for more accurate checks, and once the lock is acquired, such
		 * helpers are cheap.
		 */
		mptcp_data_lock(subflow->conn);
		if (sk_stream_memory_free(sk))
			__mptcp_check_push(subflow->conn, sk);
		__mptcp_data_acked(subflow->conn);
		mptcp_data_unlock(subflow->conn);
		return true;
	}

	mptcp_get_options(skb, &mp_opt);

	/* The subflow can be in close state only if check_fully_established()
	 * just sent a reset. If so, tell the caller to ignore the current packet.
	 */
	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
		return sk->sk_state != TCP_CLOSE;

	/* anything beyond a pure DSS needs PM-related processing */
	if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) {
		/* FASTCLOSE is only honoured when it echoes our own key */
		if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) &&
		    msk->local_key == mp_opt.rcvr_key) {
			WRITE_ONCE(msk->rcv_fastclose, true);
			mptcp_schedule_work((struct sock *)msk);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);
		}

		/* ADD_ADDR with a bad HMAC is silently dropped */
		if ((mp_opt.suboptions & OPTION_MPTCP_ADD_ADDR) &&
		    add_addr_hmac_valid(msk, &mp_opt)) {
			if (!mp_opt.echo) {
				mptcp_pm_add_addr_received(sk, &mp_opt.addr);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
			} else {
				mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
				mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
			}

			if (mp_opt.addr.port)
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD);
		}

		if (mp_opt.suboptions & OPTION_MPTCP_RM_ADDR)
			mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list);

		if (mp_opt.suboptions & OPTION_MPTCP_PRIO) {
			mptcp_pm_mp_prio_received(sk, mp_opt.backup);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX);
		}

		if (mp_opt.suboptions & OPTION_MPTCP_FAIL) {
			mptcp_pm_mp_fail_received(sk, mp_opt.fail_seq);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILRX);
		}

		/* the reset flags are recorded here and acted upon later */
		if (mp_opt.suboptions & OPTION_MPTCP_RST) {
			subflow->reset_seen = 1;
			subflow->reset_reason = mp_opt.reset_reason;
			subflow->reset_transient = mp_opt.reset_transient;
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTRX);
		}

		/* without a DSS option there is no mapping/ack left to process */
		if (!(mp_opt.suboptions & OPTION_MPTCP_DSS))
			return true;
	}

	/* we can't wait for recvmsg() to update the ack_seq, otherwise
	 * monodirectional flows will stuck
	 */
	if (mp_opt.use_ack)
		ack_update_msk(msk, sk, &mp_opt);

	/* Zero-data-length packets are dropped by the caller and not
	 * propagated to the MPTCP layer, so the skb extension does not
	 * need to be allocated or populated. DATA_FIN information, if
	 * present, needs to be updated here before the skb is freed.
	 */
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
			mptcp_schedule_work((struct sock *)msk);

		return true;
	}

	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
	if (!mpext)
		return true;

	memset(mpext, 0, sizeof(*mpext));

	/* copy the received mapping into the skb extension for the recv path */
	if (likely(mp_opt.use_map)) {
		if (mp_opt.mpc_map) {
			/* this is an MP_CAPABLE carrying MPTCP data
			 * we know this map the first chunk of data
			 */
			mptcp_crypto_key_sha(subflow->remote_key, NULL,
					     &mpext->data_seq);
			mpext->data_seq++;
			mpext->subflow_seq = 1;
			mpext->dsn64 = 1;
			mpext->mpc_map = 1;
			mpext->data_fin = 0;
		} else {
			mpext->data_seq = mp_opt.data_seq;
			mpext->subflow_seq = mp_opt.subflow_seq;
			mpext->dsn64 = mp_opt.dsn64;
			mpext->data_fin = mp_opt.data_fin;
		}
		mpext->data_len = mp_opt.data_len;
		mpext->use_map = 1;
		mpext->csum_reqd = !!(mp_opt.suboptions & OPTION_MPTCP_CSUMREQD);

		if (mpext->csum_reqd)
			mpext->csum = mp_opt.csum;
	}

	return true;
}
1232
/* Share the msk-level receive window on the outgoing segment: update the
 * advertised TCP window whenever the MPTCP-level right edge moved past the
 * value last sent on any subflow (tracked in msk->rcv_wnd_sent).
 */
static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
{
	const struct sock *ssk = (const struct sock *)tp;
	struct mptcp_subflow_context *subflow;
	u64 ack_seq, rcv_wnd_old, rcv_wnd_new;
	struct mptcp_sock *msk;
	u32 new_win;
	u64 win;

	subflow = mptcp_subflow_ctx(ssk);
	msk = mptcp_sk(subflow->conn);

	ack_seq = READ_ONCE(msk->ack_seq);
	rcv_wnd_new = ack_seq + tp->rcv_wnd;

	rcv_wnd_old = atomic64_read(&msk->rcv_wnd_sent);
	if (after64(rcv_wnd_new, rcv_wnd_old)) {
		u64 rcv_wnd;

		/* try to install our newer right edge; retry on concurrent
		 * updates from other subflows, bailing out to raise_win when
		 * another subflow already published an even newer edge
		 */
		for (;;) {
			rcv_wnd = atomic64_cmpxchg(&msk->rcv_wnd_sent, rcv_wnd_old, rcv_wnd_new);

			if (rcv_wnd == rcv_wnd_old)
				break;
			if (before64(rcv_wnd_new, rcv_wnd)) {
				MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICTUPDATE);
				goto raise_win;
			}
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
			rcv_wnd_old = rcv_wnd;
		}
		return;
	}

	if (rcv_wnd_new != rcv_wnd_old) {
raise_win:
		/* derive the TCP-level window from the last observed
		 * msk-level right edge
		 */
		win = rcv_wnd_old - ack_seq;
		tp->rcv_wnd = min_t(u64, win, U32_MAX);
		new_win = tp->rcv_wnd;

		/* Make sure we do not exceed the maximum possible
		 * scaled window.
		 */
		if (unlikely(th->syn))
			new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
		if (!tp->rx_opt.rcv_wscale &&
		    READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
			new_win = min(new_win, MAX_TCP_WINDOW);
		else
			new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

		/* RFC1323 scaling applied */
		new_win >>= tp->rx_opt.rcv_wscale;
		th->window = htons(new_win);
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED);
	}
}
1290
1291 __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
1292 {
1293         struct csum_pseudo_header header;
1294         __wsum csum;
1295
1296         /* cfr RFC 8684 3.3.1.:
1297          * the data sequence number used in the pseudo-header is
1298          * always the 64-bit value, irrespective of what length is used in the
1299          * DSS option itself.
1300          */
1301         header.data_seq = cpu_to_be64(data_seq);
1302         header.subflow_seq = htonl(subflow_seq);
1303         header.data_len = htons(data_len);
1304         header.csum = 0;
1305
1306         csum = csum_partial(&header, sizeof(header), sum);
1307         return csum_fold(csum);
1308 }
1309
1310 static __sum16 mptcp_make_csum(const struct mptcp_ext *mpext)
1311 {
1312         return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1313                                  ~csum_unfold(mpext->csum));
1314 }
1315
1316 static void put_len_csum(u16 len, __sum16 csum, void *data)
1317 {
1318         __sum16 *sumptr = data + 2;
1319         __be16 *ptr = data;
1320
1321         put_unaligned_be16(len, ptr);
1322
1323         put_unaligned(csum, sumptr);
1324 }
1325
1326 void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
1327                          struct mptcp_out_options *opts)
1328 {
1329         const struct sock *ssk = (const struct sock *)tp;
1330         struct mptcp_subflow_context *subflow;
1331
1332         /* Which options can be used together?
1333          *
1334          * X: mutually exclusive
1335          * O: often used together
1336          * C: can be used together in some cases
1337          * P: could be used together but we prefer not to (optimisations)
1338          *
1339          *  Opt: | MPC  | MPJ  | DSS  | ADD  |  RM  | PRIO | FAIL |  FC  |
1340          * ------|------|------|------|------|------|------|------|------|
1341          *  MPC  |------|------|------|------|------|------|------|------|
1342          *  MPJ  |  X   |------|------|------|------|------|------|------|
1343          *  DSS  |  X   |  X   |------|------|------|------|------|------|
1344          *  ADD  |  X   |  X   |  P   |------|------|------|------|------|
1345          *  RM   |  C   |  C   |  C   |  P   |------|------|------|------|
1346          *  PRIO |  X   |  C   |  C   |  C   |  C   |------|------|------|
1347          *  FAIL |  X   |  X   |  C   |  X   |  X   |  X   |------|------|
1348          *  FC   |  X   |  X   |  X   |  X   |  X   |  X   |  X   |------|
1349          *  RST  |  X   |  X   |  X   |  X   |  X   |  X   |  O   |  O   |
1350          * ------|------|------|------|------|------|------|------|------|
1351          *
1352          * The same applies in mptcp_established_options() function.
1353          */
1354         if (likely(OPTION_MPTCP_DSS & opts->suboptions)) {
1355                 struct mptcp_ext *mpext = &opts->ext_copy;
1356                 u8 len = TCPOLEN_MPTCP_DSS_BASE;
1357                 u8 flags = 0;
1358
1359                 if (mpext->use_ack) {
1360                         flags = MPTCP_DSS_HAS_ACK;
1361                         if (mpext->ack64) {
1362                                 len += TCPOLEN_MPTCP_DSS_ACK64;
1363                                 flags |= MPTCP_DSS_ACK64;
1364                         } else {
1365                                 len += TCPOLEN_MPTCP_DSS_ACK32;
1366                         }
1367                 }
1368
1369                 if (mpext->use_map) {
1370                         len += TCPOLEN_MPTCP_DSS_MAP64;
1371
1372                         /* Use only 64-bit mapping flags for now, add
1373                          * support for optional 32-bit mappings later.
1374                          */
1375                         flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
1376                         if (mpext->data_fin)
1377                                 flags |= MPTCP_DSS_DATA_FIN;
1378
1379                         if (opts->csum_reqd)
1380                                 len += TCPOLEN_MPTCP_DSS_CHECKSUM;
1381                 }
1382
1383                 *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);
1384
1385                 if (mpext->use_ack) {
1386                         if (mpext->ack64) {
1387                                 put_unaligned_be64(mpext->data_ack, ptr);
1388                                 ptr += 2;
1389                         } else {
1390                                 put_unaligned_be32(mpext->data_ack32, ptr);
1391                                 ptr += 1;
1392                         }
1393                 }
1394
1395                 if (mpext->use_map) {
1396                         put_unaligned_be64(mpext->data_seq, ptr);
1397                         ptr += 2;
1398                         put_unaligned_be32(mpext->subflow_seq, ptr);
1399                         ptr += 1;
1400                         if (opts->csum_reqd) {
1401                                 /* data_len == 0 is reserved for the infinite mapping,
1402                                  * the checksum will also be set to 0.
1403                                  */
1404                                 put_len_csum(mpext->data_len,
1405                                              (mpext->data_len ? mptcp_make_csum(mpext) : 0),
1406                                              ptr);
1407                         } else {
1408                                 put_unaligned_be32(mpext->data_len << 16 |
1409                                                    TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
1410                         }
1411                         ptr += 1;
1412                 }
1413
1414                 /* We might need to add MP_FAIL options in rare cases */
1415                 if (unlikely(OPTION_MPTCP_FAIL & opts->suboptions))
1416                         goto mp_fail;
1417         } else if (OPTIONS_MPTCP_MPC & opts->suboptions) {
1418                 u8 len, flag = MPTCP_CAP_HMAC_SHA256;
1419
1420                 if (OPTION_MPTCP_MPC_SYN & opts->suboptions) {
1421                         len = TCPOLEN_MPTCP_MPC_SYN;
1422                 } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
1423                         len = TCPOLEN_MPTCP_MPC_SYNACK;
1424                 } else if (opts->data_len) {
1425                         len = TCPOLEN_MPTCP_MPC_ACK_DATA;
1426                         if (opts->csum_reqd)
1427                                 len += TCPOLEN_MPTCP_DSS_CHECKSUM;
1428                 } else {
1429                         len = TCPOLEN_MPTCP_MPC_ACK;
1430                 }
1431
1432                 if (opts->csum_reqd)
1433                         flag |= MPTCP_CAP_CHECKSUM_REQD;
1434
1435                 if (!opts->allow_join_id0)
1436                         flag |= MPTCP_CAP_DENY_JOIN_ID0;
1437
1438                 *ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
1439                                       MPTCP_SUPPORTED_VERSION,
1440                                       flag);
1441
1442                 if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
1443                     opts->suboptions))
1444                         goto mp_capable_done;
1445
1446                 put_unaligned_be64(opts->sndr_key, ptr);
1447                 ptr += 2;
1448                 if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions))
1449                         goto mp_capable_done;
1450
1451                 put_unaligned_be64(opts->rcvr_key, ptr);
1452                 ptr += 2;
1453                 if (!opts->data_len)
1454                         goto mp_capable_done;
1455
1456                 if (opts->csum_reqd) {
1457                         put_len_csum(opts->data_len,
1458                                      __mptcp_make_csum(opts->data_seq,
1459                                                        opts->subflow_seq,
1460                                                        opts->data_len,
1461                                                        ~csum_unfold(opts->csum)),
1462                                      ptr);
1463                 } else {
1464                         put_unaligned_be32(opts->data_len << 16 |
1465                                            TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
1466                 }
1467                 ptr += 1;
1468
1469                 /* MPC is additionally mutually exclusive with MP_PRIO */
1470                 goto mp_capable_done;
1471         } else if (OPTIONS_MPTCP_MPJ & opts->suboptions) {
1472                 if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
1473                         *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
1474                                               TCPOLEN_MPTCP_MPJ_SYN,
1475                                               opts->backup, opts->join_id);
1476                         put_unaligned_be32(opts->token, ptr);
1477                         ptr += 1;
1478                         put_unaligned_be32(opts->nonce, ptr);
1479                         ptr += 1;
1480                 } else if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
1481                         *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
1482                                               TCPOLEN_MPTCP_MPJ_SYNACK,
1483                                               opts->backup, opts->join_id);
1484                         put_unaligned_be64(opts->thmac, ptr);
1485                         ptr += 2;
1486                         put_unaligned_be32(opts->nonce, ptr);
1487                         ptr += 1;
1488                 } else {
1489                         *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
1490                                               TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
1491                         memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
1492                         ptr += 5;
1493                 }
1494         } else if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
1495                 u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
1496                 u8 echo = MPTCP_ADDR_ECHO;
1497
1498 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1499                 if (opts->addr.family == AF_INET6)
1500                         len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
1501 #endif
1502
1503                 if (opts->addr.port)
1504                         len += TCPOLEN_MPTCP_PORT_LEN;
1505
1506                 if (opts->ahmac) {
1507                         len += sizeof(opts->ahmac);
1508                         echo = 0;
1509                 }
1510
1511                 *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
1512                                       len, echo, opts->addr.id);
1513                 if (opts->addr.family == AF_INET) {
1514                         memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4);
1515                         ptr += 1;
1516                 }
1517 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1518                 else if (opts->addr.family == AF_INET6) {
1519                         memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16);
1520                         ptr += 4;
1521                 }
1522 #endif
1523
1524                 if (!opts->addr.port) {
1525                         if (opts->ahmac) {
1526                                 put_unaligned_be64(opts->ahmac, ptr);
1527                                 ptr += 2;
1528                         }
1529                 } else {
1530                         u16 port = ntohs(opts->addr.port);
1531
1532                         if (opts->ahmac) {
1533                                 u8 *bptr = (u8 *)ptr;
1534
1535                                 put_unaligned_be16(port, bptr);
1536                                 bptr += 2;
1537                                 put_unaligned_be64(opts->ahmac, bptr);
1538                                 bptr += 8;
1539                                 put_unaligned_be16(TCPOPT_NOP << 8 |
1540                                                    TCPOPT_NOP, bptr);
1541
1542                                 ptr += 3;
1543                         } else {
1544                                 put_unaligned_be32(port << 16 |
1545                                                    TCPOPT_NOP << 8 |
1546                                                    TCPOPT_NOP, ptr);
1547                                 ptr += 1;
1548                         }
1549                 }
1550         } else if (unlikely(OPTION_MPTCP_FASTCLOSE & opts->suboptions)) {
1551                 /* FASTCLOSE is mutually exclusive with others except RST */
1552                 *ptr++ = mptcp_option(MPTCPOPT_MP_FASTCLOSE,
1553                                       TCPOLEN_MPTCP_FASTCLOSE,
1554                                       0, 0);
1555                 put_unaligned_be64(opts->rcvr_key, ptr);
1556                 ptr += 2;
1557
1558                 if (OPTION_MPTCP_RST & opts->suboptions)
1559                         goto mp_rst;
1560                 return;
1561         } else if (unlikely(OPTION_MPTCP_FAIL & opts->suboptions)) {
1562 mp_fail:
1563                 /* MP_FAIL is mutually exclusive with others except RST */
1564                 subflow = mptcp_subflow_ctx(ssk);
1565                 subflow->send_mp_fail = 0;
1566
1567                 *ptr++ = mptcp_option(MPTCPOPT_MP_FAIL,
1568                                       TCPOLEN_MPTCP_FAIL,
1569                                       0, 0);
1570                 put_unaligned_be64(opts->fail_seq, ptr);
1571                 ptr += 2;
1572
1573                 if (OPTION_MPTCP_RST & opts->suboptions)
1574                         goto mp_rst;
1575                 return;
1576         } else if (unlikely(OPTION_MPTCP_RST & opts->suboptions)) {
1577 mp_rst:
1578                 *ptr++ = mptcp_option(MPTCPOPT_RST,
1579                                       TCPOLEN_MPTCP_RST,
1580                                       opts->reset_transient,
1581                                       opts->reset_reason);
1582                 return;
1583         }
1584
1585         if (OPTION_MPTCP_PRIO & opts->suboptions) {
1586                 subflow = mptcp_subflow_ctx(ssk);
1587                 subflow->send_mp_prio = 0;
1588
1589                 *ptr++ = mptcp_option(MPTCPOPT_MP_PRIO,
1590                                       TCPOLEN_MPTCP_PRIO,
1591                                       opts->backup, TCPOPT_NOP);
1592
1593                 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPPRIOTX);
1594         }
1595
1596 mp_capable_done:
1597         if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
1598                 u8 i = 1;
1599
1600                 *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
1601                                       TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr,
1602                                       0, opts->rm_list.ids[0]);
1603
1604                 while (i < opts->rm_list.nr) {
1605                         u8 id1, id2, id3, id4;
1606
1607                         id1 = opts->rm_list.ids[i];
1608                         id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP;
1609                         id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP;
1610                         id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP;
1611                         put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr);
1612                         ptr += 1;
1613                         i += 4;
1614                 }
1615         }
1616
1617         if (tp)
1618                 mptcp_set_rwin(tp, th);
1619 }
1620
1621 __be32 mptcp_get_reset_option(const struct sk_buff *skb)
1622 {
1623         const struct mptcp_ext *ext = mptcp_get_ext(skb);
1624         u8 flags, reason;
1625
1626         if (ext) {
1627                 flags = ext->reset_transient;
1628                 reason = ext->reset_reason;
1629
1630                 return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST,
1631                                     flags, reason);
1632         }
1633
1634         return htonl(0u);
1635 }
1636 EXPORT_SYMBOL_GPL(mptcp_get_reset_option);