tcp/dccp: do not care about families in inet_twsk_purge()
net/ipv4/tcp_minisocks.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
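/* Editorial illustration (not part of the original source): with a
 * receive window [s_win, e_win) = [100, 200), tcp_in_window() accepts
 * seq=100/end_seq=100 (a bare ACK at the left edge), seq=90/end_seq=150
 * (it overlaps the window) and seq=200/end_seq=200 (a zero-length
 * segment exactly at the right edge), but rejects seq=200/end_seq=250,
 * which lies entirely beyond the window.
 */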

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	tcptw->tw_rcv_nxt = seq;
}
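/* Editorial illustration (assumes CONFIG_TCP_AO): the unsigned compare
 * above detects a 32-bit sequence wrap-around. If tw_rcv_nxt is
 * 0xffffff00 and the incoming FIN advances it to 0x00000010, then
 * seq < tw_rcv_nxt, so rcv_sne, the upper half of TCP-AO's extended
 * 64-bit sequence space (the SNE of RFC 5925), is bumped by one.
 */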

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
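	/* Editorial illustration (not part of the original source): for a
	 * non-RST segment, tcp_paws_reject() fires when the incoming
	 * timestamp is older than the one we last saw, e.g. ts_recent =
	 * 5000 and rcv_tsval = 4000 is rejected because the echoed clock
	 * went backwards, provided ts_recent was seen within roughly the
	 * last 24 days (TCP_PAWS_24DAYS).
	 */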

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	= TCP_TIME_WAIT;
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check, that it has a newer
	 * sequence number, works at rates < 40Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement; if this SYN turns
	 * out to be an old duplicate (i.e. we receive RST in reply to
	 * SYN-ACK), we must return the socket to time-wait state. It is
	 * not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}
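	/* Editorial illustration (not part of the original source): the ISN
	 * picked above, tw_snd_nxt + 65535 + 2, sits above anything the old
	 * incarnation could have sent, even with a full unscaled 64K window
	 * outstanding, which satisfies requirement (1) of the RFC 1122
	 * reopen rule quoted earlier; e.g. tw_snd_nxt = 1000 gives
	 * isn = 66537.
	 */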

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
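		/* Editorial note (not part of the original source): the shifts
		 * above compute 4*RTO - RTO/2 = 3.5*RTO, comfortably above the
		 * peer's retransmission timeout, matching the rationale in the
		 * TIME-WAIT comment near the top of this file.
		 */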

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
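/* Editorial illustration for tcp_openreq_init_rwin() above (hypothetical
 * numbers): with mss = 1460 and full_space = 65535, a BPF program
 * returning rcv_wnd = 100 via tcp_rwnd_init_bpf() forces full_space up
 * to at least 100 * 1460 = 146000 so the requested initial window fits;
 * with no BPF value, rcv_wnd falls back to the route's RTAX_INITRWND
 * metric before tcp_select_initial_window() runs.
 */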

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *ao_key;
#endif

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;
	ao_key = treq->af_specific->ao_lookup(sk, req,
					      tcp_rsk(req)->ao_keyid, -1);
	if (ao_key)
		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 *
 * Note: If @fastopen is true, this can be called from process context.
 * Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 * It is broken, however: it fails only when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * The malicious sender sends identical SYNs (and thus identical
	 * sequence numbers) to both A and B:
	 *
	 * A: gets SYN, seq=7
	 * B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 * A: sends SYN|ACK, seq=7, ack_seq=8
	 * B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, and the ACK test passes. So
	 * does the sequence test, the SYN is truncated, and thus we consider
	 * it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	 * bare ACK. Otherwise, we create an established connection. Both
	 * ends (listening sockets) accept the new incoming connection and try
	 * to talk to each other. 8-)
	 *
	 * Note: This case is both harmless and rare. The possibility is about
	 * the same as us discovering intelligent life on another planet
	 * tomorrow.
	 *
	 * But generally, we should (the RFC lies!) accept the ACK
	 * from a SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not, hence, we do not too.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating the protocol. All the checks must be made
	 * before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK is not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the listening
		 * socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_SYMBOL(tcp_child_process);