// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

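/* A rough restatement of the RFC 793-style acceptability test that
 * tcp_in_window() implements for a receive window [s_win, e_win),
 * using modulo-2^32 sequence comparisons (names as in the function,
 * no new assumptions):
 *
 *	seq == s_win			zero-length segment at the left edge:
 *					acceptable
 *	end_seq > s_win &&
 *	seq < e_win			segment overlaps the window: acceptable
 *	seq == e_win == end_seq		zero-length segment at the right edge:
 *					acceptable
 *	anything else			out of window
 */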
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK_OOW;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

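/* With TCP-AO, ao->rcv_sne is the RFC 5925-style Sequence Number Extension:
 * in effect the upper 32 bits of a logical 64-bit receive sequence space.
 * seq < rcv_nxt on an advancing update can only mean the 32-bit sequence
 * numbers wrapped (e.g. rcv_nxt near 0xffffffff moving to a small seq),
 * so the extension is bumped by one.
 */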
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
				u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated
 *   from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
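/* Roughly, the contract with the IPv4/IPv6 receive paths:
 *
 *	TCP_TW_SUCCESS - segment was consumed or dropped here, nothing to send
 *	TCP_TW_RST     - caller should answer with a reset
 *	TCP_TW_ACK     - caller should answer with an ACK (a valid FIN moved
 *			 the FIN-WAIT-2 socket into true TIME-WAIT)
 *	TCP_TW_ACK_OOW - caller should ACK an out-of-window segment and
 *			 release the tw reference itself
 *	TCP_TW_SYN     - acceptable new SYN, the caller may reopen the
 *			 connection, seeding its initial sequence number
 *			 from *tw_isn
 */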
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th, u32 *tw_isn,
			   enum skb_drop_reason *drop_reason)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
	struct tcp_options_received tmp_opt;
	bool paws_reject = false;
	int ts_recent_stamp;

	tmp_opt.saw_tstamp = 0;
	ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
	if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
			tmp_opt.ts_recent_stamp = ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   rcv_nxt,
				   rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
				    rcv_nxt);

		if (tmp_opt.saw_tstamp) {
			u64 ts = tcp_clock_ms();

			WRITE_ONCE(tw->tw_entry_stamp, ts);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   div_u64(ts, MSEC_PER_SEC));
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   ktime_get_seconds());
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check, that it carries a
	 * newer sequence number, works at rates <40Mbit/sec.
	 * However, if PAWS works, it is reliable, and moreover
	 * we may even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive RST in reply to
	 * SYN-ACK), we must return the socket to time-wait state. It is
	 * not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		*tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject) {
		*drop_reason = SKB_DROP_REASON_TCP_RFC7323_TW_PAWS;
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWS_TW_REJECTED);
	}

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_IPV6_MOD(tcp_timewait_state_process);

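/* A simplified sketch (not the exact tcp_v4_rcv() code) of how a receive
 * path might consume the status returned above; tcp_v4_timewait_ack() is
 * the IPv4 helper that answers with an ACK:
 *
 *	switch (tcp_timewait_state_process(tw, skb, th, &isn, &reason)) {
 *	case TCP_TW_SYN:
 *		// re-dispatch skb to a listening socket as a fresh
 *		// connection attempt, seeding the child with isn
 *		break;
 *	case TCP_TW_ACK:
 *	case TCP_TW_ACK_OOW:
 *		tcp_v4_timewait_ack(sk, skb);	// _OOW: also put the tw ref
 *		break;
 *	case TCP_TW_RST:
 *		// answer with a reset and kill the timewait socket
 *		break;
 *	case TCP_TW_SUCCESS:
 *		break;				// fully handled above
 *	}
 */
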
static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

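/* Pairing note: the static_key_fast_inc_not_disabled() and
 * tcp_md5_add_sigpool() taken in tcp_time_wait_init() above are undone by
 * static_branch_slow_dec_deferred() and tcp_md5_release_sigpool() in
 * tcp_md5_twsk_free_rcu() once the timewait socket dies (see below).
 */
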
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
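		/* rto == 3.5 * icsk_rto: the floor applied to timeo below,
		 * per the TIME-WAIT timeout discussion at the top of
		 * tcp_timewait_state_process().
		 */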

		tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark = sk->sk_mark;
		tw->tw_priority = READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		/* refreshed when we enter true TIME-WAIT state */
		tw->tw_entry_stamp = tcp_time_stamp_ms(tp);
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tw->tw_usec_ts = tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay = tp->tcp_tx_delay;
		tw->tw_txhash = sk->sk_txhash;
		tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
		tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
#endif
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_IPV6_MOD_GPL(tcp_twsk_destructor);

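/* With a per-netns ehash (pernet hashinfo), each dying netns below purges
 * its own table; with the shared global tcp_hashinfo, one purge pass
 * covers the whole exit list, hence the purged_once latch.
 */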
void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}

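/* A worked example of the clamping above, under assumed values: with
 * mss = 1460 and a BPF-provided rcv_wnd = 100, an initial full_space of
 * 65535 is below 100 * 1460 = 146000, so full_space is raised to 146000
 * before tcp_select_initial_window() picks the window and window scale
 * advertised on the SYN-ACK.
 */
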
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tcp_ecn_mode_set(tp, inet_rsk(req)->ecn_ok ?
			     TCP_ECN_MODE_RFC3168 :
			     TCP_ECN_DISABLED);
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_IPV6_MOD_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;

	if (tcp_rsk_used_ao(req)) {
		struct tcp_ao_key *ao_key;

		ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
		if (ao_key)
			newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
	}
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 * Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen,
			   enum skb_drop_reason *drop_reason)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool tsecr_reject = false;
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr) {
				if (inet_rsk(req)->tstamp_ok && !fastopen)
					tsecr_reject = !between(tmp_opt.rcv_tsecr,
							tcp_rsk(req)->snt_tsval_first,
							READ_ONCE(tcp_rsk(req)->snt_tsval_last));
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			}
			/* We do not store the true timestamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 * It is broken, however: it fails only
	 * when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * Malicious sender sends identical SYNs (and thus identical sequence
	 * numbers) to both A and B:
	 *
	 * A: gets SYN, seq=7
	 * B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 * A: sends SYN|ACK, seq=7, ack_seq=8
	 * B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, ACK test passes. So
	 * does sequence test, SYN is truncated, and thus we consider
	 * it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	 * bare ACK. Otherwise, we create an established connection. Both
	 * ends (listening sockets) accept the new incoming connection and try
	 * to talk to each other. 8-)
	 *
	 * Note: This case is both harmless, and rare. The possibility is about
	 * the same as us discovering intelligent life on another planet
	 * tomorrow.
	 *
	 * But generally, we should (RFC lies!) accept ACK
	 * from SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not, hence, we do not too.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating protocol. All the checks must be made
	 * before attempting to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* RFC793: "first check sequence number". */

	if (paws_reject || tsecr_reject ||
	    !tcp_in_window(TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq,
			   tcp_rsk(req)->rcv_nxt,
			   tcp_rsk(req)->rcv_nxt +
			   tcp_synack_window(req))) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject) {
			SKB_DR_SET(*drop_reason, TCP_RFC7323_PAWS);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		} else if (tsecr_reject) {
			SKB_DR_SET(*drop_reason, TCP_RFC7323_TSECR);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TSECRREJECTED);
		} else {
			SKB_DR_SET(*drop_reason, TCP_OVERWINDOW);
		}
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && tmp_opt.saw_tstamp &&
	    !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	SKB_DR_SET(*drop_reason, TCP_LISTEN_OVERFLOW);
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_IPV6_MOD(tcp_check_req);
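
/* Roughly, tcp_check_req()'s contract with its callers: it returns the new
 * child socket once the handshake completes (or sk itself for Fast Open),
 * NULL when the segment was answered or dropped here, and the original sk
 * on an invalid ACK so that the caller sends the reset (see the "Invalid
 * ACK" comment above).
 */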

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_IPV6_MOD(tcp_child_process);