// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
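
/*
 * Editorial sketch (not part of the original file): a worked example of the
 * acceptance test above, assuming rcv_nxt == 100 and rcv_wnd == 10, i.e. the
 * receive window is [100, 110):
 *
 *	tcp_in_window(100, 100, 100, 110) -> true  (bare ACK at the left edge)
 *	tcp_in_window( 95, 105, 100, 110) -> true  (segment overlaps the window)
 *	tcp_in_window( 90,  95, 100, 110) -> false (old duplicate, fully below)
 *	tcp_in_window(110, 110, 100, 110) -> true  (zero-length at the right edge)
 */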

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK_OOW;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
				u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}
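
/*
 * Editorial sketch: the TCP-AO sequence-number-extension (SNE) bump above
 * fires when the new rcv_nxt wraps around the 32-bit sequence space, e.g.:
 *
 *	rcv_nxt (old) = 0xffffff00
 *	seq     (new) = 0x00000010	(numerically smaller, so seq < rcv_nxt)
 *
 * In that case rcv_sne, which serves as the upper bits of the logical
 * extended sequence number used by TCP-AO (RFC 5925), is incremented by one.
 */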

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, the tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th, u32 *tw_isn,
			   enum skb_drop_reason *drop_reason)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
	struct tcp_options_received tmp_opt;
	bool paws_reject = false;
	int ts_recent_stamp;

	tmp_opt.saw_tstamp = 0;
	ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
	if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
			tmp_opt.ts_recent_stamp = ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   rcv_nxt,
				   rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
				    rcv_nxt);

		if (tmp_opt.saw_tstamp) {
			u64 ts = tcp_clock_ms();

			WRITE_ONCE(tw->tw_entry_stamp, ts);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   div_u64(ts, MSEC_PER_SEC));
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment here may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   ktime_get_seconds());
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if paws works, it is reliable AND even more,
	   we even may relax silly seq space cutoff.

	   RED-PEN: we violate main RFC requirement, if this SYN will appear
	   to be an old duplicate (i.e. we receive RST in reply to SYN-ACK),
	   we must return the socket to time-wait state. It is not good,
	   but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
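		/*
		 * Editorial note (an assumption about intent, not normative):
		 * the "+ 65535 + 2" below appears to serve RFC 1122's reopen
		 * rule, that the new connection's ISN must be larger than the
		 * largest sequence number used on the previous incarnation;
		 * tw_snd_nxt plus a maximal unscaled window (65535) plus a
		 * small margin clears anything the old incarnation could
		 * still have in flight.
		 */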
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		*tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject) {
		*drop_reason = SKB_DROP_REASON_TCP_RFC7323_TW_PAWS;
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWS_TW_REJECTED);
	}

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_IPV6_MOD(tcp_timewait_state_process);

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
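		/*
		 * Editorial note: the shift expression below computes
		 * 4*RTO - RTO/2 == 3.5 * RTO, the RTO-derived lower bound
		 * on the FIN-WAIT-2/TIME-WAIT timeout (see the comment
		 * above tcp_timewait_state_process()).
		 */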
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark = sk->sk_mark;
		tw->tw_priority = READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		/* refreshed when we enter true TIME-WAIT state */
		tw->tw_entry_stamp = tcp_time_stamp_ms(tp);
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tw->tw_usec_ts = tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay = tp->tcp_tx_delay;
		tw->tw_txhash = sk->sk_txhash;
		tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
		tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
#endif
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_IPV6_MOD_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tcp_ecn_mode_set(tp, inet_rsk(req)->ecn_ok ?
			     TCP_ECN_MODE_RFC3168 :
			     TCP_ECN_DISABLED);
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_IPV6_MOD_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
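	/*
	 * Editorial note (our reading of the code path, not from the original
	 * comments): the skb here is normally the third, handshake-completing
	 * segment, whose window field is subject to the negotiated scale per
	 * RFC 7323; hence the shift by snd_wscale below.
	 */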
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
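		/*
		 * Editorial note: both branches below convert snt_synack,
		 * recorded in microseconds, into the clock the new socket
		 * will use: raw microseconds when usec timestamps are on,
		 * otherwise TCP_TS_HZ (millisecond) units via div_u64().
		 */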
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;

	if (tcp_rsk_used_ao(req)) {
		struct tcp_ao_key *ao_key;

		ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
		if (ao_key)
			newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
	}
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation, and another inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 * Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen,
			   enum skb_drop_reason *drop_reason)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool tsecr_reject = false;
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr) {
				if (inet_rsk(req)->tstamp_ok && !fastopen)
					tsecr_reject = !between(tmp_opt.rcv_tsecr,
							tcp_rsk(req)->snt_tsval_first,
							READ_ONCE(tcp_rsk(req)->snt_tsval_last));
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			}
			/* We do not store the true stamp, but it is not required,
			 * it can be estimated (approximately) from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (RFC lies!) accept ACK
	   from SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before attempt to create socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* RFC793: "first check sequence number". */

	if (paws_reject || tsecr_reject ||
	    !tcp_in_window(TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq,
			   tcp_rsk(req)->rcv_nxt,
			   tcp_rsk(req)->rcv_nxt +
			   tcp_synack_window(req))) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject) {
			SKB_DR_SET(*drop_reason, TCP_RFC7323_PAWS);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		} else if (tsecr_reject) {
			SKB_DR_SET(*drop_reason, TCP_RFC7323_TSECR);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TSECRREJECTED);
		} else {
			SKB_DR_SET(*drop_reason, TCP_OVERWINDOW);
		}
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && tmp_opt.saw_tstamp &&
	    !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	SKB_DR_SET(*drop_reason, TCP_LISTEN_OVERFLOW);
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_IPV6_MOD(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_IPV6_MOD(tcp_child_process);