/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
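/* Note on tcp_twsk_unique() above (added remark, not from the original
 * source): the new write_seq starts 65535 + 2 past tw_snd_nxt so that the
 * new incarnation's sequence space lies beyond anything the old peer could
 * still accept inside its maximum (unscaled) 64 KB window; the +2 leaves
 * room for the SYN and FIN, which each consume one sequence number.
 */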
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
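/* Illustrative userspace view of tcp_v4_connect() (added sketch, not from
 * the original source): a connect(2) on an IPv4 stream socket reaches this
 * function via inet_stream_connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * which routes the flow, has inet_hash_connect() pick an ephemeral source
 * port, moves the socket to SYN-SENT and emits the SYN via tcp_connect().
 */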
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
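/* Worked example for tcp_v4_mtu_reduced() (added remark, not from the
 * original source): if the path MTU drops to 1400 while icsk_pmtu_cookie
 * is still 1500, tcp_sync_mss(sk, 1400) caps the MSS at roughly
 * 1400 - sizeof(struct iphdr) - sizeof(struct tcphdr) = 1360 bytes, and
 * tcp_simple_retransmit() resends the queued, now oversized, segments.
 */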
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */
	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
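/* Added remark (not from the original source): with CHECKSUM_PARTIAL only
 * the pseudo-header sum is stored in th->check; the NIC (or
 * skb_checksum_help()) later folds in the TCP header and payload starting
 * at csum_start, writing the result at csum_offset. In one's-complement
 * arithmetic the pseudo-header sum covers:
 *
 *	saddr + daddr + htons(IPPROTO_TCP) + htons(tcp_segment_length)
 */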
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */
static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk) ||
					   lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
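/* Illustrative userspace view (added sketch, not from the original source):
 * keys normally enter tcp_md5_do_add() through setsockopt(TCP_MD5SIG),
 * parsed by tcp_v4_parse_md5_keys() below:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */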
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast addresses */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
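/* Added remark (not from the original source): the prequeue path above is
 * bypassed entirely when net.ipv4.tcp_low_latency is set, trading the
 * process-context batching for lower per-segment latency; it also only
 * engages while a reader is blocked in tcp_recvmsg() (tp->ucopy.task).
 */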
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk = NULL;

		sk = req->rsk_listener;
		if (tcp_v4_inbound_md5_hash(sk, skb))
			goto discard_and_relse;
		if (likely(sk->sk_state == TCP_LISTEN)) {
			nsk = tcp_check_req(sk, skb, req, false);
		} else {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_it;
		}
		if (nsk == sk) {
			sock_hold(sk);
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_it;
		} else {
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
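/* Added remark (not from the original source): in the TCP_SKB_CB() setup
 * in tcp_v4_rcv(), SYN and FIN each consume one unit of sequence space,
 * e.g. a data-less SYN with seq 1000 yields end_seq 1001, while a 100-byte
 * segment with seq 1000 and FIN set yields end_seq 1101.
 */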
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
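/* Example /proc/net/tcp line as produced by the helpers above (added
 * illustration with made-up values, not from the original source):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when ...
 *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 ...
 *
 * i.e. 127.0.0.1:22 in TCP_LISTEN (0x0A); addresses and ports are the raw
 * __be32/__be16 values printed with %X, hence byte-swapped on little-endian.
 */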
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}