/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
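/*
 * Illustration (not part of the kernel build): the IPV6_V6ONLY option
 * credited above is controlled from user space.  A minimal sketch,
 * assuming a dual-stack host, of one listener accepting both IPv4 and
 * IPv6 connections on the same port when the option is cleared:
 */
#if 0
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int listen_dual_stack(unsigned short port)
{
	struct sockaddr_in6 a6;
	int off = 0;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* 0: also accept IPv4-mapped peers; 1: IPv6 traffic only */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));

	memset(&a6, 0, sizeof(a6));
	a6.sin6_family = AF_INET6;
	a6.sin6_addr = in6addr_any;
	a6.sin6_port = htons(port);
	if (bind(fd, (struct sockaddr *)&a6, sizeof(a6)) < 0 ||
	    listen(fd, 128) < 0)
		return -1;
	return fd;
}
#endif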
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
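/*
 * Illustration (not built): the reuse test above in isolation.  A
 * minimal sketch with hypothetical plain-integer inputs mirroring the
 * condition: the TIME-WAIT bucket must carry a recent peer timestamp
 * and, when a tw bucket pointer was handed in, tcp_tw_reuse must be
 * enabled with at least one second elapsed since that timestamp.
 */
#if 0
static int tw_reusable(long now_sec, long ts_recent_stamp,
		       int tw_reuse, int have_twp)
{
	return ts_recent_stamp &&
	       (!have_twp || (tw_reuse && now_sec - ts_recent_stamp > 1));
}
#endif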
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
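/*
 * Illustration (not built): tcp_v4_connect() is reached through the
 * connect() system call on an AF_INET stream socket.  A minimal
 * user-space sketch, assuming a reachable peer at 192.0.2.1:80:
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_example(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;	/* anything else yields -EAFNOSUPPORT */
	dst.sin_port   = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	/* drives tcp_v4_connect(): route lookup, ephemeral port, SYN */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif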
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
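/*
 * Illustration (not built): user space can observe the path MTU that
 * the logic above maintains.  A minimal sketch, assuming a connected
 * TCP socket fd, using the IP_MTU getsockopt:
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int path_mtu(int fd)
{
	int mtu;
	socklen_t len = sizeof(mtu);

	/* valid on connected sockets; reflects PMTU updates from ICMP */
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}
#endif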
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now.
			 */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to treat as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
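/*
 * Illustration (not built): the TCP checksum computed above covers a
 * pseudo header of saddr, daddr, protocol and TCP length, followed by
 * the TCP header and payload.  A minimal stand-alone sketch of the
 * one's-complement sum, assuming addresses given as host-order values
 * and a contiguous segment buffer of less than 64 KiB in wire order:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *seg, size_t len)
{
	uint64_t sum = 0;
	size_t i;

	/* pseudo header: src, dst, zero byte + proto 6, TCP length */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6 + len;

	/* segment itself, 16 bits at a time, odd trailing byte padded */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += seg[len - 1] << 8;

	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* 0 in the packet means 0xffff */
}
#endif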
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket.  We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of asymmetric
	 * routes.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
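/*
 * Illustration (not built): the right shift required by RFC 7323 above.
 * A minimal sketch with made-up numbers: a 1 MiB receive window and a
 * window scale of 7 put 8192 on the wire, which the peer scales back up.
 */
#if 0
#include <stdint.h>

static uint16_t wire_window(uint32_t rcv_wnd, uint8_t rcv_wscale)
{
	return (uint16_t)(rcv_wnd >> rcv_wscale);	/* 1048576 >> 7 == 8192 */
}
#endif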
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
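/*
 * Illustration (not built): the parser above is reached via the
 * TCP_MD5SIG socket option.  A minimal user-space sketch that binds an
 * RFC 2385 key to a hypothetical peer 192.0.2.1:
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;	/* anything else is rejected above */
	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
	md5.tcpm_keylen = strlen(secret);	/* <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);

	/* keylen == 0 would delete the key for that address instead */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif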
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));
		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
				skb_queue_len(&tp->ucopy.prequeue));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only the socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Only a few socket backlogs are likely to be non-empty
	 * concurrently.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
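/*
 * Illustration (not built): the drop decision above, in isolation.  A
 * minimal sketch with hypothetical plain-integer inputs: a segment is
 * refused once charged receive memory plus its truesize would exceed
 * rcvbuf + sndbuf plus the 64 KiB headroom.
 */
#if 0
#include <stdint.h>

static int backlog_would_drop(uint32_t used, uint32_t truesize,
			      uint32_t rcvbuf, uint32_t sndbuf)
{
	uint32_t limit = rcvbuf + sndbuf + 64 * 1024;	/* headroom, as above */

	return used + truesize > limit;
}
#endif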
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, &refcounted);
	if (!sk)
		goto no_tcp_socket;
process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket following cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
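/*
 * Illustration (not built): the seq_printf format above is what user
 * space sees in /proc/net/tcp.  A minimal sketch parsing the address,
 * port and state columns from each line of that file:
 */
#if 0
#include <stdio.h>

static void dump_proc_net_tcp(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return;
	fgets(line, sizeof(line), f);	/* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		unsigned int local, rem, state;
		unsigned int lport, rport;

		/* fields are hex; addresses are in network byte order */
		if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
			   &local, &lport, &rem, &rport, &state) == 5)
			printf("%08X:%u -> %08X:%u st %02X\n",
			       local, lport, rem, rport, state);
	}
	fclose(f);
}
#endif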
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
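/*
 * Illustration (not built): each per-netns default above surfaces under
 * /proc/sys/net/ipv4/ (sysctl_tcp_syncookies -> net.ipv4.tcp_syncookies,
 * and so on).  A minimal sketch reading one of them back from user space:
 */
#if 0
#include <stdio.h>

static int read_tcp_syncookies(void)
{
	int val = -1;
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");

	if (f) {
		fscanf(f, "%d", &val);	/* 1 by default, per tcp_sk_init() */
		fclose(f);
	}
	return val;
}
#endif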
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}