2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
54 #include <linux/bottom_half.h>
55 #include <linux/types.h>
56 #include <linux/fcntl.h>
57 #include <linux/module.h>
58 #include <linux/random.h>
59 #include <linux/cache.h>
60 #include <linux/jhash.h>
61 #include <linux/init.h>
62 #include <linux/times.h>
63 #include <linux/slab.h>
65 #include <net/net_namespace.h>
67 #include <net/inet_hashtables.h>
69 #include <net/transp_v6.h>
71 #include <net/inet_common.h>
72 #include <net/timewait_sock.h>
74 #include <net/netdma.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
84 #include <linux/crypto.h>
85 #include <linux/scatterlist.h>
87 int sysctl_tcp_tw_reuse __read_mostly;
88 int sysctl_tcp_low_latency __read_mostly;
89 EXPORT_SYMBOL(sysctl_tcp_low_latency);
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
100 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
105 tcp_hdr(skb)->source);
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 struct tcp_sock *tp = tcp_sk(sk);
113 /* With PAWS, it is safe from the viewpoint
114 of data integrity. Even without PAWS it is safe provided sequence
115 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117 Actually, the idea is close to VJ's one, only the timestamp cache is
118 held not per host, but per port pair, and the TW bucket is used as state holder.
121 If the TW bucket has already been destroyed we fall back to VJ's scheme
122 and use the initial timestamp retrieved from the peer table.
124 if (tcptw->tw_ts_recent_stamp &&
125 (twp == NULL || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
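/* Start the new connection's send sequence space well past the old
 * one's (64K + 2), so segments from the two incarnations cannot be
 * confused even if old duplicates are still in flight.
 */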
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 if (tp->write_seq == 0)
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 struct inet_sock *inet = inet_sk(sk);
145 struct tcp_sock *tp = tcp_sk(sk);
146 __be16 orig_sport, orig_dport;
147 __be32 daddr, nexthop;
151 struct ip_options_rcu *inet_opt;
153 if (addr_len < sizeof(struct sockaddr_in))
156 if (usin->sin_family != AF_INET)
157 return -EAFNOSUPPORT;
159 nexthop = daddr = usin->sin_addr.s_addr;
160 inet_opt = rcu_dereference_protected(inet->inet_opt,
161 sock_owned_by_user(sk));
162 if (inet_opt && inet_opt->opt.srr) {
165 nexthop = inet_opt->opt.faddr;
168 orig_sport = inet->inet_sport;
169 orig_dport = usin->sin_port;
170 fl4 = &inet->cork.fl.u.ip4;
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 orig_sport, orig_dport, sk, true);
177 if (err == -ENETUNREACH)
178 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
187 if (!inet_opt || !inet_opt->opt.srr)
190 if (!inet->inet_saddr)
191 inet->inet_saddr = fl4->saddr;
192 inet->inet_rcv_saddr = inet->inet_saddr;
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 /* Reset inherited state */
196 tp->rx_opt.ts_recent = 0;
197 tp->rx_opt.ts_recent_stamp = 0;
201 if (tcp_death_row.sysctl_tw_recycle &&
202 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
203 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
205 * VJ's idea. We save the last timestamp seen from
206 * the destination in the peer table, when entering state
207 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
208 * when trying a new connection.
211 inet_peer_refcheck(peer);
212 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
213 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
214 tp->rx_opt.ts_recent = peer->tcp_ts;
219 inet->inet_dport = usin->sin_port;
220 inet->inet_daddr = daddr;
222 inet_csk(sk)->icsk_ext_hdr_len = 0;
224 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
226 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
228 /* Socket identity is still unknown (sport may be zero).
229 * However we set state to SYN-SENT and, without releasing the socket
230 * lock, select a source port, enter ourselves into the hash tables and
231 * complete initialization after this.
233 tcp_set_state(sk, TCP_SYN_SENT);
234 err = inet_hash_connect(&tcp_death_row, sk);
238 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
239 inet->inet_sport, inet->inet_dport, sk);
245 /* OK, now commit destination to socket. */
246 sk->sk_gso_type = SKB_GSO_TCPV4;
247 sk_setup_caps(sk, &rt->dst);
250 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
255 inet->inet_id = tp->write_seq ^ jiffies;
257 err = tcp_connect(sk);
266 * This unhashes the socket and releases the local port,
269 tcp_set_state(sk, TCP_CLOSE);
271 sk->sk_route_caps = 0;
272 inet->inet_dport = 0;
275 EXPORT_SYMBOL(tcp_v4_connect);
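/* For orientation only: tcp_v4_connect() is reached from sys_connect() on an
 * unconnected AF_INET stream socket. A rough userspace sketch (address and
 * port are invented for illustration):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port   = htons(80) };
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 */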
278 * This routine does path mtu discovery as defined in RFC1191.
280 static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
282 struct dst_entry *dst;
283 struct inet_sock *inet = inet_sk(sk);
285 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
286 * sent out by Linux are always < 576 bytes so they should go through unfragmented).
289 if (sk->sk_state == TCP_LISTEN)
292 /* We don't check in the dst entry if pmtu discovery is forbidden
293 * on this route. We just assume that no packet-too-big packets
294 * are sent back when pmtu discovery is not active.
295 * There is a small race when the user changes this flag in the
296 * route, but I think that's acceptable.
298 if ((dst = __sk_dst_check(sk, 0)) == NULL)
301 dst->ops->update_pmtu(dst, mtu);
303 /* Something is about to go wrong... Remember the soft error
304 * for the case that this connection will not be able to recover.
306 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
307 sk->sk_err_soft = EMSGSIZE;
311 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
312 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
313 tcp_sync_mss(sk, mtu);
315 /* Resend the TCP packet because it's
316 * clear that the old packet has been
317 * dropped. This is the new "fast" path mtu discovery.
320 tcp_simple_retransmit(sk);
321 } /* else let the usual retransmit timer handle it */
325 * This routine is called by the ICMP module when it gets some
326 * sort of error condition. If err < 0 then the socket should
327 * be closed and the error returned to the user. If err > 0
328 * it's just the icmp type << 8 | icmp code. After adjustment
329 * header points to the first 8 bytes of the tcp header. We need
330 * to find the appropriate port.
332 * The locking strategy used here is very "optimistic". When
333 * someone else accesses the socket the ICMP is just dropped
334 * and for some paths there is no check at all.
335 * A more general error queue to queue errors for later handling
336 * is probably better.
340 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
342 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
343 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
344 struct inet_connection_sock *icsk;
346 struct inet_sock *inet;
347 const int type = icmp_hdr(icmp_skb)->type;
348 const int code = icmp_hdr(icmp_skb)->code;
354 struct net *net = dev_net(icmp_skb->dev);
356 if (icmp_skb->len < (iph->ihl << 2) + 8) {
357 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
361 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
362 iph->saddr, th->source, inet_iif(icmp_skb));
364 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
367 if (sk->sk_state == TCP_TIME_WAIT) {
368 inet_twsk_put(inet_twsk(sk));
373 /* If too many ICMPs get dropped on busy
374 * servers this needs to be solved differently.
376 if (sock_owned_by_user(sk))
377 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
379 if (sk->sk_state == TCP_CLOSE)
382 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
383 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
389 seq = ntohl(th->seq);
390 if (sk->sk_state != TCP_LISTEN &&
391 !between(seq, tp->snd_una, tp->snd_nxt)) {
392 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
397 case ICMP_SOURCE_QUENCH:
398 /* Just silently ignore these. */
400 case ICMP_PARAMETERPROB:
403 case ICMP_DEST_UNREACH:
404 if (code > NR_ICMP_UNREACH)
407 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
408 if (!sock_owned_by_user(sk))
409 do_pmtu_discovery(sk, iph, info);
413 err = icmp_err_convert[code].errno;
414 /* check if icmp_skb allows revert of backoff
415 * (see draft-zimmermann-tcp-lcd) */
416 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
418 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
422 if (sock_owned_by_user(sk))
425 icsk->icsk_backoff--;
426 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
427 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
430 skb = tcp_write_queue_head(sk);
433 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
434 tcp_time_stamp - TCP_SKB_CB(skb)->when);
437 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
438 remaining, TCP_RTO_MAX);
440 /* RTO revert clocked out retransmission.
441 * Will retransmit now */
442 tcp_retransmit_timer(sk);
446 case ICMP_TIME_EXCEEDED:
453 switch (sk->sk_state) {
454 struct request_sock *req, **prev;
456 if (sock_owned_by_user(sk))
459 req = inet_csk_search_req(sk, &prev, th->dest,
460 iph->daddr, iph->saddr);
464 /* ICMPs are not backlogged, hence we cannot get
465 an established socket here.
469 if (seq != tcp_rsk(req)->snt_isn) {
470 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
475 * Still in SYN_RECV, just remove it silently.
476 * There is no good way to pass the error to the newly
477 * created socket, and POSIX does not want network
478 * errors returned from accept().
480 inet_csk_reqsk_queue_drop(sk, req, prev);
484 case TCP_SYN_RECV: /* Cannot happen.
485 It can, for example, if SYNs crossed.
487 if (!sock_owned_by_user(sk)) {
490 sk->sk_error_report(sk);
494 sk->sk_err_soft = err;
499 /* If we've already connected we will keep trying
500 * until we time out, or the user gives up.
502 * rfc1122 4.2.3.9 allows us to consider as hard errors
503 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
504 * but it is obsoleted by pmtu discovery).
506 * Note that in the modern internet, where routing is unreliable
507 * and in each dark corner broken firewalls sit sending random
508 * errors ordered by their masters, even these two messages finally lose
509 * their original sense (even Linux sends invalid PORT_UNREACHs).
511 * Now we are in compliance with RFCs.
516 if (!sock_owned_by_user(sk) && inet->recverr) {
518 sk->sk_error_report(sk);
519 } else { /* Only an error on timeout */
520 sk->sk_err_soft = err;
528 static void __tcp_v4_send_check(struct sk_buff *skb,
529 __be32 saddr, __be32 daddr)
531 struct tcphdr *th = tcp_hdr(skb);
533 if (skb->ip_summed == CHECKSUM_PARTIAL) {
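/* Checksum offload: store only the pseudo-header sum here and let the
 * device fill in the final checksum at csum_start + csum_offset.
 */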
534 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
535 skb->csum_start = skb_transport_header(skb) - skb->head;
536 skb->csum_offset = offsetof(struct tcphdr, check);
538 th->check = tcp_v4_check(skb->len, saddr, daddr,
545 /* This routine computes an IPv4 TCP checksum. */
546 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
548 const struct inet_sock *inet = inet_sk(sk);
550 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
552 EXPORT_SYMBOL(tcp_v4_send_check);
554 int tcp_v4_gso_send_check(struct sk_buff *skb)
556 const struct iphdr *iph;
559 if (!pskb_may_pull(skb, sizeof(*th)))
566 skb->ip_summed = CHECKSUM_PARTIAL;
567 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
572 * This routine will send an RST to the other tcp.
574 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
576 * Answer: if a packet caused the RST, it is not for a socket
577 * existing in our system; if it is matched to a socket,
578 * it is just a duplicate segment or a bug in the other side's TCP.
579 * So we build the reply based only on the parameters
580 * that arrived with the segment.
581 * Exception: precedence violation. We do not implement it in any case.
584 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
586 const struct tcphdr *th = tcp_hdr(skb);
589 #ifdef CONFIG_TCP_MD5SIG
590 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
593 struct ip_reply_arg arg;
594 #ifdef CONFIG_TCP_MD5SIG
595 struct tcp_md5sig_key *key;
596 const __u8 *hash_location = NULL;
597 unsigned char newhash[16];
599 struct sock *sk1 = NULL;
603 /* Never send a reset in response to a reset. */
607 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
610 /* Swap the send and the receive. */
611 memset(&rep, 0, sizeof(rep));
612 rep.th.dest = th->source;
613 rep.th.source = th->dest;
614 rep.th.doff = sizeof(struct tcphdr) / 4;
618 rep.th.seq = th->ack_seq;
621 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
622 skb->len - (th->doff << 2));
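/* The SYN and FIN flags each consume one sequence number, so the
 * ack_seq computed above covers everything the offending segment
 * occupied in sequence space.
 */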
625 memset(&arg, 0, sizeof(arg));
626 arg.iov[0].iov_base = (unsigned char *)&rep;
627 arg.iov[0].iov_len = sizeof(rep.th);
629 #ifdef CONFIG_TCP_MD5SIG
630 hash_location = tcp_parse_md5sig_option(th);
631 if (!sk && hash_location) {
633 * active side is lost. Try to find the listening socket through the
634 * source port, and then find the md5 key through the listening socket.
635 * We do not lose security here:
636 * the incoming packet is checked against the md5 hash of the found key;
637 * no RST is generated if the md5 hash doesn't match.
639 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
640 &tcp_hashinfo, ip_hdr(skb)->daddr,
641 ntohs(th->source), inet_iif(skb));
642 /* don't send an rst if we can't find the key */
646 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 &ip_hdr(skb)->saddr, AF_INET);
651 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
652 if (genhash || memcmp(hash_location, newhash, 16) != 0)
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
661 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
663 (TCPOPT_MD5SIG << 8) |
665 /* Update length and the length the header thinks exists */
666 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 rep.th.doff = arg.iov[0].iov_len / 4;
669 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 key, ip_hdr(skb)->saddr,
671 ip_hdr(skb)->daddr, &rep.th);
674 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 ip_hdr(skb)->saddr, /* XXX */
676 arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 /* When the socket is gone, all binding information is lost.
680 * Routing might fail in this case. Use iif for oif to
681 * make sure we can deliver it.
683 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
685 net = dev_net(skb_dst(skb)->dev);
686 arg.tos = ip_hdr(skb)->tos;
687 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
688 &arg, arg.iov[0].iov_len);
690 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
691 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
693 #ifdef CONFIG_TCP_MD5SIG
702 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
703 outside of socket context, is certainly ugly. What can I do?
706 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
707 u32 win, u32 ts, int oif,
708 struct tcp_md5sig_key *key,
709 int reply_flags, u8 tos)
711 const struct tcphdr *th = tcp_hdr(skb);
714 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
715 #ifdef CONFIG_TCP_MD5SIG
716 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
720 struct ip_reply_arg arg;
721 struct net *net = dev_net(skb_dst(skb)->dev);
723 memset(&rep.th, 0, sizeof(struct tcphdr));
724 memset(&arg, 0, sizeof(arg));
726 arg.iov[0].iov_base = (unsigned char *)&rep;
727 arg.iov[0].iov_len = sizeof(rep.th);
729 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
730 (TCPOPT_TIMESTAMP << 8) |
732 rep.opt[1] = htonl(tcp_time_stamp);
733 rep.opt[2] = htonl(ts);
734 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 /* Swap the send and the receive. */
738 rep.th.dest = th->source;
739 rep.th.source = th->dest;
740 rep.th.doff = arg.iov[0].iov_len / 4;
741 rep.th.seq = htonl(seq);
742 rep.th.ack_seq = htonl(ack);
744 rep.th.window = htons(win);
746 #ifdef CONFIG_TCP_MD5SIG
748 int offset = (ts) ? 3 : 0;
750 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
752 (TCPOPT_MD5SIG << 8) |
754 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
755 rep.th.doff = arg.iov[0].iov_len/4;
757 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
758 key, ip_hdr(skb)->saddr,
759 ip_hdr(skb)->daddr, &rep.th);
762 arg.flags = reply_flags;
763 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
764 ip_hdr(skb)->saddr, /* XXX */
765 arg.iov[0].iov_len, IPPROTO_TCP, 0);
766 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
768 arg.bound_dev_if = oif;
770 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
771 &arg, arg.iov[0].iov_len);
773 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
776 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
778 struct inet_timewait_sock *tw = inet_twsk(sk);
779 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
781 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
782 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
785 tcp_twsk_md5_key(tcptw),
786 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
793 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
794 struct request_sock *req)
796 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
797 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
800 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
802 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
807 * Send a SYN-ACK after having received a SYN.
808 * This still operates on a request_sock only, not on a big socket.
811 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
812 struct request_sock *req,
813 struct request_values *rvp)
815 const struct inet_request_sock *ireq = inet_rsk(req);
818 struct sk_buff * skb;
820 /* First, grab a route. */
821 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
824 skb = tcp_make_synack(sk, dst, req, rvp);
827 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
829 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
832 err = net_xmit_eval(err);
839 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
840 struct request_values *rvp)
842 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
843 return tcp_v4_send_synack(sk, NULL, req, rvp);
847 * IPv4 request_sock destructor.
849 static void tcp_v4_reqsk_destructor(struct request_sock *req)
851 kfree(inet_rsk(req)->opt);
855 * Return 1 if a syncookie should be sent
857 int tcp_syn_flood_action(struct sock *sk,
858 const struct sk_buff *skb,
861 const char *msg = "Dropping request";
863 struct listen_sock *lopt;
867 #ifdef CONFIG_SYN_COOKIES
868 if (sysctl_tcp_syncookies) {
869 msg = "Sending cookies";
871 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
874 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
876 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
877 if (!lopt->synflood_warned) {
878 lopt->synflood_warned = 1;
879 pr_info("%s: Possible SYN flooding on port %d. %s. "
880 " Check SNMP counters.\n",
881 proto, ntohs(tcp_hdr(skb)->dest), msg);
885 EXPORT_SYMBOL(tcp_syn_flood_action);
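/* Note: whether cookies are actually sent is controlled at run time by the
 * net.ipv4.tcp_syncookies sysctl (sysctl_tcp_syncookies above); e.g. an
 * administrator would typically enable it with
 * "sysctl -w net.ipv4.tcp_syncookies=1".
 */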
888 * Save and compile IPv4 options into the request_sock if needed.
890 static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
893 const struct ip_options *opt = &(IPCB(skb)->opt);
894 struct ip_options_rcu *dopt = NULL;
896 if (opt && opt->optlen) {
897 int opt_size = sizeof(*dopt) + opt->optlen;
899 dopt = kmalloc(opt_size, GFP_ATOMIC);
901 if (ip_options_echo(&dopt->opt, skb)) {
910 #ifdef CONFIG_TCP_MD5SIG
912 * RFC2385 MD5 checksumming requires a mapping of
913 * IP address->MD5 Key.
914 * We need to maintain these in the sk structure.
917 /* Find the Key structure for an address. */
918 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
919 const union tcp_md5_addr *addr,
922 struct tcp_sock *tp = tcp_sk(sk);
923 struct tcp_md5sig_key *key;
924 struct hlist_node *pos;
925 unsigned int size = sizeof(struct in_addr);
926 struct tcp_md5sig_info *md5sig;
928 /* caller either holds rcu_read_lock() or socket lock */
929 md5sig = rcu_dereference_check(tp->md5sig_info,
930 sock_owned_by_user(sk));
933 #if IS_ENABLED(CONFIG_IPV6)
934 if (family == AF_INET6)
935 size = sizeof(struct in6_addr);
937 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
938 if (key->family != family)
940 if (!memcmp(&key->addr, addr, size))
945 EXPORT_SYMBOL(tcp_md5_do_lookup);
947 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
948 struct sock *addr_sk)
950 union tcp_md5_addr *addr;
952 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
953 return tcp_md5_do_lookup(sk, addr, AF_INET);
955 EXPORT_SYMBOL(tcp_v4_md5_lookup);
957 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
958 struct request_sock *req)
960 union tcp_md5_addr *addr;
962 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
963 return tcp_md5_do_lookup(sk, addr, AF_INET);
966 /* This can be called on a newly created socket, from other files */
967 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
968 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
970 /* Add Key to the list */
971 struct tcp_md5sig_key *key;
972 struct tcp_sock *tp = tcp_sk(sk);
973 struct tcp_md5sig_info *md5sig;
975 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
977 /* Pre-existing entry - just update that one. */
978 memcpy(key->key, newkey, newkeylen);
979 key->keylen = newkeylen;
983 md5sig = rcu_dereference_protected(tp->md5sig_info,
984 sock_owned_by_user(sk));
986 md5sig = kmalloc(sizeof(*md5sig), gfp);
990 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
991 INIT_HLIST_HEAD(&md5sig->head);
992 rcu_assign_pointer(tp->md5sig_info, md5sig);
995 key = sock_kmalloc(sk, sizeof(*key), gfp);
998 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
999 sock_kfree_s(sk, key, sizeof(*key));
1003 memcpy(key->key, newkey, newkeylen);
1004 key->keylen = newkeylen;
1005 key->family = family;
1006 memcpy(&key->addr, addr,
1007 (family == AF_INET6) ? sizeof(struct in6_addr) :
1008 sizeof(struct in_addr));
1009 hlist_add_head_rcu(&key->node, &md5sig->head);
1012 EXPORT_SYMBOL(tcp_md5_do_add);
1014 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1016 struct tcp_sock *tp = tcp_sk(sk);
1017 struct tcp_md5sig_key *key;
1018 struct tcp_md5sig_info *md5sig;
1020 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1023 hlist_del_rcu(&key->node);
1024 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1025 kfree_rcu(key, rcu);
1026 md5sig = rcu_dereference_protected(tp->md5sig_info,
1027 sock_owned_by_user(sk));
1028 if (hlist_empty(&md5sig->head))
1029 tcp_free_md5sig_pool();
1032 EXPORT_SYMBOL(tcp_md5_do_del);
1034 void tcp_clear_md5_list(struct sock *sk)
1036 struct tcp_sock *tp = tcp_sk(sk);
1037 struct tcp_md5sig_key *key;
1038 struct hlist_node *pos, *n;
1039 struct tcp_md5sig_info *md5sig;
1041 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1043 if (!hlist_empty(&md5sig->head))
1044 tcp_free_md5sig_pool();
1045 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1046 hlist_del_rcu(&key->node);
1047 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1048 kfree_rcu(key, rcu);
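/* tcp_v4_parse_md5_keys() below backs the TCP_MD5SIG socket option.
 * A rough userspace sketch (peer address and key are invented for
 * illustration):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	inet_pton(AF_INET, "198.51.100.7", &a->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */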
1052 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1055 struct tcp_md5sig cmd;
1056 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1058 if (optlen < sizeof(cmd))
1061 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1064 if (sin->sin_family != AF_INET)
1067 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1068 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1071 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1074 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1075 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1079 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1080 __be32 daddr, __be32 saddr, int nbytes)
1082 struct tcp4_pseudohdr *bp;
1083 struct scatterlist sg;
1085 bp = &hp->md5_blk.ip4;
1088 * 1. the TCP pseudo-header (in the order: source IP address,
1089 * destination IP address, zero-padded protocol number, and segment length)
1095 bp->protocol = IPPROTO_TCP;
1096 bp->len = cpu_to_be16(nbytes);
1098 sg_init_one(&sg, bp, sizeof(*bp));
1099 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
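/* Per RFC 2385 the MD5 digest then continues, in order, with the TCP
 * header (checksum field zeroed), the segment data, and finally the
 * key itself; see tcp_v4_md5_hash_hdr() and tcp_v4_md5_hash_skb() below.
 */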
1102 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1103 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1105 struct tcp_md5sig_pool *hp;
1106 struct hash_desc *desc;
1108 hp = tcp_get_md5sig_pool();
1110 goto clear_hash_noput;
1111 desc = &hp->md5_desc;
1113 if (crypto_hash_init(desc))
1115 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1117 if (tcp_md5_hash_header(hp, th))
1119 if (tcp_md5_hash_key(hp, key))
1121 if (crypto_hash_final(desc, md5_hash))
1124 tcp_put_md5sig_pool();
1128 tcp_put_md5sig_pool();
1130 memset(md5_hash, 0, 16);
1134 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1135 const struct sock *sk, const struct request_sock *req,
1136 const struct sk_buff *skb)
1138 struct tcp_md5sig_pool *hp;
1139 struct hash_desc *desc;
1140 const struct tcphdr *th = tcp_hdr(skb);
1141 __be32 saddr, daddr;
1144 saddr = inet_sk(sk)->inet_saddr;
1145 daddr = inet_sk(sk)->inet_daddr;
1147 saddr = inet_rsk(req)->loc_addr;
1148 daddr = inet_rsk(req)->rmt_addr;
1150 const struct iphdr *iph = ip_hdr(skb);
1155 hp = tcp_get_md5sig_pool();
1157 goto clear_hash_noput;
1158 desc = &hp->md5_desc;
1160 if (crypto_hash_init(desc))
1163 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1165 if (tcp_md5_hash_header(hp, th))
1167 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1169 if (tcp_md5_hash_key(hp, key))
1171 if (crypto_hash_final(desc, md5_hash))
1174 tcp_put_md5sig_pool();
1178 tcp_put_md5sig_pool();
1180 memset(md5_hash, 0, 16);
1183 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1185 static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1188 * This gets called for each TCP segment that arrives
1189 * so we want to be efficient.
1190 * We have 3 drop cases:
1191 * o No MD5 hash and one expected.
1192 * o MD5 hash and we're not expecting one.
1193 * o MD5 hash and it's wrong.
1195 const __u8 *hash_location = NULL;
1196 struct tcp_md5sig_key *hash_expected;
1197 const struct iphdr *iph = ip_hdr(skb);
1198 const struct tcphdr *th = tcp_hdr(skb);
1200 unsigned char newhash[16];
1202 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1204 hash_location = tcp_parse_md5sig_option(th);
1206 /* We've parsed the options - do we have a hash? */
1207 if (!hash_expected && !hash_location)
1210 if (hash_expected && !hash_location) {
1211 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1215 if (!hash_expected && hash_location) {
1216 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1220 /* Okay, so this is hash_expected and hash_location -
1221 * so we need to calculate the checksum.
1223 genhash = tcp_v4_md5_hash_skb(newhash,
1227 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1228 if (net_ratelimit()) {
1229 printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1230 &iph->saddr, ntohs(th->source),
1231 &iph->daddr, ntohs(th->dest),
1232 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1241 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1243 .obj_size = sizeof(struct tcp_request_sock),
1244 .rtx_syn_ack = tcp_v4_rtx_synack,
1245 .send_ack = tcp_v4_reqsk_send_ack,
1246 .destructor = tcp_v4_reqsk_destructor,
1247 .send_reset = tcp_v4_send_reset,
1248 .syn_ack_timeout = tcp_syn_ack_timeout,
1251 #ifdef CONFIG_TCP_MD5SIG
1252 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1253 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1254 .calc_md5_hash = tcp_v4_md5_hash_skb,
1258 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1260 struct tcp_extend_values tmp_ext;
1261 struct tcp_options_received tmp_opt;
1262 const u8 *hash_location;
1263 struct request_sock *req;
1264 struct inet_request_sock *ireq;
1265 struct tcp_sock *tp = tcp_sk(sk);
1266 struct dst_entry *dst = NULL;
1267 __be32 saddr = ip_hdr(skb)->saddr;
1268 __be32 daddr = ip_hdr(skb)->daddr;
1269 __u32 isn = TCP_SKB_CB(skb)->when;
1270 int want_cookie = 0;
1272 /* Never answer SYNs sent to broadcast or multicast */
1273 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1276 /* TW buckets are converted to open requests without
1277 * limitations; they conserve resources and the peer is
1278 * evidently a real one.
1280 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1281 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1286 /* Accept backlog is full. If we have already queued enough
1287 * warm entries in the syn queue, drop the request. It is better than
1288 * clogging the syn queue with openreqs with exponentially increasing timeout.
1291 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1294 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1298 #ifdef CONFIG_TCP_MD5SIG
1299 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1302 tcp_clear_options(&tmp_opt);
1303 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1304 tmp_opt.user_mss = tp->rx_opt.user_mss;
1305 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1307 if (tmp_opt.cookie_plus > 0 &&
1308 tmp_opt.saw_tstamp &&
1309 !tp->rx_opt.cookie_out_never &&
1310 (sysctl_tcp_cookie_size > 0 ||
1311 (tp->cookie_values != NULL &&
1312 tp->cookie_values->cookie_desired > 0))) {
1314 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1315 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1317 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1318 goto drop_and_release;
1320 /* Secret recipe starts with IP addresses */
1321 *mess++ ^= (__force u32)daddr;
1322 *mess++ ^= (__force u32)saddr;
1324 /* plus variable length Initiator Cookie */
1327 *c++ ^= *hash_location++;
1329 want_cookie = 0; /* not our kind of cookie */
1330 tmp_ext.cookie_out_never = 0; /* false */
1331 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1332 } else if (!tp->rx_opt.cookie_in_always) {
1333 /* redundant indications, but ensure initialization. */
1334 tmp_ext.cookie_out_never = 1; /* true */
1335 tmp_ext.cookie_plus = 0;
1337 goto drop_and_release;
1339 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1341 if (want_cookie && !tmp_opt.saw_tstamp)
1342 tcp_clear_options(&tmp_opt);
1344 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1345 tcp_openreq_init(req, &tmp_opt, skb);
1347 ireq = inet_rsk(req);
1348 ireq->loc_addr = daddr;
1349 ireq->rmt_addr = saddr;
1350 ireq->no_srccheck = inet_sk(sk)->transparent;
1351 ireq->opt = tcp_v4_save_options(sk, skb);
1353 if (security_inet_conn_request(sk, skb, req))
1356 if (!want_cookie || tmp_opt.tstamp_ok)
1357 TCP_ECN_create_request(req, tcp_hdr(skb));
1360 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1361 req->cookie_ts = tmp_opt.tstamp_ok;
1363 struct inet_peer *peer = NULL;
1366 /* VJ's idea. We save the last timestamp seen
1367 * from the destination in the peer table, when entering
1368 * state TIME-WAIT, and check against it before
1369 * accepting a new connection request.
1371 * If "isn" is not zero, this request hit an alive
1372 * timewait bucket, so all the necessary checks
1373 * were made in the function processing the timewait state.
1375 if (tmp_opt.saw_tstamp &&
1376 tcp_death_row.sysctl_tw_recycle &&
1377 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1378 fl4.daddr == saddr &&
1379 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1380 inet_peer_refcheck(peer);
1381 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1382 (s32)(peer->tcp_ts - req->ts_recent) >
1384 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1385 goto drop_and_release;
1388 /* Kill the following clause, if you dislike this way. */
1389 else if (!sysctl_tcp_syncookies &&
1390 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1391 (sysctl_max_syn_backlog >> 2)) &&
1392 (!peer || !peer->tcp_ts_stamp) &&
1393 (!dst || !dst_metric(dst, RTAX_RTT))) {
1394 /* Without syncookies the last quarter of
1395 * the backlog is filled only with destinations
1396 * proven to be alive.
1397 * It means that we continue to communicate
1398 * with destinations already remembered
1399 * at the moment of the synflood.
1401 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
1402 &saddr, ntohs(tcp_hdr(skb)->source));
1403 goto drop_and_release;
1406 isn = tcp_v4_init_sequence(skb);
1408 tcp_rsk(req)->snt_isn = isn;
1409 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1411 if (tcp_v4_send_synack(sk, dst, req,
1412 (struct request_values *)&tmp_ext) ||
1416 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1426 EXPORT_SYMBOL(tcp_v4_conn_request);
1430 * The three way handshake has completed - we got a valid synack -
1431 * now create the new socket.
1433 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1434 struct request_sock *req,
1435 struct dst_entry *dst)
1437 struct inet_request_sock *ireq;
1438 struct inet_sock *newinet;
1439 struct tcp_sock *newtp;
1441 #ifdef CONFIG_TCP_MD5SIG
1442 struct tcp_md5sig_key *key;
1444 struct ip_options_rcu *inet_opt;
1446 if (sk_acceptq_is_full(sk))
1449 newsk = tcp_create_openreq_child(sk, req, skb);
1453 newsk->sk_gso_type = SKB_GSO_TCPV4;
1455 newtp = tcp_sk(newsk);
1456 newinet = inet_sk(newsk);
1457 ireq = inet_rsk(req);
1458 newinet->inet_daddr = ireq->rmt_addr;
1459 newinet->inet_rcv_saddr = ireq->loc_addr;
1460 newinet->inet_saddr = ireq->loc_addr;
1461 inet_opt = ireq->opt;
1462 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1464 newinet->mc_index = inet_iif(skb);
1465 newinet->mc_ttl = ip_hdr(skb)->ttl;
1466 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1468 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1469 newinet->inet_id = newtp->write_seq ^ jiffies;
1471 if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
1474 sk_setup_caps(newsk, dst);
1476 tcp_mtup_init(newsk);
1477 tcp_sync_mss(newsk, dst_mtu(dst));
1478 newtp->advmss = dst_metric_advmss(dst);
1479 if (tcp_sk(sk)->rx_opt.user_mss &&
1480 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1481 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1483 tcp_initialize_rcv_mss(newsk);
1484 if (tcp_rsk(req)->snt_synack)
1485 tcp_valid_rtt_meas(newsk,
1486 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1487 newtp->total_retrans = req->retrans;
1489 #ifdef CONFIG_TCP_MD5SIG
1490 /* Copy over the MD5 key from the original socket */
1491 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1495 * We're using one, so create a matching key
1496 * on the newsk structure. If we fail to get
1497 * memory, then we end up not copying the key across.
1500 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1501 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1502 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1506 if (__inet_inherit_port(sk, newsk) < 0)
1508 __inet_hash_nolisten(newsk, NULL);
1513 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1517 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1520 tcp_clear_xmit_timers(newsk);
1521 tcp_cleanup_congestion_control(newsk);
1522 bh_unlock_sock(newsk);
1526 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1528 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1530 struct tcphdr *th = tcp_hdr(skb);
1531 const struct iphdr *iph = ip_hdr(skb);
1533 struct request_sock **prev;
1534 /* Find possible connection requests. */
1535 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1536 iph->saddr, iph->daddr);
1538 return tcp_check_req(sk, skb, req, prev);
1540 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1541 th->source, iph->daddr, th->dest, inet_iif(skb));
1544 if (nsk->sk_state != TCP_TIME_WAIT) {
1548 inet_twsk_put(inet_twsk(nsk));
1552 #ifdef CONFIG_SYN_COOKIES
1554 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1559 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1561 const struct iphdr *iph = ip_hdr(skb);
1563 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1564 if (!tcp_v4_check(skb->len, iph->saddr,
1565 iph->daddr, skb->csum)) {
1566 skb->ip_summed = CHECKSUM_UNNECESSARY;
1571 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1572 skb->len, IPPROTO_TCP, 0);
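/* For short segments it is cheaper to verify the full checksum right
 * away than to defer it until the data is copied to user space.
 */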
1574 if (skb->len <= 76) {
1575 return __skb_checksum_complete(skb);
1581 /* The socket must have its spinlock held when we get here.
1584 * We have a potential double-lock case here, so even when
1585 * doing backlog processing we use the BH locking scheme.
1586 * This is because we cannot sleep with the original spinlock held.
1589 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1592 #ifdef CONFIG_TCP_MD5SIG
1594 * We really want to reject the packet as early as possible when:
1596 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1597 * o There is an MD5 option and we're not expecting one
1599 if (tcp_v4_inbound_md5_hash(sk, skb))
1603 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1604 sock_rps_save_rxhash(sk, skb);
1605 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1612 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1615 if (sk->sk_state == TCP_LISTEN) {
1616 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1621 sock_rps_save_rxhash(nsk, skb);
1622 if (tcp_child_process(sk, nsk, skb)) {
1629 sock_rps_save_rxhash(sk, skb);
1631 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1638 tcp_v4_send_reset(rsk, skb);
1641 /* Be careful here. If this function gets more complicated and
1642 * gcc suffers from register pressure on the x86, sk (in %ebx)
1643 * might be destroyed here. This current version compiles correctly,
1644 * but you have been warned.
1649 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1652 EXPORT_SYMBOL(tcp_v4_do_rcv);
1658 int tcp_v4_rcv(struct sk_buff *skb)
1660 const struct iphdr *iph;
1661 const struct tcphdr *th;
1664 struct net *net = dev_net(skb->dev);
1666 if (skb->pkt_type != PACKET_HOST)
1669 /* Count it even if it's bad */
1670 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1672 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1677 if (th->doff < sizeof(struct tcphdr) / 4)
1679 if (!pskb_may_pull(skb, th->doff * 4))
1682 /* An explanation is required here, I think.
1683 * Packet length and doff are validated by header prediction,
1684 * provided the case of th->doff == 0 is eliminated.
1685 * So, we defer the checks. */
1686 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1691 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1692 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1693 skb->len - th->doff * 4);
1694 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1695 TCP_SKB_CB(skb)->when = 0;
1696 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1697 TCP_SKB_CB(skb)->sacked = 0;
1699 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1704 if (sk->sk_state == TCP_TIME_WAIT)
1707 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1708 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1709 goto discard_and_relse;
1712 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1713 goto discard_and_relse;
1716 if (sk_filter(sk, skb))
1717 goto discard_and_relse;
1721 bh_lock_sock_nested(sk);
1723 if (!sock_owned_by_user(sk)) {
1724 #ifdef CONFIG_NET_DMA
1725 struct tcp_sock *tp = tcp_sk(sk);
1726 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1727 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1728 if (tp->ucopy.dma_chan)
1729 ret = tcp_v4_do_rcv(sk, skb);
1733 if (!tcp_prequeue(sk, skb))
1734 ret = tcp_v4_do_rcv(sk, skb);
1736 } else if (unlikely(sk_add_backlog(sk, skb))) {
1738 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1739 goto discard_and_relse;
1748 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1751 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1753 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1755 tcp_v4_send_reset(NULL, skb);
1759 /* Discard frame. */
1768 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1769 inet_twsk_put(inet_twsk(sk));
1773 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1774 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1775 inet_twsk_put(inet_twsk(sk));
1778 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1780 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1782 iph->daddr, th->dest,
1785 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1786 inet_twsk_put(inet_twsk(sk));
1790 /* Fall through to ACK */
1793 tcp_v4_timewait_ack(sk, skb);
1797 case TCP_TW_SUCCESS:;
1802 struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1804 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1805 struct inet_sock *inet = inet_sk(sk);
1806 struct inet_peer *peer;
1809 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1810 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1814 rt_bind_peer(rt, inet->inet_daddr, 1);
1816 *release_it = false;
1821 EXPORT_SYMBOL(tcp_v4_get_peer);
1823 void *tcp_v4_tw_get_peer(struct sock *sk)
1825 const struct inet_timewait_sock *tw = inet_twsk(sk);
1827 return inet_getpeer_v4(tw->tw_daddr, 1);
1829 EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1831 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1832 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1833 .twsk_unique = tcp_twsk_unique,
1834 .twsk_destructor= tcp_twsk_destructor,
1835 .twsk_getpeer = tcp_v4_tw_get_peer,
1838 const struct inet_connection_sock_af_ops ipv4_specific = {
1839 .queue_xmit = ip_queue_xmit,
1840 .send_check = tcp_v4_send_check,
1841 .rebuild_header = inet_sk_rebuild_header,
1842 .conn_request = tcp_v4_conn_request,
1843 .syn_recv_sock = tcp_v4_syn_recv_sock,
1844 .get_peer = tcp_v4_get_peer,
1845 .net_header_len = sizeof(struct iphdr),
1846 .setsockopt = ip_setsockopt,
1847 .getsockopt = ip_getsockopt,
1848 .addr2sockaddr = inet_csk_addr2sockaddr,
1849 .sockaddr_len = sizeof(struct sockaddr_in),
1850 .bind_conflict = inet_csk_bind_conflict,
1851 #ifdef CONFIG_COMPAT
1852 .compat_setsockopt = compat_ip_setsockopt,
1853 .compat_getsockopt = compat_ip_getsockopt,
1856 EXPORT_SYMBOL(ipv4_specific);
1858 #ifdef CONFIG_TCP_MD5SIG
1859 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1860 .md5_lookup = tcp_v4_md5_lookup,
1861 .calc_md5_hash = tcp_v4_md5_hash_skb,
1862 .md5_parse = tcp_v4_parse_md5_keys,
1866 /* NOTE: A lot of things are set to zero explicitly by the call to
1867 * sk_alloc(), so they need not be done here.
1869 static int tcp_v4_init_sock(struct sock *sk)
1871 struct inet_connection_sock *icsk = inet_csk(sk);
1872 struct tcp_sock *tp = tcp_sk(sk);
1874 skb_queue_head_init(&tp->out_of_order_queue);
1875 tcp_init_xmit_timers(sk);
1876 tcp_prequeue_init(tp);
1878 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1879 tp->mdev = TCP_TIMEOUT_INIT;
1881 /* So many TCP implementations out there (incorrectly) count the
1882 * initial SYN frame in their delayed-ACK and congestion control
1883 * algorithms that we must have the following bandaid to talk
1884 * efficiently to them. -DaveM
1886 tp->snd_cwnd = TCP_INIT_CWND;
1888 /* See draft-stevens-tcpca-spec-01 for discussion of the
1889 * initialization of these values.
1891 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1892 tp->snd_cwnd_clamp = ~0;
1893 tp->mss_cache = TCP_MSS_DEFAULT;
1895 tp->reordering = sysctl_tcp_reordering;
1896 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1898 sk->sk_state = TCP_CLOSE;
1900 sk->sk_write_space = sk_stream_write_space;
1901 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1903 icsk->icsk_af_ops = &ipv4_specific;
1904 icsk->icsk_sync_mss = tcp_sync_mss;
1905 #ifdef CONFIG_TCP_MD5SIG
1906 tp->af_specific = &tcp_sock_ipv4_specific;
1909 /* TCP Cookie Transactions */
1910 if (sysctl_tcp_cookie_size > 0) {
1911 /* Default, cookies without s_data_payload. */
1913 kzalloc(sizeof(*tp->cookie_values),
1915 if (tp->cookie_values != NULL)
1916 kref_init(&tp->cookie_values->kref);
1918 /* Presumed zeroed, in order of appearance:
1919 * cookie_in_always, cookie_out_never,
1920 * s_data_constant, s_data_in, s_data_out
1922 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1923 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1926 sock_update_memcg(sk);
1927 sk_sockets_allocated_inc(sk);
1933 void tcp_v4_destroy_sock(struct sock *sk)
1935 struct tcp_sock *tp = tcp_sk(sk);
1937 tcp_clear_xmit_timers(sk);
1939 tcp_cleanup_congestion_control(sk);
1941 /* Clean up the write buffer. */
1942 tcp_write_queue_purge(sk);
1944 /* Cleans up our, hopefully empty, out_of_order_queue. */
1945 __skb_queue_purge(&tp->out_of_order_queue);
1947 #ifdef CONFIG_TCP_MD5SIG
1948 /* Clean up the MD5 key list, if any */
1949 if (tp->md5sig_info) {
1950 tcp_clear_md5_list(sk);
1951 kfree_rcu(tp->md5sig_info, rcu);
1952 tp->md5sig_info = NULL;
1956 #ifdef CONFIG_NET_DMA
1957 /* Cleans up our sk_async_wait_queue */
1958 __skb_queue_purge(&sk->sk_async_wait_queue);
1961 /* Clean the prequeue; it really must be empty */
1962 __skb_queue_purge(&tp->ucopy.prequeue);
1964 /* Clean up a referenced TCP bind bucket. */
1965 if (inet_csk(sk)->icsk_bind_hash)
1969 * If sendmsg cached page exists, toss it.
1971 if (sk->sk_sndmsg_page) {
1972 __free_page(sk->sk_sndmsg_page);
1973 sk->sk_sndmsg_page = NULL;
1976 /* TCP Cookie Transactions */
1977 if (tp->cookie_values != NULL) {
1978 kref_put(&tp->cookie_values->kref,
1979 tcp_cookie_values_release);
1980 tp->cookie_values = NULL;
1983 sk_sockets_allocated_dec(sk);
1984 sock_release_memcg(sk);
1986 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1988 #ifdef CONFIG_PROC_FS
1989 /* Proc filesystem TCP sock list dumping. */
1991 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1993 return hlist_nulls_empty(head) ? NULL :
1994 list_entry(head->first, struct inet_timewait_sock, tw_node);
1997 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1999 return !is_a_nulls(tw->tw_node.next) ?
2000 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2004 * Get next listener socket following cur. If cur is NULL, get first socket
2005 * starting from bucket given in st->bucket; when st->bucket is zero the
2006 * very first socket in the hash table is returned.
2008 static void *listening_get_next(struct seq_file *seq, void *cur)
2010 struct inet_connection_sock *icsk;
2011 struct hlist_nulls_node *node;
2012 struct sock *sk = cur;
2013 struct inet_listen_hashbucket *ilb;
2014 struct tcp_iter_state *st = seq->private;
2015 struct net *net = seq_file_net(seq);
2018 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2019 spin_lock_bh(&ilb->lock);
2020 sk = sk_nulls_head(&ilb->head);
2024 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2028 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2029 struct request_sock *req = cur;
2031 icsk = inet_csk(st->syn_wait_sk);
2035 if (req->rsk_ops->family == st->family) {
2041 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2044 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2046 sk = sk_nulls_next(st->syn_wait_sk);
2047 st->state = TCP_SEQ_STATE_LISTENING;
2048 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2050 icsk = inet_csk(sk);
2051 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2052 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2054 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2055 sk = sk_nulls_next(sk);
2058 sk_nulls_for_each_from(sk, node) {
2059 if (!net_eq(sock_net(sk), net))
2061 if (sk->sk_family == st->family) {
2065 icsk = inet_csk(sk);
2066 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2067 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2069 st->uid = sock_i_uid(sk);
2070 st->syn_wait_sk = sk;
2071 st->state = TCP_SEQ_STATE_OPENREQ;
2075 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2077 spin_unlock_bh(&ilb->lock);
2079 if (++st->bucket < INET_LHTABLE_SIZE) {
2080 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2081 spin_lock_bh(&ilb->lock);
2082 sk = sk_nulls_head(&ilb->head);
2090 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2092 struct tcp_iter_state *st = seq->private;
2097 rc = listening_get_next(seq, NULL);
2099 while (rc && *pos) {
2100 rc = listening_get_next(seq, rc);
2106 static inline int empty_bucket(struct tcp_iter_state *st)
2108 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2109 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2113 * Get first established socket starting from bucket given in st->bucket.
2114 * If st->bucket is zero, the very first socket in the hash is returned.
2116 static void *established_get_first(struct seq_file *seq)
2118 struct tcp_iter_state *st = seq->private;
2119 struct net *net = seq_file_net(seq);
2123 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2125 struct hlist_nulls_node *node;
2126 struct inet_timewait_sock *tw;
2127 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2129 /* Lockless fast path for the common case of empty buckets */
2130 if (empty_bucket(st))
2134 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2135 if (sk->sk_family != st->family ||
2136 !net_eq(sock_net(sk), net)) {
2142 st->state = TCP_SEQ_STATE_TIME_WAIT;
2143 inet_twsk_for_each(tw, node,
2144 &tcp_hashinfo.ehash[st->bucket].twchain) {
2145 if (tw->tw_family != st->family ||
2146 !net_eq(twsk_net(tw), net)) {
2152 spin_unlock_bh(lock);
2153 st->state = TCP_SEQ_STATE_ESTABLISHED;
2159 static void *established_get_next(struct seq_file *seq, void *cur)
2161 struct sock *sk = cur;
2162 struct inet_timewait_sock *tw;
2163 struct hlist_nulls_node *node;
2164 struct tcp_iter_state *st = seq->private;
2165 struct net *net = seq_file_net(seq);
2170 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2174 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2181 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2182 st->state = TCP_SEQ_STATE_ESTABLISHED;
2184 /* Look for the next non-empty bucket */
2186 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2189 if (st->bucket > tcp_hashinfo.ehash_mask)
2192 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2193 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2195 sk = sk_nulls_next(sk);
2197 sk_nulls_for_each_from(sk, node) {
2198 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2202 st->state = TCP_SEQ_STATE_TIME_WAIT;
2203 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2211 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2213 struct tcp_iter_state *st = seq->private;
2217 rc = established_get_first(seq);
2220 rc = established_get_next(seq, rc);
2226 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2229 struct tcp_iter_state *st = seq->private;
2231 st->state = TCP_SEQ_STATE_LISTENING;
2232 rc = listening_get_idx(seq, &pos);
2235 st->state = TCP_SEQ_STATE_ESTABLISHED;
2236 rc = established_get_idx(seq, pos);
2242 static void *tcp_seek_last_pos(struct seq_file *seq)
2244 struct tcp_iter_state *st = seq->private;
2245 int offset = st->offset;
2246 int orig_num = st->num;
2249 switch (st->state) {
2250 case TCP_SEQ_STATE_OPENREQ:
2251 case TCP_SEQ_STATE_LISTENING:
2252 if (st->bucket >= INET_LHTABLE_SIZE)
2254 st->state = TCP_SEQ_STATE_LISTENING;
2255 rc = listening_get_next(seq, NULL);
2256 while (offset-- && rc)
2257 rc = listening_get_next(seq, rc);
2262 case TCP_SEQ_STATE_ESTABLISHED:
2263 case TCP_SEQ_STATE_TIME_WAIT:
2264 st->state = TCP_SEQ_STATE_ESTABLISHED;
2265 if (st->bucket > tcp_hashinfo.ehash_mask)
2267 rc = established_get_first(seq);
2268 while (offset-- && rc)
2269 rc = established_get_next(seq, rc);
2277 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2279 struct tcp_iter_state *st = seq->private;
2282 if (*pos && *pos == st->last_pos) {
2283 rc = tcp_seek_last_pos(seq);
2288 st->state = TCP_SEQ_STATE_LISTENING;
2292 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2295 st->last_pos = *pos;
2299 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2301 struct tcp_iter_state *st = seq->private;
2304 if (v == SEQ_START_TOKEN) {
2305 rc = tcp_get_idx(seq, 0);
2309 switch (st->state) {
2310 case TCP_SEQ_STATE_OPENREQ:
2311 case TCP_SEQ_STATE_LISTENING:
2312 rc = listening_get_next(seq, v);
2314 st->state = TCP_SEQ_STATE_ESTABLISHED;
2317 rc = established_get_first(seq);
2320 case TCP_SEQ_STATE_ESTABLISHED:
2321 case TCP_SEQ_STATE_TIME_WAIT:
2322 rc = established_get_next(seq, v);
2327 st->last_pos = *pos;
2331 static void tcp_seq_stop(struct seq_file *seq, void *v)
2333 struct tcp_iter_state *st = seq->private;
2335 switch (st->state) {
2336 case TCP_SEQ_STATE_OPENREQ:
2338 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2339 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2341 case TCP_SEQ_STATE_LISTENING:
2342 if (v != SEQ_START_TOKEN)
2343 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2345 case TCP_SEQ_STATE_TIME_WAIT:
2346 case TCP_SEQ_STATE_ESTABLISHED:
2348 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2353 int tcp_seq_open(struct inode *inode, struct file *file)
2355 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2356 struct tcp_iter_state *s;
2359 err = seq_open_net(inode, file, &afinfo->seq_ops,
2360 sizeof(struct tcp_iter_state));
2364 s = ((struct seq_file *)file->private_data)->private;
2365 s->family = afinfo->family;
2369 EXPORT_SYMBOL(tcp_seq_open);
2371 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2374 struct proc_dir_entry *p;
2376 afinfo->seq_ops.start = tcp_seq_start;
2377 afinfo->seq_ops.next = tcp_seq_next;
2378 afinfo->seq_ops.stop = tcp_seq_stop;
2380 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2381 afinfo->seq_fops, afinfo);
2386 EXPORT_SYMBOL(tcp_proc_register);
2388 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2390 proc_net_remove(net, afinfo->name);
2392 EXPORT_SYMBOL(tcp_proc_unregister);
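/* The files registered above appear as /proc/net/tcp in each network
 * namespace (the IPv6 side registers /proc/net/tcp6 the same way), so a
 * plain "cat /proc/net/tcp" walks the listening and established hashes
 * via the iterators defined earlier in this section.
 */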
2394 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2395 struct seq_file *f, int i, int uid, int *len)
2397 const struct inet_request_sock *ireq = inet_rsk(req);
2398 int ttd = req->expires - jiffies;
2400 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2401 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2404 ntohs(inet_sk(sk)->inet_sport),
2406 ntohs(ireq->rmt_port),
2408 0, 0, /* could print option size, but that is af dependent. */
2409 1, /* timers active (only the expire timer) */
2410 jiffies_to_clock_t(ttd),
2413 0, /* non standard timer */
2414 0, /* open_requests have no inode */
2415 atomic_read(&sk->sk_refcnt),
2420 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2423 unsigned long timer_expires;
2424 const struct tcp_sock *tp = tcp_sk(sk);
2425 const struct inet_connection_sock *icsk = inet_csk(sk);
2426 const struct inet_sock *inet = inet_sk(sk);
2427 __be32 dest = inet->inet_daddr;
2428 __be32 src = inet->inet_rcv_saddr;
2429 __u16 destp = ntohs(inet->inet_dport);
2430 __u16 srcp = ntohs(inet->inet_sport);
2433 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2435 timer_expires = icsk->icsk_timeout;
2436 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2438 timer_expires = icsk->icsk_timeout;
2439 } else if (timer_pending(&sk->sk_timer)) {
2441 timer_expires = sk->sk_timer.expires;
2444 timer_expires = jiffies;
2447 if (sk->sk_state == TCP_LISTEN)
2448 rx_queue = sk->sk_ack_backlog;
2451 * because we don't lock the socket, we might find a transient negative value
2453 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2455 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2456 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2457 i, src, srcp, dest, destp, sk->sk_state,
2458 tp->write_seq - tp->snd_una,
2461 jiffies_to_clock_t(timer_expires - jiffies),
2462 icsk->icsk_retransmits,
2464 icsk->icsk_probes_out,
2466 atomic_read(&sk->sk_refcnt), sk,
2467 jiffies_to_clock_t(icsk->icsk_rto),
2468 jiffies_to_clock_t(icsk->icsk_ack.ato),
2469 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2471 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2475 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2476 struct seq_file *f, int i, int *len)
2480 int ttd = tw->tw_ttd - jiffies;
2485 dest = tw->tw_daddr;
2486 src = tw->tw_rcv_saddr;
2487 destp = ntohs(tw->tw_dport);
2488 srcp = ntohs(tw->tw_sport);
2490 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2491 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2492 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2493 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2494 atomic_read(&tw->tw_refcnt), tw, len);
2499 static int tcp4_seq_show(struct seq_file *seq, void *v)
2501 struct tcp_iter_state *st;
2504 if (v == SEQ_START_TOKEN) {
2505 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2506 " sl local_address rem_address st tx_queue "
2507 "rx_queue tr tm->when retrnsmt uid timeout "
2513 switch (st->state) {
2514 case TCP_SEQ_STATE_LISTENING:
2515 case TCP_SEQ_STATE_ESTABLISHED:
2516 get_tcp4_sock(v, seq, st->num, &len);
2518 case TCP_SEQ_STATE_OPENREQ:
2519 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2521 case TCP_SEQ_STATE_TIME_WAIT:
2522 get_timewait4_sock(v, seq, st->num, &len);
2525 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2530 static const struct file_operations tcp_afinfo_seq_fops = {
2531 .owner = THIS_MODULE,
2532 .open = tcp_seq_open,
2534 .llseek = seq_lseek,
2535 .release = seq_release_net
2538 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2541 .seq_fops = &tcp_afinfo_seq_fops,
2543 .show = tcp4_seq_show,
2547 static int __net_init tcp4_proc_init_net(struct net *net)
2549 return tcp_proc_register(net, &tcp4_seq_afinfo);
2552 static void __net_exit tcp4_proc_exit_net(struct net *net)
2554 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2557 static struct pernet_operations tcp4_net_ops = {
2558 .init = tcp4_proc_init_net,
2559 .exit = tcp4_proc_exit_net,
2562 int __init tcp4_proc_init(void)
2564 return register_pernet_subsys(&tcp4_net_ops);
2567 void tcp4_proc_exit(void)
2569 unregister_pernet_subsys(&tcp4_net_ops);
2571 #endif /* CONFIG_PROC_FS */
2573 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2575 const struct iphdr *iph = skb_gro_network_header(skb);
2577 switch (skb->ip_summed) {
2578 case CHECKSUM_COMPLETE:
2579 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2581 skb->ip_summed = CHECKSUM_UNNECESSARY;
2587 NAPI_GRO_CB(skb)->flush = 1;
2591 return tcp_gro_receive(head, skb);
2594 int tcp4_gro_complete(struct sk_buff *skb)
2596 const struct iphdr *iph = ip_hdr(skb);
2597 struct tcphdr *th = tcp_hdr(skb);
2599 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2600 iph->saddr, iph->daddr, 0);
2601 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2603 return tcp_gro_complete(skb);
2606 struct proto tcp_prot = {
2608 .owner = THIS_MODULE,
2610 .connect = tcp_v4_connect,
2611 .disconnect = tcp_disconnect,
2612 .accept = inet_csk_accept,
2614 .init = tcp_v4_init_sock,
2615 .destroy = tcp_v4_destroy_sock,
2616 .shutdown = tcp_shutdown,
2617 .setsockopt = tcp_setsockopt,
2618 .getsockopt = tcp_getsockopt,
2619 .recvmsg = tcp_recvmsg,
2620 .sendmsg = tcp_sendmsg,
2621 .sendpage = tcp_sendpage,
2622 .backlog_rcv = tcp_v4_do_rcv,
2624 .unhash = inet_unhash,
2625 .get_port = inet_csk_get_port,
2626 .enter_memory_pressure = tcp_enter_memory_pressure,
2627 .sockets_allocated = &tcp_sockets_allocated,
2628 .orphan_count = &tcp_orphan_count,
2629 .memory_allocated = &tcp_memory_allocated,
2630 .memory_pressure = &tcp_memory_pressure,
2631 .sysctl_wmem = sysctl_tcp_wmem,
2632 .sysctl_rmem = sysctl_tcp_rmem,
2633 .max_header = MAX_TCP_HEADER,
2634 .obj_size = sizeof(struct tcp_sock),
2635 .slab_flags = SLAB_DESTROY_BY_RCU,
2636 .twsk_prot = &tcp_timewait_sock_ops,
2637 .rsk_prot = &tcp_request_sock_ops,
2638 .h.hashinfo = &tcp_hashinfo,
2639 .no_autobind = true,
2640 #ifdef CONFIG_COMPAT
2641 .compat_setsockopt = compat_tcp_setsockopt,
2642 .compat_getsockopt = compat_tcp_getsockopt,
2644 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2645 .init_cgroup = tcp_init_cgroup,
2646 .destroy_cgroup = tcp_destroy_cgroup,
2647 .proto_cgroup = tcp_proto_cgroup,
2650 EXPORT_SYMBOL(tcp_prot);
2652 static int __net_init tcp_sk_init(struct net *net)
2654 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2655 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2658 static void __net_exit tcp_sk_exit(struct net *net)
2660 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2663 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2665 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2668 static struct pernet_operations __net_initdata tcp_sk_ops = {
2669 .init = tcp_sk_init,
2670 .exit = tcp_sk_exit,
2671 .exit_batch = tcp_sk_exit_batch,
2674 void __init tcp_v4_init(void)
2676 inet_hashinfo_init(&tcp_hashinfo);
2677 if (register_pernet_subsys(&tcp_sk_ops))
2678 panic("Failed to create the TCP control socket.\n");