// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
48 #define pr_fmt(fmt) "TCP: " fmt
50 #include <linux/bottom_half.h>
51 #include <linux/types.h>
52 #include <linux/fcntl.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/cache.h>
56 #include <linux/jhash.h>
57 #include <linux/init.h>
58 #include <linux/times.h>
59 #include <linux/slab.h>
60 #include <linux/sched.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
74 #include <linux/inet.h>
75 #include <linux/ipv6.h>
76 #include <linux/stddef.h>
77 #include <linux/proc_fs.h>
78 #include <linux/seq_file.h>
79 #include <linux/inetdevice.h>
80 #include <linux/btf_ids.h>
82 #include <crypto/hash.h>
83 #include <linux/scatterlist.h>
85 #include <trace/events/tcp.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
92 struct inet_hashinfo tcp_hashinfo;
93 EXPORT_SYMBOL(tcp_hashinfo);
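
/* Per-CPU control socket, used by tcp_v4_send_reset() and tcp_v4_send_ack()
 * below to transmit replies without a full socket context.
 */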
95 static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
110 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
112 int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
113 const struct inet_timewait_sock *tw = inet_twsk(sktw);
114 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
115 struct tcp_sock *tp = tcp_sk(sk);
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
123 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
125 #if IS_ENABLED(CONFIG_IPV6)
126 if (tw->tw_family == AF_INET6) {
127 if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
128 ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
129 ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
130 ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
135 if (ipv4_is_loopback(tw->tw_daddr) ||
136 ipv4_is_loopback(tw->tw_rcv_saddr))
	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
154 if (tcptw->tw_ts_recent_stamp &&
155 (!twp || (reuse && time_after32(ktime_get_seconds(),
156 tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
168 if (likely(!tp->repair)) {
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
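
/* Invoked through the ->pre_connect hook before tcp_v4_connect(): gives
 * cgroup BPF (BPF_CGROUP_RUN_PROG_INET4_CONNECT) a chance to inspect or
 * rewrite the destination address before the connect proceeds.
 */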
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, &addr_len);
}
200 /* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
203 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
204 struct inet_timewait_death_row *tcp_death_row;
205 struct inet_sock *inet = inet_sk(sk);
206 struct tcp_sock *tp = tcp_sk(sk);
207 struct ip_options_rcu *inet_opt;
208 struct net *net = sock_net(sk);
209 __be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
219 return -EAFNOSUPPORT;
221 nexthop = daddr = usin->sin_addr.s_addr;
222 inet_opt = rcu_dereference_protected(inet->inet_opt,
223 lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}
230 orig_sport = inet->inet_sport;
231 orig_dport = usin->sin_port;
232 fl4 = &inet->cork.fl.u.ip4;
233 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
234 sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
238 if (err == -ENETUNREACH)
239 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
243 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
248 if (!inet_opt || !inet_opt->opt.srr)
251 tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
253 if (!inet->inet_saddr) {
254 err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
260 sk_rcv_saddr_set(sk, inet->inet_saddr);
263 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
264 /* Reset inherited state */
265 tp->rx_opt.ts_recent = 0;
266 tp->rx_opt.ts_recent_stamp = 0;
267 if (likely(!tp->repair))
268 WRITE_ONCE(tp->write_seq, 0);
271 inet->inet_dport = usin->sin_port;
272 sk_daddr_set(sk, daddr);
274 inet_csk(sk)->icsk_ext_hdr_len = 0;
276 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
278 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, while not releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
285 tcp_set_state(sk, TCP_SYN_SENT);
286 err = inet_hash_connect(tcp_death_row, sk);
292 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
293 inet->inet_sport, inet->inet_dport, sk);
299 tp->tcp_usec_ts = dst_tcp_usec_ts(&rt->dst);
300 /* OK, now commit destination to socket. */
301 sk->sk_gso_type = SKB_GSO_TCPV4;
302 sk_setup_caps(sk, &rt->dst);
305 if (likely(!tp->repair)) {
307 WRITE_ONCE(tp->write_seq,
308 secure_tcp_seq(inet->inet_saddr,
312 WRITE_ONCE(tp->tsoffset,
313 secure_tcp_ts_off(net, inet->inet_saddr,
317 atomic_set(&inet->inet_id, get_random_u16());
319 if (tcp_fastopen_defer_connect(sk, &err))
	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	inet_bhash2_reset_saddr(sk);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;
	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);

	mtu = dst_mtu(dst);
371 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
372 ip_sk_accept_pmtu(sk) &&
373 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
374 tcp_sync_mss(sk, mtu);
		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
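
/* Handle an ICMP redirect: if the socket still has a valid cached route,
 * let the dst update itself through its ->redirect() callback.
 */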
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
420 /* TCP-LD (RFC 6069) logic */
421 void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
423 struct inet_connection_sock *icsk = inet_csk(sk);
424 struct tcp_sock *tp = tcp_sk(sk);
429 if (sock_owned_by_user(sk))
432 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
436 skb = tcp_rtx_queue_head(sk);
437 if (WARN_ON_ONCE(!skb))
440 icsk->icsk_backoff--;
441 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
442 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
444 tcp_mstamp_refresh(tp);
445 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
446 remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
449 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
450 remaining, TCP_RTO_MAX);
		/* RTO revert clocked out retransmission.
		 * Will retransmit now.
		 */
		tcp_retransmit_timer(sk);
458 EXPORT_SYMBOL(tcp_ld_RTO_revert);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
int tcp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	int err;
	struct net *net = dev_net(skb->dev);
489 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
490 iph->daddr, th->dest, iph->saddr,
491 ntohs(th->source), inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		/* To increase the counter of ignored icmps for TCP-AO */
		tcp_ao_ignore_icmp(sk, AF_INET, type, code);
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
502 seq = ntohl(th->seq);
503 if (sk->sk_state == TCP_NEW_SYN_RECV) {
504 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
505 type == ICMP_TIME_EXCEEDED ||
506 (type == ICMP_DEST_UNREACH &&
507 (code == ICMP_NET_UNREACH ||
					code == ICMP_HOST_UNREACH)));
		return 0;
	}
512 if (tcp_ao_ignore_icmp(sk, AF_INET, type, code)) {
	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;
530 if (static_branch_unlikely(&ip4_min_ttl)) {
531 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
532 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
533 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
539 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
540 fastopen = rcu_dereference(tp->fastopen_rsk);
541 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
542 if (sk->sk_state != TCP_LISTEN &&
543 !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}
	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;
563 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			WRITE_ONCE(tp->mtu_info, info);
572 if (!sock_owned_by_user(sk)) {
573 tcp_v4_mtu_reduced(sk);
575 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
581 err = icmp_err_convert[code].errno;
		/* check if this ICMP message allows revert of backoff.
		 * (see draft-zimmermann-tcp-lcd) */
		if (!fastopen &&
		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
			tcp_ld_RTO_revert(sk, seq);
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			WRITE_ONCE(sk->sk_err, err);

			sk_error_report(sk);

			tcp_done(sk);
		} else {
			WRITE_ONCE(sk->sk_err_soft, err);
		}
		goto out;
	}
	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */
	if (!sock_owned_by_user(sk) &&
	    inet_test_bit(RECVERR, sk)) {
		WRITE_ONCE(sk->sk_err, err);
		sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		WRITE_ONCE(sk->sk_err_soft, err);
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
667 #define REPLY_OPTIONS_LEN (MAX_TCP_OPTION_SPACE / sizeof(__be32))
669 static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb,
670 const struct tcp_ao_hdr *aoh,
671 struct ip_reply_arg *arg, struct tcphdr *reply,
672 __be32 reply_options[REPLY_OPTIONS_LEN])
675 int sdif = tcp_v4_sdif(skb);
676 int dif = inet_iif(skb);
677 int l3index = sdif ? dif : 0;
678 bool allocated_traffic_key;
679 struct tcp_ao_key *key;
686 if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, ntohl(reply->seq),
687 &key, &traffic_key, &allocated_traffic_key,
691 reply_options[0] = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key) << 16) |
692 (aoh->rnext_keyid << 8) | keyid);
693 arg->iov[0].iov_len += tcp_ao_len_aligned(key);
694 reply->doff = arg->iov[0].iov_len / 4;
696 if (tcp_ao_hash_hdr(AF_INET, (char *)&reply_options[1],
698 (union tcp_ao_addr *)&ip_hdr(skb)->saddr,
699 (union tcp_ao_addr *)&ip_hdr(skb)->daddr,
705 if (allocated_traffic_key)
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on parameters
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
726 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
728 const struct tcphdr *th = tcp_hdr(skb);
731 __be32 opt[REPLY_OPTIONS_LEN];
733 const __u8 *md5_hash_location = NULL;
734 const struct tcp_ao_hdr *aoh;
735 struct ip_reply_arg arg;
736 #ifdef CONFIG_TCP_MD5SIG
737 struct tcp_md5sig_key *key = NULL;
738 unsigned char newhash[16];
739 struct sock *sk1 = NULL;
742 u64 transmit_time = 0;
	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;
	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;
757 /* Swap the send and the receive. */
758 memset(&rep, 0, sizeof(rep));
759 rep.th.dest = th->source;
760 rep.th.source = th->dest;
761 rep.th.doff = sizeof(struct tcphdr) / 4;
765 rep.th.seq = th->ack_seq;
768 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
769 skb->len - (th->doff << 2));
772 memset(&arg, 0, sizeof(arg));
773 arg.iov[0].iov_base = (unsigned char *)&rep;
774 arg.iov[0].iov_len = sizeof(rep.th);
776 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
778 /* Invalid TCP option size or twice included auth */
779 if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh))
782 if (aoh && tcp_v4_ao_sign_reset(sk, skb, aoh, &arg, &rep.th, rep.opt))
785 #ifdef CONFIG_TCP_MD5SIG
787 if (sk && sk_fullsock(sk)) {
788 const union tcp_md5_addr *addr;
791 /* sdif set, means packet ingressed via a device
792 * in an L3 domain and inet_iif is set to it.
794 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
795 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
796 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
797 } else if (md5_hash_location) {
798 const union tcp_md5_addr *addr;
799 int sdif = tcp_v4_sdif(skb);
800 int dif = inet_iif(skb);
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
810 sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
811 NULL, 0, ip_hdr(skb)->saddr,
812 th->source, ip_hdr(skb)->daddr,
813 ntohs(th->source), dif, sdif);
814 /* don't send rst if it can't find key */
818 /* sdif set, means packet ingressed via a device
819 * in an L3 domain and dif is set to it.
821 l3index = sdif ? dif : 0;
822 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
823 key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
828 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
829 if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
835 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
837 (TCPOPT_MD5SIG << 8) |
839 /* Update length and the length the header thinks exists */
840 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
841 rep.th.doff = arg.iov[0].iov_len / 4;
843 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
844 key, ip_hdr(skb)->saddr,
845 ip_hdr(skb)->daddr, &rep.th);
848 /* Can't co-exist with TCPMD5, hence check rep.opt[0] */
849 if (rep.opt[0] == 0) {
850 __be32 mrst = mptcp_reset_option(skb);
854 arg.iov[0].iov_len += sizeof(mrst);
855 rep.th.doff = arg.iov[0].iov_len / 4;
859 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
860 ip_hdr(skb)->saddr, /* XXX */
861 arg.iov[0].iov_len, IPPROTO_TCP, 0);
862 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
863 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
865 /* When socket is gone, all binding information is lost.
866 * routing might fail in this case. No choice here, if we choose to force
867 * input interface, we will misroute in case of asymmetric route.
870 arg.bound_dev_if = sk->sk_bound_dev_if;
872 trace_tcp_send_reset(sk, skb);
875 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
876 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
878 arg.tos = ip_hdr(skb)->tos;
879 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
881 ctl_sk = this_cpu_read(ipv4_tcp_sk);
882 sock_net_set(ctl_sk, net);
884 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
885 inet_twsk(sk)->tw_mark : sk->sk_mark;
886 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
887 inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
888 transmit_time = tcp_transmit_time(sk);
889 xfrm_sk_clone_policy(ctl_sk, sk);
890 txhash = (sk->sk_state == TCP_TIME_WAIT) ?
891 inet_twsk(sk)->tw_txhash : sk->sk_txhash;
894 ctl_sk->sk_priority = 0;
896 ip_send_unicast_reply(ctl_sk,
897 skb, &TCP_SKB_CB(skb)->header.h4.opt,
898 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
899 &arg, arg.iov[0].iov_len,
900 transmit_time, txhash);
902 xfrm_sk_free_policy(ctl_sk);
903 sock_net_set(ctl_sk, &init_net);
904 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
905 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
908 #ifdef CONFIG_TCP_MD5SIG
/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */
918 static void tcp_v4_send_ack(const struct sock *sk,
919 struct sk_buff *skb, u32 seq, u32 ack,
920 u32 win, u32 tsval, u32 tsecr, int oif,
922 int reply_flags, u8 tos, u32 txhash)
924 const struct tcphdr *th = tcp_hdr(skb);
927 __be32 opt[(MAX_TCP_OPTION_SPACE >> 2)];
929 struct net *net = sock_net(sk);
930 struct ip_reply_arg arg;
934 memset(&rep.th, 0, sizeof(struct tcphdr));
935 memset(&arg, 0, sizeof(arg));
937 arg.iov[0].iov_base = (unsigned char *)&rep;
938 arg.iov[0].iov_len = sizeof(rep.th);
940 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
941 (TCPOPT_TIMESTAMP << 8) |
943 rep.opt[1] = htonl(tsval);
944 rep.opt[2] = htonl(tsecr);
945 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
948 /* Swap the send and the receive. */
949 rep.th.dest = th->source;
950 rep.th.source = th->dest;
951 rep.th.doff = arg.iov[0].iov_len / 4;
952 rep.th.seq = htonl(seq);
953 rep.th.ack_seq = htonl(ack);
955 rep.th.window = htons(win);
957 #ifdef CONFIG_TCP_MD5SIG
958 if (tcp_key_is_md5(key)) {
959 int offset = (tsecr) ? 3 : 0;
961 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
963 (TCPOPT_MD5SIG << 8) |
965 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
966 rep.th.doff = arg.iov[0].iov_len/4;
968 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
969 key->md5_key, ip_hdr(skb)->saddr,
970 ip_hdr(skb)->daddr, &rep.th);
974 if (tcp_key_is_ao(key)) {
975 int offset = (tsecr) ? 3 : 0;
977 rep.opt[offset++] = htonl((TCPOPT_AO << 24) |
978 (tcp_ao_len(key->ao_key) << 16) |
979 (key->ao_key->sndid << 8) |
981 arg.iov[0].iov_len += tcp_ao_len_aligned(key->ao_key);
982 rep.th.doff = arg.iov[0].iov_len / 4;
984 tcp_ao_hash_hdr(AF_INET, (char *)&rep.opt[offset],
985 key->ao_key, key->traffic_key,
986 (union tcp_ao_addr *)&ip_hdr(skb)->saddr,
987 (union tcp_ao_addr *)&ip_hdr(skb)->daddr,
991 arg.flags = reply_flags;
992 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
993 ip_hdr(skb)->saddr, /* XXX */
994 arg.iov[0].iov_len, IPPROTO_TCP, 0);
995 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
997 arg.bound_dev_if = oif;
999 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
1001 ctl_sk = this_cpu_read(ipv4_tcp_sk);
1002 sock_net_set(ctl_sk, net);
1003 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
1004 inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
1005 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
1006 inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
1007 transmit_time = tcp_transmit_time(sk);
1008 ip_send_unicast_reply(ctl_sk,
1009 skb, &TCP_SKB_CB(skb)->header.h4.opt,
1010 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
1011 &arg, arg.iov[0].iov_len,
1012 transmit_time, txhash);
	sock_net_set(ctl_sk, &init_net);
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
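
/* ACK a segment received by a TIME-WAIT socket: reply with the sequence
 * numbers, window and timestamps stored in the timewait structure, signed
 * with TCP-AO or MD5 state if the connection used either.
 */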
1019 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1021 struct inet_timewait_sock *tw = inet_twsk(sk);
1022 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1023 struct tcp_key key = {};
1024 #ifdef CONFIG_TCP_AO
1025 struct tcp_ao_info *ao_info;
1027 if (static_branch_unlikely(&tcp_ao_needed.key)) {
1028 /* FIXME: the segment to-be-acked is not verified yet */
1029 ao_info = rcu_dereference(tcptw->ao_info);
1031 const struct tcp_ao_hdr *aoh;
1033 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) {
1039 key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
1043 struct tcp_ao_key *rnext_key;
1045 key.traffic_key = snd_other_key(key.ao_key);
1046 key.sne = READ_ONCE(ao_info->snd_sne);
1047 rnext_key = READ_ONCE(ao_info->rnext_key);
1048 key.rcv_next = rnext_key->rcvid;
1049 key.type = TCP_KEY_AO;
1053 #ifdef CONFIG_TCP_MD5SIG
1054 } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1055 key.md5_key = tcp_twsk_md5_key(tcptw);
1057 key.type = TCP_KEY_MD5;
1061 tcp_v4_send_ack(sk, skb,
1062 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1063 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1064 tcp_tw_tsval(tcptw),
1065 tcptw->tw_ts_recent,
1066 tw->tw_bound_dev_if, &key,
1067 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
1074 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1075 struct request_sock *req)
1077 struct tcp_key key = {};
1079 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1080 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1082 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
1083 tcp_sk(sk)->snd_nxt;
1085 #ifdef CONFIG_TCP_AO
1086 if (static_branch_unlikely(&tcp_ao_needed.key) &&
1087 tcp_rsk_used_ao(req)) {
1088 const union tcp_md5_addr *addr;
1089 const struct tcp_ao_hdr *aoh;
1092 /* Invalid TCP option size or twice included auth */
1093 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1098 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
1099 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
1100 key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET,
1101 aoh->rnext_keyid, -1);
1102 if (unlikely(!key.ao_key)) {
1103 /* Send ACK with any matching MKT for the peer */
1104 key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, -1, -1);
1105 /* Matching key disappeared (user removed the key?)
1106 * let the handshake timeout.
1109 net_info_ratelimited("TCP-AO key for (%pI4, %d)->(%pI4, %d) suddenly disappeared, won't ACK new connection\n",
1111 ntohs(tcp_hdr(skb)->source),
1112 &ip_hdr(skb)->daddr,
1113 ntohs(tcp_hdr(skb)->dest));
1117 key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
1118 if (!key.traffic_key)
1121 key.type = TCP_KEY_AO;
1122 key.rcv_next = aoh->keyid;
1123 tcp_v4_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
1127 #ifdef CONFIG_TCP_MD5SIG
1128 } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1129 const union tcp_md5_addr *addr;
1132 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
1133 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
1134 key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1136 key.type = TCP_KEY_MD5;
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
1145 tcp_v4_send_ack(sk, skb, seq,
1146 tcp_rsk(req)->rcv_nxt,
1147 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1148 tcp_rsk_tsval(tcp_rsk(req)),
1149 READ_ONCE(req->ts_recent),
1151 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
1153 READ_ONCE(tcp_rsk(req)->txhash));
1154 if (tcp_key_is_ao(&key))
1155 kfree(key.traffic_key);
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
1163 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
1165 struct request_sock *req,
1166 struct tcp_fastopen_cookie *foc,
1167 enum tcp_synack_type synack_type,
1168 struct sk_buff *syn_skb)
1170 const struct inet_request_sock *ireq = inet_rsk(req);
1173 struct sk_buff *skb;
1176 /* First, grab a route. */
1177 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1180 skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
1183 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1185 tos = READ_ONCE(inet_sk(sk)->tos);
1187 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1188 tos = (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
1189 (tos & INET_ECN_MASK);
1191 if (!INET_ECN_is_capable(tos) &&
1192 tcp_bpf_ca_needs_ecn((struct sock *)req))
1193 tos |= INET_ECN_ECT_0;
1196 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
1198 rcu_dereference(ireq->ireq_opt),
1201 err = net_xmit_eval(err);
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
1215 #ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
1222 DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
1223 EXPORT_SYMBOL(tcp_md5_needed);
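
/* Prefer the more specific of two matching MD5 keys: a key bound to an L3
 * domain (VRF) wins over an unbound one, then the longer address prefix wins.
 */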
static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{
	if (!old)
		return true;
	if (!new)
		return false;

	/* l3index always overrides non-l3index */
	if (old->l3index && new->l3index == 0)
		return false;
	if (old->l3index == 0 && new->l3index)
		return true;

	return old->prefixlen < new->prefixlen;
}
1239 /* Find the Key structure for an address. */
1240 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1241 const union tcp_md5_addr *addr,
1242 int family, bool any_l3index)
1244 const struct tcp_sock *tp = tcp_sk(sk);
1245 struct tcp_md5sig_key *key;
1246 const struct tcp_md5sig_info *md5sig;
1248 struct tcp_md5sig_key *best_match = NULL;
1251 /* caller either holds rcu_read_lock() or socket lock */
1252 md5sig = rcu_dereference_check(tp->md5sig_info,
1253 lockdep_sock_is_held(sk));
1257 hlist_for_each_entry_rcu(key, &md5sig->head, node,
1258 lockdep_sock_is_held(sk)) {
1259 if (key->family != family)
1261 if (!any_l3index && key->flags & TCP_MD5SIG_FLAG_IFINDEX &&
1262 key->l3index != l3index)
1264 if (family == AF_INET) {
1265 mask = inet_make_mask(key->prefixlen);
1266 match = (key->addr.a4.s_addr & mask) ==
1267 (addr->a4.s_addr & mask);
1268 #if IS_ENABLED(CONFIG_IPV6)
1269 } else if (family == AF_INET6) {
1270 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1277 if (match && better_md5_match(best_match, key))
1282 EXPORT_SYMBOL(__tcp_md5_do_lookup);
1284 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1285 const union tcp_md5_addr *addr,
1286 int family, u8 prefixlen,
1287 int l3index, u8 flags)
1289 const struct tcp_sock *tp = tcp_sk(sk);
1290 struct tcp_md5sig_key *key;
1291 unsigned int size = sizeof(struct in_addr);
1292 const struct tcp_md5sig_info *md5sig;
1294 /* caller either holds rcu_read_lock() or socket lock */
1295 md5sig = rcu_dereference_check(tp->md5sig_info,
1296 lockdep_sock_is_held(sk));
1299 #if IS_ENABLED(CONFIG_IPV6)
1300 if (family == AF_INET6)
1301 size = sizeof(struct in6_addr);
1303 hlist_for_each_entry_rcu(key, &md5sig->head, node,
1304 lockdep_sock_is_held(sk)) {
1305 if (key->family != family)
1307 if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
1309 if (key->l3index != l3index)
1311 if (!memcmp(&key->addr, addr, size) &&
1312 key->prefixlen == prefixlen)
1318 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1319 const struct sock *addr_sk)
1321 const union tcp_md5_addr *addr;
1324 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1325 addr_sk->sk_bound_dev_if);
1326 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1327 return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1329 EXPORT_SYMBOL(tcp_v4_md5_lookup);
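
/* Lazily allocate the head of the per-socket MD5 key list the first time a
 * key is added; the caller owns the socket.
 */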
1331 static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
1333 struct tcp_sock *tp = tcp_sk(sk);
1334 struct tcp_md5sig_info *md5sig;
1336 md5sig = kmalloc(sizeof(*md5sig), gfp);
1341 INIT_HLIST_HEAD(&md5sig->head);
1342 rcu_assign_pointer(tp->md5sig_info, md5sig);
1346 /* This can be called on a newly created socket, from other files */
1347 static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1348 int family, u8 prefixlen, int l3index, u8 flags,
1349 const u8 *newkey, u8 newkeylen, gfp_t gfp)
1351 /* Add Key to the list */
1352 struct tcp_md5sig_key *key;
1353 struct tcp_sock *tp = tcp_sk(sk);
1354 struct tcp_md5sig_info *md5sig;
1356 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1358 /* Pre-existing entry - just update that one.
1359 * Note that the key might be used concurrently.
1360 * data_race() is telling kcsan that we do not care of
1361 * key mismatches, since changing MD5 key on live flows
1362 * can lead to packet drops.
1364 data_race(memcpy(key->key, newkey, newkeylen));
1366 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1367 * Also note that a reader could catch new key->keylen value
1368 * but old key->key[], this is the reason we use __GFP_ZERO
1369 * at sock_kmalloc() time below these lines.
1371 WRITE_ONCE(key->keylen, newkeylen);
1376 md5sig = rcu_dereference_protected(tp->md5sig_info,
1377 lockdep_sock_is_held(sk));
1379 key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1383 memcpy(key->key, newkey, newkeylen);
1384 key->keylen = newkeylen;
1385 key->family = family;
1386 key->prefixlen = prefixlen;
1387 key->l3index = l3index;
1389 memcpy(&key->addr, addr,
1390 (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
1391 sizeof(struct in_addr));
1392 hlist_add_head_rcu(&key->node, &md5sig->head);
1396 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1397 int family, u8 prefixlen, int l3index, u8 flags,
1398 const u8 *newkey, u8 newkeylen)
1400 struct tcp_sock *tp = tcp_sk(sk);
1402 if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1403 if (tcp_md5_alloc_sigpool())
1406 if (tcp_md5sig_info_add(sk, GFP_KERNEL)) {
1407 tcp_md5_release_sigpool();
1411 if (!static_branch_inc(&tcp_md5_needed.key)) {
1412 struct tcp_md5sig_info *md5sig;
1414 md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1415 rcu_assign_pointer(tp->md5sig_info, NULL);
1416 kfree_rcu(md5sig, rcu);
1417 tcp_md5_release_sigpool();
1422 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
1423 newkey, newkeylen, GFP_KERNEL);
1425 EXPORT_SYMBOL(tcp_md5_do_add);
1427 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1428 int family, u8 prefixlen, int l3index,
1429 struct tcp_md5sig_key *key)
1431 struct tcp_sock *tp = tcp_sk(sk);
1433 if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1434 tcp_md5_add_sigpool();
1436 if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC))) {
1437 tcp_md5_release_sigpool();
1441 if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
1442 struct tcp_md5sig_info *md5sig;
1444 md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1445 net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
1446 rcu_assign_pointer(tp->md5sig_info, NULL);
1447 kfree_rcu(md5sig, rcu);
1448 tcp_md5_release_sigpool();
1453 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
1454 key->flags, key->key, key->keylen,
1455 sk_gfp_mask(sk, GFP_ATOMIC));
1457 EXPORT_SYMBOL(tcp_md5_key_copy);
1459 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1460 u8 prefixlen, int l3index, u8 flags)
1462 struct tcp_md5sig_key *key;
1464 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1467 hlist_del_rcu(&key->node);
1468 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1469 kfree_rcu(key, rcu);
1472 EXPORT_SYMBOL(tcp_md5_do_del);
1474 void tcp_clear_md5_list(struct sock *sk)
1476 struct tcp_sock *tp = tcp_sk(sk);
1477 struct tcp_md5sig_key *key;
1478 struct hlist_node *n;
1479 struct tcp_md5sig_info *md5sig;
1481 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1483 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1484 hlist_del_rcu(&key->node);
1485 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1486 kfree_rcu(key, rcu);
1490 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1491 sockptr_t optval, int optlen)
1493 struct tcp_md5sig cmd;
1494 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1495 const union tcp_md5_addr *addr;
1501 if (optlen < sizeof(cmd))
1504 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1507 if (sin->sin_family != AF_INET)
1510 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1511 l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1513 if (optname == TCP_MD5SIG_EXT &&
1514 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1515 prefixlen = cmd.tcpm_prefixlen;
1520 if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
1521 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1522 struct net_device *dev;
1525 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1526 if (dev && netif_is_l3_master(dev))
1527 l3index = dev->ifindex;
1531 /* ok to reference set/not set outside of rcu;
1532 * right now device MUST be an L3 master
1534 if (!dev || !l3index)
1538 addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1540 if (!cmd.tcpm_keylen)
1541 return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
1543 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1546 /* Don't allow keys for peers that have a matching TCP-AO key.
1547 * See the comment in tcp_ao_add_cmd()
1549 if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false))
1550 return -EKEYREJECTED;
1552 return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
1553 cmd.tcpm_key, cmd.tcpm_keylen);
1556 static int tcp_v4_md5_hash_headers(struct tcp_sigpool *hp,
1557 __be32 daddr, __be32 saddr,
1558 const struct tcphdr *th, int nbytes)
1560 struct tcp4_pseudohdr *bp;
1561 struct scatterlist sg;
1568 bp->protocol = IPPROTO_TCP;
1569 bp->len = cpu_to_be16(nbytes);
1571 _th = (struct tcphdr *)(bp + 1);
1572 memcpy(_th, th, sizeof(*th));
1575 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1576 ahash_request_set_crypt(hp->req, &sg, NULL,
1577 sizeof(*bp) + sizeof(*th));
1578 return crypto_ahash_update(hp->req);
1581 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1582 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1584 struct tcp_sigpool hp;
1586 if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
1587 goto clear_hash_nostart;
1589 if (crypto_ahash_init(hp.req))
1591 if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
1593 if (tcp_md5_hash_key(&hp, key))
1595 ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
1596 if (crypto_ahash_final(hp.req))
1599 tcp_sigpool_end(&hp);
1603 tcp_sigpool_end(&hp);
1605 memset(md5_hash, 0, 16);
1609 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1610 const struct sock *sk,
1611 const struct sk_buff *skb)
1613 const struct tcphdr *th = tcp_hdr(skb);
1614 struct tcp_sigpool hp;
1615 __be32 saddr, daddr;
1617 if (sk) { /* valid for establish/request sockets */
1618 saddr = sk->sk_rcv_saddr;
1619 daddr = sk->sk_daddr;
1621 const struct iphdr *iph = ip_hdr(skb);
1626 if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
1627 goto clear_hash_nostart;
1629 if (crypto_ahash_init(hp.req))
1632 if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
1634 if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
1636 if (tcp_md5_hash_key(&hp, key))
1638 ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
1639 if (crypto_ahash_final(hp.req))
1642 tcp_sigpool_end(&hp);
1646 tcp_sigpool_end(&hp);
1648 memset(md5_hash, 0, 16);
1651 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1655 static void tcp_v4_init_req(struct request_sock *req,
1656 const struct sock *sk_listener,
1657 struct sk_buff *skb)
1659 struct inet_request_sock *ireq = inet_rsk(req);
1660 struct net *net = sock_net(sk_listener);
1662 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1663 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1664 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1667 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1668 struct sk_buff *skb,
1670 struct request_sock *req)
1672 tcp_v4_init_req(req, sk, skb);
1674 if (security_inet_conn_request(sk, skb, req))
1677 return inet_csk_route_req(sk, &fl->u.ip4, req);
1680 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1682 .obj_size = sizeof(struct tcp_request_sock),
1683 .rtx_syn_ack = tcp_rtx_synack,
1684 .send_ack = tcp_v4_reqsk_send_ack,
1685 .destructor = tcp_v4_reqsk_destructor,
1686 .send_reset = tcp_v4_send_reset,
1687 .syn_ack_timeout = tcp_syn_ack_timeout,
1690 const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1691 .mss_clamp = TCP_MSS_DEFAULT,
1692 #ifdef CONFIG_TCP_MD5SIG
1693 .req_md5_lookup = tcp_v4_md5_lookup,
1694 .calc_md5_hash = tcp_v4_md5_hash_skb,
1696 #ifdef CONFIG_TCP_AO
1697 .ao_lookup = tcp_v4_ao_lookup_rsk,
1698 .ao_calc_key = tcp_v4_ao_calc_key_rsk,
1699 .ao_synack_hash = tcp_v4_ao_synack_hash,
1701 #ifdef CONFIG_SYN_COOKIES
1702 .cookie_init_seq = cookie_v4_init_sequence,
1704 .route_req = tcp_v4_route_req,
1705 .init_seq = tcp_v4_init_seq,
1706 .init_ts_off = tcp_v4_init_ts_off,
1707 .send_synack = tcp_v4_send_synack,
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
1723 EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
1730 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1731 struct request_sock *req,
1732 struct dst_entry *dst,
1733 struct request_sock *req_unhash,
1736 struct inet_request_sock *ireq;
1737 bool found_dup_sk = false;
1738 struct inet_sock *newinet;
1739 struct tcp_sock *newtp;
1741 #ifdef CONFIG_TCP_MD5SIG
1742 const union tcp_md5_addr *addr;
1743 struct tcp_md5sig_key *key;
1746 struct ip_options_rcu *inet_opt;
1748 if (sk_acceptq_is_full(sk))
1751 newsk = tcp_create_openreq_child(sk, req, skb);
1755 newsk->sk_gso_type = SKB_GSO_TCPV4;
1756 inet_sk_rx_dst_set(newsk, skb);
1758 newtp = tcp_sk(newsk);
1759 newinet = inet_sk(newsk);
1760 ireq = inet_rsk(req);
1761 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1762 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1763 newsk->sk_bound_dev_if = ireq->ir_iif;
1764 newinet->inet_saddr = ireq->ir_loc_addr;
1765 inet_opt = rcu_dereference(ireq->ireq_opt);
1766 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1767 newinet->mc_index = inet_iif(skb);
1768 newinet->mc_ttl = ip_hdr(skb)->ttl;
1769 newinet->rcv_tos = ip_hdr(skb)->tos;
1770 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1772 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1773 atomic_set(&newinet->inet_id, get_random_u16());
1775 /* Set ToS of the new socket based upon the value of incoming SYN.
1776 * ECT bits are set later in tcp_init_transfer().
1778 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1779 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1782 dst = inet_csk_route_child_sock(sk, newsk, req);
1786 /* syncookie case : see end of cookie_v4_check() */
1788 sk_setup_caps(newsk, dst);
1790 tcp_ca_openreq_child(newsk, dst);
1792 tcp_sync_mss(newsk, dst_mtu(dst));
1793 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1795 tcp_initialize_rcv_mss(newsk);
1797 #ifdef CONFIG_TCP_MD5SIG
1798 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1799 /* Copy over the MD5 key from the original socket */
1800 addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1801 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1802 if (key && !tcp_rsk_used_ao(req)) {
1803 if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key))
1805 sk_gso_disable(newsk);
1808 #ifdef CONFIG_TCP_AO
1809 if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET))
1810 goto put_and_exit; /* OOM, release back memory */
1813 if (__inet_inherit_port(sk, newsk) < 0)
1815 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1817 if (likely(*own_req)) {
1818 tcp_move_syn(newtp, req);
1819 ireq->ireq_opt = NULL;
1821 newinet->inet_opt = NULL;
1823 if (!req_unhash && found_dup_sk) {
			/* This code path should be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
1835 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1842 newinet->inet_opt = NULL;
1843 inet_csk_prepare_forced_close(newsk);
1847 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
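
/* With CONFIG_SYN_COOKIES, a listener can validate the ACK of a cookie-based
 * handshake via cookie_v4_check() and rebuild the connection request from
 * the cookie instead of a queued request sock.
 */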
1849 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1851 #ifdef CONFIG_SYN_COOKIES
1852 const struct tcphdr *th = tcp_hdr(skb);
1855 sk = cookie_v4_check(sk, skb);
1860 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1861 struct tcphdr *th, u32 *cookie)
1864 #ifdef CONFIG_SYN_COOKIES
1865 mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1866 &tcp_request_sock_ipv4_ops, sk, th);
1868 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1869 tcp_synq_overflow(sk);
1875 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1885 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1887 enum skb_drop_reason reason;
1890 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1891 struct dst_entry *dst;
1893 dst = rcu_dereference_protected(sk->sk_rx_dst,
1894 lockdep_sock_is_held(sk));
1896 sock_rps_save_rxhash(sk, skb);
1897 sk_mark_napi_id(sk, skb);
1899 if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1900 !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1902 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1906 tcp_rcv_established(sk, skb);
1910 if (tcp_checksum_complete(skb))
1913 if (sk->sk_state == TCP_LISTEN) {
1914 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1919 reason = tcp_child_process(sk, nsk, skb);
1927 sock_rps_save_rxhash(sk, skb);
1929 reason = tcp_rcv_state_process(sk, skb);
1937 tcp_v4_send_reset(rsk, skb);
1939 kfree_skb_reason(skb, reason);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
1948 reason = SKB_DROP_REASON_TCP_CSUM;
1949 trace_tcp_bad_csum(skb);
1950 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1951 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1954 EXPORT_SYMBOL(tcp_v4_do_rcv);
1956 int tcp_v4_early_demux(struct sk_buff *skb)
1958 struct net *net = dev_net(skb->dev);
1959 const struct iphdr *iph;
1960 const struct tcphdr *th;
1963 if (skb->pkt_type != PACKET_HOST)
1966 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1972 if (th->doff < sizeof(struct tcphdr) / 4)
1975 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1976 iph->saddr, th->source,
1977 iph->daddr, ntohs(th->dest),
1978 skb->skb_iif, inet_sdif(skb));
1981 skb->destructor = sock_edemux;
1982 if (sk_fullsock(sk)) {
1983 struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1986 dst = dst_check(dst, 0);
1988 sk->sk_rx_dst_ifindex == skb->skb_iif)
1989 skb_dst_set_noref(skb, dst);
1995 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1996 enum skb_drop_reason *reason)
1998 u32 limit, tail_gso_size, tail_gso_segs;
1999 struct skb_shared_info *shinfo;
2000 const struct tcphdr *th;
2001 struct tcphdr *thtail;
2002 struct sk_buff *tail;
2003 unsigned int hdrlen;
	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);
2019 if (unlikely(tcp_checksum_complete(skb))) {
2021 trace_tcp_bad_csum(skb);
2022 *reason = SKB_DROP_REASON_TCP_CSUM;
2023 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
2024 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
2032 th = (const struct tcphdr *)skb->data;
2033 hdrlen = th->doff * 4;
2035 tail = sk->sk_backlog.tail;
2038 thtail = (struct tcphdr *)tail->data;
2040 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
2041 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
2042 ((TCP_SKB_CB(tail)->tcp_flags |
2043 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
2044 !((TCP_SKB_CB(tail)->tcp_flags &
2045 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
2046 ((TCP_SKB_CB(tail)->tcp_flags ^
2047 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
2048 #ifdef CONFIG_TLS_DEVICE
2049 tail->decrypted != skb->decrypted ||
2051 !mptcp_skb_can_collapse(tail, skb) ||
2052 thtail->doff != th->doff ||
2053 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
2056 __skb_pull(skb, hdrlen);
2058 shinfo = skb_shinfo(skb);
2059 gso_size = shinfo->gso_size ?: skb->len;
2060 gso_segs = shinfo->gso_segs ?: 1;
2062 shinfo = skb_shinfo(tail);
2063 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
2064 tail_gso_segs = shinfo->gso_segs ?: 1;
2066 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
2067 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
2069 if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
2070 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
2071 thtail->window = th->window;
2074 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
2075 * thtail->fin, so that the fast path in tcp_rcv_established()
2076 * is not entered if we append a packet with a FIN.
2077 * SYN, RST, URG are not present.
2078 * ACK is set on both packets.
2079 * PSH : we do not really care in TCP stack,
		 *	 at least for 'GRO' packets.
		 */
2082 thtail->fin |= th->fin;
2083 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2085 if (TCP_SKB_CB(skb)->has_rxtstamp) {
2086 TCP_SKB_CB(tail)->has_rxtstamp = true;
2087 tail->tstamp = skb->tstamp;
2088 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
2091 /* Not as strict as GRO. We only need to carry mss max value */
2092 shinfo->gso_size = max(gso_size, tail_gso_size);
2093 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
2095 sk->sk_backlog.len += delta;
2096 __NET_INC_STATS(sock_net(sk),
2097 LINUX_MIB_TCPBACKLOGCOALESCE);
2098 kfree_skb_partial(skb, fragstolen);
2101 __skb_push(skb, hdrlen);
2104 limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
2106 /* Only socket owner can try to collapse/prune rx queues
2107 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64 * 1024;
2112 if (unlikely(sk_add_backlog(sk, skb, limit))) {
2114 *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
2115 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
2120 EXPORT_SYMBOL(tcp_add_backlog);
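
/* Run the socket filter (e.g. a classic BPF program attached with
 * SO_ATTACH_FILTER) on the segment; the filter may trim the skb, but never
 * below the TCP header length.
 */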
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}
2136 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
2137 const struct tcphdr *th)
	/* This is tricky : We move IPCB at its correct location into
	 * TCP_SKB_CB(); barrier() makes sure the compiler won't play
	 * aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();
2146 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2147 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
2148 skb->len - th->doff * 4);
2149 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2150 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
2151 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
2152 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2153 TCP_SKB_CB(skb)->sacked = 0;
2154 TCP_SKB_CB(skb)->has_rxtstamp =
		skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
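
/*
 *	Main entry point for IPv4 TCP segments, called from the IP layer.
 */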
2162 int tcp_v4_rcv(struct sk_buff *skb)
2164 struct net *net = dev_net(skb->dev);
2165 enum skb_drop_reason drop_reason;
2166 int sdif = inet_sdif(skb);
2167 int dif = inet_iif(skb);
2168 const struct iphdr *iph;
2169 const struct tcphdr *th;
2174 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
2175 if (skb->pkt_type != PACKET_HOST)
2178 /* Count it even if it's bad */
2179 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
2181 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2184 th = (const struct tcphdr *)skb->data;
2186 if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
2187 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2190 if (!pskb_may_pull(skb, th->doff * 4))
2193 /* An explanation is required here, I think.
2194 * Packet length and doff are validated by header prediction,
2195 * provided case of th->doff==0 is eliminated.
2196 * So, we defer the checks. */
2198 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
2201 th = (const struct tcphdr *)skb->data;
2204 sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
2205 skb, __tcp_hdrlen(th), th->source,
2206 th->dest, sdif, &refcounted);
2211 if (sk->sk_state == TCP_TIME_WAIT)
2214 if (sk->sk_state == TCP_NEW_SYN_RECV) {
2215 struct request_sock *req = inet_reqsk(sk);
2216 bool req_stolen = false;
2219 sk = req->rsk_listener;
2220 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2221 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2223 drop_reason = tcp_inbound_hash(sk, req, skb,
2224 &iph->saddr, &iph->daddr,
2225 AF_INET, dif, sdif);
2226 if (unlikely(drop_reason)) {
2227 sk_drops_add(sk, skb);
2231 if (tcp_checksum_complete(skb)) {
2235 if (unlikely(sk->sk_state != TCP_LISTEN)) {
2236 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2238 inet_csk_reqsk_queue_drop_and_put(sk, req);
2242 /* reuseport_migrate_sock() has already held one sk_refcnt
2246 /* We own a reference on the listener, increase it again
2247 * as we might lose it too soon.
2253 if (!tcp_filter(sk, skb)) {
2254 th = (const struct tcphdr *)skb->data;
2256 tcp_v4_fill_cb(skb, iph, th);
2257 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2259 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
			/* Another cpu got exclusive access to req
			 * and created a full blown socket.
			 * Try to feed this packet to this socket
			 * instead of discarding it.
			 */
			tcp_v4_restore_cb(skb);
2273 goto discard_and_relse;
2278 tcp_v4_restore_cb(skb);
2280 drop_reason = tcp_child_process(sk, nsk, skb);
2282 tcp_v4_send_reset(nsk, skb);
2283 goto discard_and_relse;
2290 if (static_branch_unlikely(&ip4_min_ttl)) {
2291 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
2292 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2293 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2294 drop_reason = SKB_DROP_REASON_TCP_MINTTL;
2295 goto discard_and_relse;
2299 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2300 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2301 goto discard_and_relse;
2304 drop_reason = tcp_inbound_hash(sk, NULL, skb, &iph->saddr, &iph->daddr,
2305 AF_INET, dif, sdif);
2307 goto discard_and_relse;
2311 if (tcp_filter(sk, skb)) {
2312 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2313 goto discard_and_relse;
2315 th = (const struct tcphdr *)skb->data;
2317 tcp_v4_fill_cb(skb, iph, th);
2321 if (sk->sk_state == TCP_LISTEN) {
2322 ret = tcp_v4_do_rcv(sk, skb);
2323 goto put_and_return;
2326 sk_incoming_cpu_update(sk);
2328 bh_lock_sock_nested(sk);
2329 tcp_segs_in(tcp_sk(sk), skb);
2331 if (!sock_owned_by_user(sk)) {
2332 ret = tcp_v4_do_rcv(sk, skb);
2334 if (tcp_add_backlog(sk, skb, &drop_reason))
2335 goto discard_and_relse;
2346 drop_reason = SKB_DROP_REASON_NO_SOCKET;
2347 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2350 tcp_v4_fill_cb(skb, iph, th);
2352 if (tcp_checksum_complete(skb)) {
2354 drop_reason = SKB_DROP_REASON_TCP_CSUM;
2355 trace_tcp_bad_csum(skb);
2356 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2358 __TCP_INC_STATS(net, TCP_MIB_INERRS);
2360 tcp_v4_send_reset(NULL, skb);
2364 SKB_DR_OR(drop_reason, NOT_SPECIFIED);
2365 /* Discard frame. */
2366 kfree_skb_reason(skb, drop_reason);
2370 sk_drops_add(sk, skb);
2376 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2377 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2378 inet_twsk_put(inet_twsk(sk));
2382 tcp_v4_fill_cb(skb, iph, th);
2384 if (tcp_checksum_complete(skb)) {
2385 inet_twsk_put(inet_twsk(sk));
2388 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2390 struct sock *sk2 = inet_lookup_listener(net,
2391 net->ipv4.tcp_death_row.hashinfo,
2392 skb, __tcp_hdrlen(th),
2393 iph->saddr, th->source,
2394 iph->daddr, th->dest,
2398 inet_twsk_deschedule_put(inet_twsk(sk));
2400 tcp_v4_restore_cb(skb);
2408 tcp_v4_timewait_ack(sk, skb);
2411 tcp_v4_send_reset(sk, skb);
2412 inet_twsk_deschedule_put(inet_twsk(sk));
2414 case TCP_TW_SUCCESS:;
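/* In short, the TIME_WAIT dispatch above does the following (enum names
 * from tcp_tw_status): TCP_TW_SYN re-targets the SYN at a matching
 * listener so the port pair can be reused, TCP_TW_ACK answers the peer
 * with an ACK, TCP_TW_RST sends a reset and tears the timewait socket
 * down early, and TCP_TW_SUCCESS silently discards the segment.
 */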
2419 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2420 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2421 .twsk_unique = tcp_twsk_unique,
2422 .twsk_destructor= tcp_twsk_destructor,
2425 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2427 struct dst_entry *dst = skb_dst(skb);
2429 if (dst && dst_hold_safe(dst)) {
2430 rcu_assign_pointer(sk->sk_rx_dst, dst);
2431 sk->sk_rx_dst_ifindex = skb->skb_iif;
2434 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2436 const struct inet_connection_sock_af_ops ipv4_specific = {
2437 .queue_xmit = ip_queue_xmit,
2438 .send_check = tcp_v4_send_check,
2439 .rebuild_header = inet_sk_rebuild_header,
2440 .sk_rx_dst_set = inet_sk_rx_dst_set,
2441 .conn_request = tcp_v4_conn_request,
2442 .syn_recv_sock = tcp_v4_syn_recv_sock,
2443 .net_header_len = sizeof(struct iphdr),
2444 .setsockopt = ip_setsockopt,
2445 .getsockopt = ip_getsockopt,
2446 .addr2sockaddr = inet_csk_addr2sockaddr,
2447 .sockaddr_len = sizeof(struct sockaddr_in),
2448 .mtu_reduced = tcp_v4_mtu_reduced,
2450 EXPORT_SYMBOL(ipv4_specific);
2452 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2453 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2454 #ifdef CONFIG_TCP_MD5SIG
2455 .md5_lookup = tcp_v4_md5_lookup,
2456 .calc_md5_hash = tcp_v4_md5_hash_skb,
2457 .md5_parse = tcp_v4_parse_md5_keys,
2459 #ifdef CONFIG_TCP_AO
2460 .ao_lookup = tcp_v4_ao_lookup,
2461 .calc_ao_hash = tcp_v4_ao_hash_skb,
2462 .ao_parse = tcp_v4_parse_ao,
2463 .ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
2468	/* NOTE: A lot of things are set to zero explicitly by the call to
2469	 * sk_alloc(), so they need not be done here.
2471 static int tcp_v4_init_sock(struct sock *sk)
2473 struct inet_connection_sock *icsk = inet_csk(sk);
2477 icsk->icsk_af_ops = &ipv4_specific;
2479 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2480 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2486 #ifdef CONFIG_TCP_MD5SIG
2487 static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
2489 struct tcp_md5sig_info *md5sig;
2491 md5sig = container_of(head, struct tcp_md5sig_info, rcu);
2493 static_branch_slow_dec_deferred(&tcp_md5_needed);
2494 tcp_md5_release_sigpool();
2498 void tcp_v4_destroy_sock(struct sock *sk)
2500 struct tcp_sock *tp = tcp_sk(sk);
2502 trace_tcp_destroy_sock(sk);
2504 tcp_clear_xmit_timers(sk);
2506 tcp_cleanup_congestion_control(sk);
2508 tcp_cleanup_ulp(sk);
2510	/* Clean up the write buffer. */
2511 tcp_write_queue_purge(sk);
2513 /* Check if we want to disable active TFO */
2514 tcp_fastopen_active_disable_ofo_check(sk);
2516 /* Cleans up our, hopefully empty, out_of_order_queue. */
2517 skb_rbtree_purge(&tp->out_of_order_queue);
2519 #ifdef CONFIG_TCP_MD5SIG
2520 /* Clean up the MD5 key list, if any */
2521 if (tp->md5sig_info) {
2522 struct tcp_md5sig_info *md5sig;
2524 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
2525 tcp_clear_md5_list(sk);
2526 call_rcu(&md5sig->rcu, tcp_md5sig_info_free_rcu);
2527 rcu_assign_pointer(tp->md5sig_info, NULL);
2530 tcp_ao_destroy_sock(sk, false);
2532 /* Clean up a referenced TCP bind bucket. */
2533 if (inet_csk(sk)->icsk_bind_hash)
2536 BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2538	/* If the socket is aborted during the connect operation */
2539 tcp_free_fastopen_req(tp);
2540 tcp_fastopen_destroy_cipher(sk);
2541 tcp_saved_syn_free(tp);
2543 sk_sockets_allocated_dec(sk);
2545 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2547 #ifdef CONFIG_PROC_FS
2548 /* Proc filesystem TCP sock list dumping. */
2550 static unsigned short seq_file_family(const struct seq_file *seq);
2552 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
2554 unsigned short family = seq_file_family(seq);
2556 /* AF_UNSPEC is used as a match all */
2557 return ((family == AF_UNSPEC || family == sk->sk_family) &&
2558 net_eq(sock_net(sk), seq_file_net(seq)));
2561	/* Find a non-empty bucket (starting from st->bucket)
2562 * and return the first sk from it.
2564 static void *listening_get_first(struct seq_file *seq)
2566 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2567 struct tcp_iter_state *st = seq->private;
2570 for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2571 struct inet_listen_hashbucket *ilb2;
2572 struct hlist_nulls_node *node;
2575 ilb2 = &hinfo->lhash2[st->bucket];
2576 if (hlist_nulls_empty(&ilb2->nulls_head))
2579 spin_lock(&ilb2->lock);
2580 sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
2581 if (seq_sk_match(seq, sk))
2584 spin_unlock(&ilb2->lock);
2590 /* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2591 * If "cur" is the last one in the st->bucket,
2592	 * call listening_get_first() to return the first sk of the next bucket.
2595 static void *listening_get_next(struct seq_file *seq, void *cur)
2597 struct tcp_iter_state *st = seq->private;
2598 struct inet_listen_hashbucket *ilb2;
2599 struct hlist_nulls_node *node;
2600 struct inet_hashinfo *hinfo;
2601 struct sock *sk = cur;
2606 sk = sk_nulls_next(sk);
2607 sk_nulls_for_each_from(sk, node) {
2608 if (seq_sk_match(seq, sk))
2612 hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2613 ilb2 = &hinfo->lhash2[st->bucket];
2614 spin_unlock(&ilb2->lock);
2616 return listening_get_first(seq);
2619 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2621 struct tcp_iter_state *st = seq->private;
2626 rc = listening_get_first(seq);
2628 while (rc && *pos) {
2629 rc = listening_get_next(seq, rc);
2635 static inline bool empty_bucket(struct inet_hashinfo *hinfo,
2636 const struct tcp_iter_state *st)
2638 return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2642 * Get first established socket starting from bucket given in st->bucket.
2643 * If st->bucket is zero, the very first socket in the hash is returned.
2645 static void *established_get_first(struct seq_file *seq)
2647 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2648 struct tcp_iter_state *st = seq->private;
2651 for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2653 struct hlist_nulls_node *node;
2654 spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2658 /* Lockless fast path for the common case of empty buckets */
2659 if (empty_bucket(hinfo, st))
2663 sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2664 if (seq_sk_match(seq, sk))
2667 spin_unlock_bh(lock);
2673 static void *established_get_next(struct seq_file *seq, void *cur)
2675 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2676 struct tcp_iter_state *st = seq->private;
2677 struct hlist_nulls_node *node;
2678 struct sock *sk = cur;
2683 sk = sk_nulls_next(sk);
2685 sk_nulls_for_each_from(sk, node) {
2686 if (seq_sk_match(seq, sk))
2690 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2692 return established_get_first(seq);
2695 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2697 struct tcp_iter_state *st = seq->private;
2701 rc = established_get_first(seq);
2704 rc = established_get_next(seq, rc);
2710 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2713 struct tcp_iter_state *st = seq->private;
2715 st->state = TCP_SEQ_STATE_LISTENING;
2716 rc = listening_get_idx(seq, &pos);
2719 st->state = TCP_SEQ_STATE_ESTABLISHED;
2720 rc = established_get_idx(seq, pos);
2726 static void *tcp_seek_last_pos(struct seq_file *seq)
2728 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2729 struct tcp_iter_state *st = seq->private;
2730 int bucket = st->bucket;
2731 int offset = st->offset;
2732 int orig_num = st->num;
2735 switch (st->state) {
2736 case TCP_SEQ_STATE_LISTENING:
2737 if (st->bucket > hinfo->lhash2_mask)
2739 rc = listening_get_first(seq);
2740 while (offset-- && rc && bucket == st->bucket)
2741 rc = listening_get_next(seq, rc);
2745 st->state = TCP_SEQ_STATE_ESTABLISHED;
2747 case TCP_SEQ_STATE_ESTABLISHED:
2748 if (st->bucket > hinfo->ehash_mask)
2750 rc = established_get_first(seq);
2751 while (offset-- && rc && bucket == st->bucket)
2752 rc = established_get_next(seq, rc);
2760 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2762 struct tcp_iter_state *st = seq->private;
2765 if (*pos && *pos == st->last_pos) {
2766 rc = tcp_seek_last_pos(seq);
2771 st->state = TCP_SEQ_STATE_LISTENING;
2775 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2778 st->last_pos = *pos;
2781 EXPORT_SYMBOL(tcp_seq_start);
2783 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2785 struct tcp_iter_state *st = seq->private;
2788 if (v == SEQ_START_TOKEN) {
2789 rc = tcp_get_idx(seq, 0);
2793 switch (st->state) {
2794 case TCP_SEQ_STATE_LISTENING:
2795 rc = listening_get_next(seq, v);
2797 st->state = TCP_SEQ_STATE_ESTABLISHED;
2800 rc = established_get_first(seq);
2803 case TCP_SEQ_STATE_ESTABLISHED:
2804 rc = established_get_next(seq, v);
2809 st->last_pos = *pos;
2812 EXPORT_SYMBOL(tcp_seq_next);
2814 void tcp_seq_stop(struct seq_file *seq, void *v)
2816 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2817 struct tcp_iter_state *st = seq->private;
2819 switch (st->state) {
2820 case TCP_SEQ_STATE_LISTENING:
2821 if (v != SEQ_START_TOKEN)
2822 spin_unlock(&hinfo->lhash2[st->bucket].lock);
2824 case TCP_SEQ_STATE_ESTABLISHED:
2826 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2830 EXPORT_SYMBOL(tcp_seq_stop);
2832 static void get_openreq4(const struct request_sock *req,
2833 struct seq_file *f, int i)
2835 const struct inet_request_sock *ireq = inet_rsk(req);
2836 long delta = req->rsk_timer.expires - jiffies;
2838 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2839 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2844 ntohs(ireq->ir_rmt_port),
2846 0, 0, /* could print option size, but that is af dependent. */
2847 1, /* timers active (only the expire timer) */
2848 jiffies_delta_to_clock_t(delta),
2850 from_kuid_munged(seq_user_ns(f),
2851 sock_i_uid(req->rsk_listener)),
2852 0, /* non standard timer */
2853 0, /* open_requests have no inode */
2858 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2861 unsigned long timer_expires;
2862 const struct tcp_sock *tp = tcp_sk(sk);
2863 const struct inet_connection_sock *icsk = inet_csk(sk);
2864 const struct inet_sock *inet = inet_sk(sk);
2865 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2866 __be32 dest = inet->inet_daddr;
2867 __be32 src = inet->inet_rcv_saddr;
2868 __u16 destp = ntohs(inet->inet_dport);
2869 __u16 srcp = ntohs(inet->inet_sport);
2873 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2874 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2875 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2877 timer_expires = icsk->icsk_timeout;
2878 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2880 timer_expires = icsk->icsk_timeout;
2881 } else if (timer_pending(&sk->sk_timer)) {
2883 timer_expires = sk->sk_timer.expires;
2886 timer_expires = jiffies;
2889 state = inet_sk_state_load(sk);
2890 if (state == TCP_LISTEN)
2891 rx_queue = READ_ONCE(sk->sk_ack_backlog);
2893 /* Because we don't lock the socket,
2894 * we might find a transient negative value.
2896 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2897 READ_ONCE(tp->copied_seq), 0);
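/* Illustration of the clamp above (hypothetical values): without the
 * socket lock a reader may see rcv_nxt = 1000 sampled before an update
 * and copied_seq = 1010 sampled after it, i.e. a transient -10; the
 * max_t(int, ..., 0) makes /proc report an rx_queue of 0 instead of a
 * bogus negative number.
 */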
2899 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2900 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2901 i, src, srcp, dest, destp, state,
2902 READ_ONCE(tp->write_seq) - tp->snd_una,
2905 jiffies_delta_to_clock_t(timer_expires - jiffies),
2906 icsk->icsk_retransmits,
2907 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2908 icsk->icsk_probes_out,
2910 refcount_read(&sk->sk_refcnt), sk,
2911 jiffies_to_clock_t(icsk->icsk_rto),
2912 jiffies_to_clock_t(icsk->icsk_ack.ato),
2913 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2915 state == TCP_LISTEN ?
2916 fastopenq->max_qlen :
2917 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
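/* An illustrative (not captured) fragment of the output produced by the
 * format string above: a hypothetical socket 10.0.0.1:36000 ->
 * 10.0.0.2:443 in ESTABLISHED state starts its row as
 *
 *   "0100000A:8CA0 0200000A:01BB 01 ..."
 *
 * because the IPv4 addresses are printed as raw __be32 values via %08X
 * (byte-reversed on little-endian hosts), while the ports were already
 * converted with ntohs() and print in host order (36000 == 0x8CA0,
 * 443 == 0x01BB).
 */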
2920 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2921 struct seq_file *f, int i)
2923 long delta = tw->tw_timer.expires - jiffies;
2927 dest = tw->tw_daddr;
2928 src = tw->tw_rcv_saddr;
2929 destp = ntohs(tw->tw_dport);
2930 srcp = ntohs(tw->tw_sport);
2932 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2933 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2934 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2935 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2936 refcount_read(&tw->tw_refcnt), tw);
2941 static int tcp4_seq_show(struct seq_file *seq, void *v)
2943 struct tcp_iter_state *st;
2944 struct sock *sk = v;
2946 seq_setwidth(seq, TMPSZ - 1);
2947 if (v == SEQ_START_TOKEN) {
2948 seq_puts(seq, " sl local_address rem_address st tx_queue "
2949 "rx_queue tr tm->when retrnsmt uid timeout "
2955 if (sk->sk_state == TCP_TIME_WAIT)
2956 get_timewait4_sock(v, seq, st->num);
2957 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2958 get_openreq4(v, seq, st->num);
2960 get_tcp4_sock(v, seq, st->num);
2966 #ifdef CONFIG_BPF_SYSCALL
2967 struct bpf_tcp_iter_state {
2968 struct tcp_iter_state state;
2969 unsigned int cur_sk;
2970 unsigned int end_sk;
2971 unsigned int max_sk;
2972 struct sock **batch;
2973 bool st_bucket_done;
2976 struct bpf_iter__tcp {
2977 __bpf_md_ptr(struct bpf_iter_meta *, meta);
2978 __bpf_md_ptr(struct sock_common *, sk_common);
2979 uid_t uid __aligned(8);
2982 static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2983 struct sock_common *sk_common, uid_t uid)
2985 struct bpf_iter__tcp ctx;
2987 meta->seq_num--; /* skip SEQ_START_TOKEN */
2989 ctx.sk_common = sk_common;
2991 return bpf_iter_run_prog(prog, &ctx);
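/* A minimal sketch of the consumer side: a BPF iterator program that
 * receives the bpf_iter__tcp context filled in above. The program name
 * and output format are made up; only the section name and context
 * layout follow the iterator registered at the bottom of this file.
 *
 *   SEC("iter/tcp")
 *   int dump_tcp(struct bpf_iter__tcp *ctx)
 *   {
 *           struct sock_common *skc = ctx->sk_common;
 *
 *           if (!skc)
 *                   return 0;
 *           BPF_SEQ_PRINTF(ctx->meta->seq, "family=%d uid=%u\n",
 *                          skc->skc_family, ctx->uid);
 *           return 0;
 *   }
 */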
2994 static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
2996 while (iter->cur_sk < iter->end_sk)
2997 sock_gen_put(iter->batch[iter->cur_sk++]);
3000 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
3001 unsigned int new_batch_sz)
3003 struct sock **new_batch;
3005 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3006 GFP_USER | __GFP_NOWARN);
3010 bpf_iter_tcp_put_batch(iter);
3011 kvfree(iter->batch);
3012 iter->batch = new_batch;
3013 iter->max_sk = new_batch_sz;
3018 static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
3019 struct sock *start_sk)
3021 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3022 struct bpf_tcp_iter_state *iter = seq->private;
3023 struct tcp_iter_state *st = &iter->state;
3024 struct hlist_nulls_node *node;
3025 unsigned int expected = 1;
3028 sock_hold(start_sk);
3029 iter->batch[iter->end_sk++] = start_sk;
3031 sk = sk_nulls_next(start_sk);
3032 sk_nulls_for_each_from(sk, node) {
3033 if (seq_sk_match(seq, sk)) {
3034 if (iter->end_sk < iter->max_sk) {
3036 iter->batch[iter->end_sk++] = sk;
3041 spin_unlock(&hinfo->lhash2[st->bucket].lock);
3046 static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
3047 struct sock *start_sk)
3049 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3050 struct bpf_tcp_iter_state *iter = seq->private;
3051 struct tcp_iter_state *st = &iter->state;
3052 struct hlist_nulls_node *node;
3053 unsigned int expected = 1;
3056 sock_hold(start_sk);
3057 iter->batch[iter->end_sk++] = start_sk;
3059 sk = sk_nulls_next(start_sk);
3060 sk_nulls_for_each_from(sk, node) {
3061 if (seq_sk_match(seq, sk)) {
3062 if (iter->end_sk < iter->max_sk) {
3064 iter->batch[iter->end_sk++] = sk;
3069 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
3074 static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
3076 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3077 struct bpf_tcp_iter_state *iter = seq->private;
3078 struct tcp_iter_state *st = &iter->state;
3079 unsigned int expected;
3080 bool resized = false;
3083	/* The st->bucket is done. Directly advance to the next
3084	 * bucket instead of having tcp_seek_last_pos() skip entries
3085	 * one by one in the current bucket, only to eventually find out
3086	 * it has to advance to the next bucket.
3088 if (iter->st_bucket_done) {
3091 if (st->state == TCP_SEQ_STATE_LISTENING &&
3092 st->bucket > hinfo->lhash2_mask) {
3093 st->state = TCP_SEQ_STATE_ESTABLISHED;
3099 /* Get a new batch */
3102 iter->st_bucket_done = false;
3104 sk = tcp_seek_last_pos(seq);
3106 return NULL; /* Done */
3108 if (st->state == TCP_SEQ_STATE_LISTENING)
3109 expected = bpf_iter_tcp_listening_batch(seq, sk);
3111 expected = bpf_iter_tcp_established_batch(seq, sk);
3113 if (iter->end_sk == expected) {
3114 iter->st_bucket_done = true;
3118 if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
3126 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
3128 /* bpf iter does not support lseek, so it always
3129	 * continues from where it was stop()-ped.
3132 return bpf_iter_tcp_batch(seq);
3134 return SEQ_START_TOKEN;
3137 static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3139 struct bpf_tcp_iter_state *iter = seq->private;
3140 struct tcp_iter_state *st = &iter->state;
3143 /* Whenever seq_next() is called, the iter->cur_sk is
3144	 * done with seq_show(), so advance to the next sk in the batch.
3147 if (iter->cur_sk < iter->end_sk) {
3148 /* Keeping st->num consistent in tcp_iter_state.
3149 * bpf_iter_tcp does not use st->num.
3150 * meta.seq_num is used instead.
3153 /* Move st->offset to the next sk in the bucket such that
3154 * the future start() will resume at st->offset in
3155 * st->bucket. See tcp_seek_last_pos().
3158 sock_gen_put(iter->batch[iter->cur_sk++]);
3161 if (iter->cur_sk < iter->end_sk)
3162 sk = iter->batch[iter->cur_sk];
3164 sk = bpf_iter_tcp_batch(seq);
3167 /* Keeping st->last_pos consistent in tcp_iter_state.
3168	 * bpf iter does not do lseek, so st->last_pos always equals *pos.
3170 st->last_pos = *pos;
3174 static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
3176 struct bpf_iter_meta meta;
3177 struct bpf_prog *prog;
3178 struct sock *sk = v;
3182 if (v == SEQ_START_TOKEN)
3185 if (sk_fullsock(sk))
3188 if (unlikely(sk_unhashed(sk))) {
3193 if (sk->sk_state == TCP_TIME_WAIT) {
3195 } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
3196 const struct request_sock *req = v;
3198 uid = from_kuid_munged(seq_user_ns(seq),
3199 sock_i_uid(req->rsk_listener));
3201 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3205 prog = bpf_iter_get_info(&meta, false);
3206 ret = tcp_prog_seq_show(prog, &meta, v, uid);
3209 if (sk_fullsock(sk))
3215 static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
3217 struct bpf_tcp_iter_state *iter = seq->private;
3218 struct bpf_iter_meta meta;
3219 struct bpf_prog *prog;
3223 prog = bpf_iter_get_info(&meta, true);
3225 (void)tcp_prog_seq_show(prog, &meta, v, 0);
3228 if (iter->cur_sk < iter->end_sk) {
3229 bpf_iter_tcp_put_batch(iter);
3230 iter->st_bucket_done = false;
3234 static const struct seq_operations bpf_iter_tcp_seq_ops = {
3235 .show = bpf_iter_tcp_seq_show,
3236 .start = bpf_iter_tcp_seq_start,
3237 .next = bpf_iter_tcp_seq_next,
3238 .stop = bpf_iter_tcp_seq_stop,
3241 static unsigned short seq_file_family(const struct seq_file *seq)
3243 const struct tcp_seq_afinfo *afinfo;
3245 #ifdef CONFIG_BPF_SYSCALL
3246	/* Iterated from bpf_iter. Let the bpf prog filter instead. */
3247 if (seq->op == &bpf_iter_tcp_seq_ops)
3251 /* Iterated from proc fs */
3252 afinfo = pde_data(file_inode(seq->file));
3253 return afinfo->family;
3256 static const struct seq_operations tcp4_seq_ops = {
3257 .show = tcp4_seq_show,
3258 .start = tcp_seq_start,
3259 .next = tcp_seq_next,
3260 .stop = tcp_seq_stop,
3263 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
3267 static int __net_init tcp4_proc_init_net(struct net *net)
3269 if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
3270 sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
3275 static void __net_exit tcp4_proc_exit_net(struct net *net)
3277 remove_proc_entry("tcp", net->proc_net);
3280 static struct pernet_operations tcp4_net_ops = {
3281 .init = tcp4_proc_init_net,
3282 .exit = tcp4_proc_exit_net,
3285 int __init tcp4_proc_init(void)
3287 return register_pernet_subsys(&tcp4_net_ops);
3290 void tcp4_proc_exit(void)
3292 unregister_pernet_subsys(&tcp4_net_ops);
3294 #endif /* CONFIG_PROC_FS */
3296 /* @wake is one when sk_stream_write_space() calls us.
3297	 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
3298 * This mimics the strategy used in sock_def_write_space().
3300 bool tcp_stream_memory_free(const struct sock *sk, int wake)
3302 const struct tcp_sock *tp = tcp_sk(sk);
3303 u32 notsent_bytes = READ_ONCE(tp->write_seq) -
3304 READ_ONCE(tp->snd_nxt);
3306 return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
3308 EXPORT_SYMBOL(tcp_stream_memory_free);
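/* A worked example of the "<< wake" trick above, with hypothetical
 * numbers: assume tcp_notsent_lowat(tp) = 128 KB and notsent_bytes = 80 KB.
 *
 *   wake == 0 (ordinary check):              80 KB < 128 KB -> writable
 *   wake == 1 (from sk_stream_write_space): 160 KB < 128 KB -> not yet
 *
 * so EPOLLOUT is only raised once notsent_bytes falls below half of the
 * notsent_lowat limit, matching the comment above.
 */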
3310 struct proto tcp_prot = {
3312 .owner = THIS_MODULE,
3314 .pre_connect = tcp_v4_pre_connect,
3315 .connect = tcp_v4_connect,
3316 .disconnect = tcp_disconnect,
3317 .accept = inet_csk_accept,
3319 .init = tcp_v4_init_sock,
3320 .destroy = tcp_v4_destroy_sock,
3321 .shutdown = tcp_shutdown,
3322 .setsockopt = tcp_setsockopt,
3323 .getsockopt = tcp_getsockopt,
3324 .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
3325 .keepalive = tcp_set_keepalive,
3326 .recvmsg = tcp_recvmsg,
3327 .sendmsg = tcp_sendmsg,
3328 .splice_eof = tcp_splice_eof,
3329 .backlog_rcv = tcp_v4_do_rcv,
3330 .release_cb = tcp_release_cb,
3332 .unhash = inet_unhash,
3333 .get_port = inet_csk_get_port,
3334 .put_port = inet_put_port,
3335 #ifdef CONFIG_BPF_SYSCALL
3336 .psock_update_sk_prot = tcp_bpf_update_proto,
3338 .enter_memory_pressure = tcp_enter_memory_pressure,
3339 .leave_memory_pressure = tcp_leave_memory_pressure,
3340 .stream_memory_free = tcp_stream_memory_free,
3341 .sockets_allocated = &tcp_sockets_allocated,
3342 .orphan_count = &tcp_orphan_count,
3344 .memory_allocated = &tcp_memory_allocated,
3345 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
3347 .memory_pressure = &tcp_memory_pressure,
3348 .sysctl_mem = sysctl_tcp_mem,
3349 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
3350 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
3351 .max_header = MAX_TCP_HEADER,
3352 .obj_size = sizeof(struct tcp_sock),
3353 .slab_flags = SLAB_TYPESAFE_BY_RCU,
3354 .twsk_prot = &tcp_timewait_sock_ops,
3355 .rsk_prot = &tcp_request_sock_ops,
3357 .no_autobind = true,
3358 .diag_destroy = tcp_abort,
3360 EXPORT_SYMBOL(tcp_prot);
3362 static void __net_exit tcp_sk_exit(struct net *net)
3364 if (net->ipv4.tcp_congestion_control)
3365 bpf_module_put(net->ipv4.tcp_congestion_control,
3366 net->ipv4.tcp_congestion_control->owner);
3369 static void __net_init tcp_set_hashinfo(struct net *net)
3371 struct inet_hashinfo *hinfo;
3372 unsigned int ehash_entries;
3373 struct net *old_net;
3375 if (net_eq(net, &init_net))
3378 old_net = current->nsproxy->net_ns;
3379 ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries);
3383 ehash_entries = roundup_pow_of_two(ehash_entries);
3384 hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries);
3386 pr_warn("Failed to allocate TCP ehash (entries: %u) "
3387 "for a netns, fallback to the global one\n",
3390 hinfo = &tcp_hashinfo;
3391 ehash_entries = tcp_hashinfo.ehash_mask + 1;
3394 net->ipv4.tcp_death_row.hashinfo = hinfo;
3395 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2;
3396 net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128);
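/* An illustrative sizing run for the per-netns ehash logic above
 * (numbers hypothetical): with net.ipv4.tcp_child_ehash_entries = 1000
 * set in the creating netns, roundup_pow_of_two(1000) = 1024, so the
 * child gets a 1024-entry ehash, max_tw_buckets = 512 and
 * max_syn_backlog = max(128, 1024 / 128) = 128. With the sysctl left at
 * its default of 0, the child keeps sharing the global tcp_hashinfo.
 */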
3399 static int __net_init tcp_sk_init(struct net *net)
3401 net->ipv4.sysctl_tcp_ecn = 2;
3402 net->ipv4.sysctl_tcp_ecn_fallback = 1;
3404 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
3405 net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
3406 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
3407 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
3408 net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
3410 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
3411 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
3412 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
3414 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
3415 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
3416 net->ipv4.sysctl_tcp_syncookies = 1;
3417 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
3418 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
3419 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
3420 net->ipv4.sysctl_tcp_orphan_retries = 0;
3421 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
3422 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
3423 net->ipv4.sysctl_tcp_tw_reuse = 2;
3424 net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
3426 refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
3427 tcp_set_hashinfo(net);
3429 net->ipv4.sysctl_tcp_sack = 1;
3430 net->ipv4.sysctl_tcp_window_scaling = 1;
3431 net->ipv4.sysctl_tcp_timestamps = 1;
3432 net->ipv4.sysctl_tcp_early_retrans = 3;
3433 net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
3434 net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
3435 net->ipv4.sysctl_tcp_retrans_collapse = 1;
3436 net->ipv4.sysctl_tcp_max_reordering = 300;
3437 net->ipv4.sysctl_tcp_dsack = 1;
3438 net->ipv4.sysctl_tcp_app_win = 31;
3439 net->ipv4.sysctl_tcp_adv_win_scale = 1;
3440 net->ipv4.sysctl_tcp_frto = 2;
3441 net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
3442 /* This limits the percentage of the congestion window which we
3443 * will allow a single TSO frame to consume. Building TSO frames
3444 * which are too large can cause TCP streams to be bursty.
3446 net->ipv4.sysctl_tcp_tso_win_divisor = 3;
3447 /* Default TSQ limit of 16 TSO segments */
3448 net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
3450 /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
3451 net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
3453 net->ipv4.sysctl_tcp_min_tso_segs = 2;
3454 net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
3455 net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
3456 net->ipv4.sysctl_tcp_autocorking = 1;
3457 net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
3458 net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
3459 net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
3460 if (net != &init_net) {
3461 memcpy(net->ipv4.sysctl_tcp_rmem,
3462 init_net.ipv4.sysctl_tcp_rmem,
3463 sizeof(init_net.ipv4.sysctl_tcp_rmem));
3464 memcpy(net->ipv4.sysctl_tcp_wmem,
3465 init_net.ipv4.sysctl_tcp_wmem,
3466 sizeof(init_net.ipv4.sysctl_tcp_wmem));
3468 net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
3469 net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
3470 net->ipv4.sysctl_tcp_comp_sack_nr = 44;
3471 net->ipv4.sysctl_tcp_backlog_ack_defer = 1;
3472 net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
3473 net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
3474 atomic_set(&net->ipv4.tfo_active_disable_times, 0);
3476 /* Set default values for PLB */
3477 net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
3478 net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
3479 net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
3480 net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
3481 /* Default congestion threshold for PLB to mark a round is 50% */
3482 net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
3484 /* Reno is always built in */
3485 if (!net_eq(net, &init_net) &&
3486 bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
3487 init_net.ipv4.tcp_congestion_control->owner))
3488 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
3490 net->ipv4.tcp_congestion_control = &tcp_reno;
3492 net->ipv4.sysctl_tcp_syn_linear_timeouts = 4;
3493 net->ipv4.sysctl_tcp_shrink_window = 0;
3495 net->ipv4.sysctl_tcp_pingpong_thresh = 1;
3500 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3504 tcp_twsk_purge(net_exit_list, AF_INET);
3506 list_for_each_entry(net, net_exit_list, exit_list) {
3507 inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
3508 WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
3509 tcp_fastopen_ctx_destroy(net);
3513 static struct pernet_operations __net_initdata tcp_sk_ops = {
3514 .init = tcp_sk_init,
3515 .exit = tcp_sk_exit,
3516 .exit_batch = tcp_sk_exit_batch,
3519 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3520 DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
3521 struct sock_common *sk_common, uid_t uid)
3523 #define INIT_BATCH_SZ 16
3525 static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
3527 struct bpf_tcp_iter_state *iter = priv_data;
3530 err = bpf_iter_init_seq_net(priv_data, aux);
3534 err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
3536 bpf_iter_fini_seq_net(priv_data);
3543 static void bpf_iter_fini_tcp(void *priv_data)
3545 struct bpf_tcp_iter_state *iter = priv_data;
3547 bpf_iter_fini_seq_net(priv_data);
3548 kvfree(iter->batch);
3551 static const struct bpf_iter_seq_info tcp_seq_info = {
3552 .seq_ops = &bpf_iter_tcp_seq_ops,
3553 .init_seq_private = bpf_iter_init_tcp,
3554 .fini_seq_private = bpf_iter_fini_tcp,
3555 .seq_priv_size = sizeof(struct bpf_tcp_iter_state),
3558 static const struct bpf_func_proto *
3559 bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
3560 const struct bpf_prog *prog)
3563 case BPF_FUNC_setsockopt:
3564 return &bpf_sk_setsockopt_proto;
3565 case BPF_FUNC_getsockopt:
3566 return &bpf_sk_getsockopt_proto;
3572 static struct bpf_iter_reg tcp_reg_info = {
3574 .ctx_arg_info_size = 1,
3576 { offsetof(struct bpf_iter__tcp, sk_common),
3577 PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
3579 .get_func_proto = bpf_iter_tcp_get_func_proto,
3580 .seq_info = &tcp_seq_info,
3583 static void __init bpf_iter_register(void)
3585 tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3586 if (bpf_iter_reg_target(&tcp_reg_info))
3587 pr_warn("Warning: could not register bpf iterator tcp\n");
3592 void __init tcp_v4_init(void)
3596 for_each_possible_cpu(cpu) {
3599 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3600 IPPROTO_TCP, &init_net);
3602 panic("Failed to create the TCP control socket.\n");
3603 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3605 /* Please enforce IP_DF and IPID==0 for RST and
3606 * ACK sent in SYN-RECV and TIME-WAIT state.
3608 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
3610 per_cpu(ipv4_tcp_sk, cpu) = sk;
3612 if (register_pernet_subsys(&tcp_sk_ops))
3613 panic("Failed to create the TCP control socket.\n");
3615 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3616 bpf_iter_register();