net: tcp: add skb drop reasons to tcp_add_backlog()
authorMenglong Dong <imagedong@tencent.com>
Sun, 20 Feb 2022 07:06:33 +0000 (15:06 +0800)
committerDavid S. Miller <davem@davemloft.net>
Sun, 20 Feb 2022 13:55:31 +0000 (13:55 +0000)
Pass the address of drop_reason to tcp_add_backlog() to store the
reason for the skb drop when it fails. The following drop reason is
introduced:

SKB_DROP_REASON_SOCKET_BACKLOG

Reviewed-by: Mengen Sun <mengensun@tencent.com>
Reviewed-by: Hao Peng <flyingpeng@tencent.com>
Signed-off-by: Menglong Dong <imagedong@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/skbuff.h
include/net/tcp.h
include/trace/events/skb.h
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c

index 46678eb587ffceab90b50331449a25f1ad957566..f7f33c79945b5634135952b1fd30c530617dc0b5 100644 (file)
@@ -358,6 +358,10 @@ enum skb_drop_reason {
                                         * corresponding to
                                         * LINUX_MIB_TCPMD5FAILURE
                                         */
+       SKB_DROP_REASON_SOCKET_BACKLOG, /* failed to add skb to socket
+                                        * backlog (see
+                                        * LINUX_MIB_TCPBACKLOGDROP)
+                                        */
        SKB_DROP_REASON_MAX,
 };
 
index eff2487d972d2c41e568df50953b23fa77629d7b..04f4650e0ff024b6b30ddb6471b179b82ed37d52 100644 (file)
@@ -1367,7 +1367,8 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
                __skb_checksum_complete(skb);
 }
 
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+                    enum skb_drop_reason *reason);
 
 #ifdef CONFIG_INET
 void __sk_defer_free_flush(struct sock *sk);
index 46c06b0be850d31582819df2ce08c279768b447d..bfccd77e907110e692be1d317e7376e429aa1d9d 100644 (file)
@@ -31,6 +31,7 @@
        EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED,                   \
           TCP_MD5UNEXPECTED)                                   \
        EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE)      \
+       EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG)      \
        EMe(SKB_DROP_REASON_MAX, MAX)
 
 #undef EM
index d3c41711905717e6c5c69d58c1440a7356c79f3d..cbca8637ba2fa68bcec210d5ec59abb2f8c438b5 100644 (file)
@@ -1811,7 +1811,8 @@ int tcp_v4_early_demux(struct sk_buff *skb)
        return 0;
 }
 
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+                    enum skb_drop_reason *reason)
 {
        u32 limit, tail_gso_size, tail_gso_segs;
        struct skb_shared_info *shinfo;
@@ -1837,6 +1838,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
        if (unlikely(tcp_checksum_complete(skb))) {
                bh_unlock_sock(sk);
                trace_tcp_bad_csum(skb);
+               *reason = SKB_DROP_REASON_TCP_CSUM;
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
                return true;
@@ -1925,6 +1927,7 @@ no_coalesce:
 
        if (unlikely(sk_add_backlog(sk, skb, limit))) {
                bh_unlock_sock(sk);
+               *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
                return true;
        }
@@ -2133,7 +2136,7 @@ process:
        if (!sock_owned_by_user(sk)) {
                ret = tcp_v4_do_rcv(sk, skb);
        } else {
-               if (tcp_add_backlog(sk, skb))
+               if (tcp_add_backlog(sk, skb, &drop_reason))
                        goto discard_and_relse;
        }
        bh_unlock_sock(sk);
index 1262b790b146ff71c65d9f1b14252ebe7bbd3729..abf0ad547858bb05366b2cf88920829420e1079d 100644 (file)
@@ -1784,7 +1784,7 @@ process:
        if (!sock_owned_by_user(sk)) {
                ret = tcp_v6_do_rcv(sk, skb);
        } else {
-               if (tcp_add_backlog(sk, skb))
+               if (tcp_add_backlog(sk, skb, &drop_reason))
                        goto discard_and_relse;
        }
        bh_unlock_sock(sk);