tcp: add stat of data packet reordering events
authorWei Wang <weiwan@google.com>
Wed, 1 Aug 2018 00:46:24 +0000 (17:46 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 1 Aug 2018 16:56:10 +0000 (09:56 -0700)
Introduce a new TCP stat to record the number of reordering events seen
and expose it in both tcp_info (TCP_INFO) and opt_stats
(SOF_TIMESTAMPING_OPT_STATS).
Applications can use this stat to track the frequency of reordering
events, in addition to the existing reordering stat, which tracks the
magnitude of the latest reordering event.

Note: this new stat tracks reordering events triggered by ACKs, which
could often be fewer than the actual number of packets being delivered
out-of-order.

Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/tcp.h
include/uapi/linux/tcp.h
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_recovery.c

index da6281c549a57c40b709034fb49d33c8e5fdc14e..263e37271afda18f3d61c99272d34da15dfdca29 100644 (file)
@@ -220,8 +220,7 @@ struct tcp_sock {
 #define TCP_RACK_RECOVERY_THRESH 16
                u8 reo_wnd_persist:5, /* No. of recovery since last adj */
                   dsack_seen:1, /* Whether DSACK seen after last adj */
-                  advanced:1,   /* mstamp advanced since last lost marking */
-                  reord:1;      /* reordering detected */
+                  advanced:1;   /* mstamp advanced since last lost marking */
        } rack;
        u16     advmss;         /* Advertised MSS                       */
        u8      compressed_ack;
@@ -267,6 +266,7 @@ struct tcp_sock {
        u8      ecn_flags;      /* ECN status bits.                     */
        u8      keepalive_probes; /* num of allowed keep alive probes   */
        u32     reordering;     /* Packet reordering metric.            */
+       u32     reord_seen;     /* number of data packet reordering events */
        u32     snd_up;         /* Urgent pointer               */
 
 /*
index 0e1c0aec0153b3abcf70a110780663df22af7ca8..e02d31986ff911b0547bd954abcc7339f4668ca6 100644 (file)
@@ -239,6 +239,7 @@ struct tcp_info {
        __u64   tcpi_bytes_sent;     /* RFC4898 tcpEStatsPerfHCDataOctetsOut */
        __u64   tcpi_bytes_retrans;  /* RFC4898 tcpEStatsPerfOctetsRetrans */
        __u32   tcpi_dsack_dups;     /* RFC4898 tcpEStatsStackDSACKDups */
+       __u32   tcpi_reord_seen;     /* reordering events seen */
 };
 
 /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -264,6 +265,7 @@ enum {
        TCP_NLA_BYTES_SENT,     /* Data bytes sent including retransmission */
        TCP_NLA_BYTES_RETRANS,  /* Data bytes retransmitted */
        TCP_NLA_DSACK_DUPS,     /* DSACK blocks received */
+       TCP_NLA_REORD_SEEN,     /* reordering events seen */
 };
 
 /* for TCP_MD5SIG socket option */
index d6232b598cae5d5988fb774502a9a8735ce62cb8..31fa1c080f2876a5b62846054430c34a6cfdf7c5 100644 (file)
@@ -2597,6 +2597,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->bytes_sent = 0;
        tp->bytes_retrans = 0;
        tp->dsack_dups = 0;
+       tp->reord_seen = 0;
 
        /* Clean up fastopen related fields */
        tcp_free_fastopen_req(tp);
@@ -3207,6 +3208,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_bytes_sent = tp->bytes_sent;
        info->tcpi_bytes_retrans = tp->bytes_retrans;
        info->tcpi_dsack_dups = tp->dsack_dups;
+       info->tcpi_reord_seen = tp->reord_seen;
        unlock_sock_fast(sk, slow);
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
@@ -3234,6 +3236,7 @@ static size_t tcp_opt_stats_get_size(void)
                nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
                nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
                nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
+               nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
                0;
 }
 
@@ -3286,6 +3289,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
        nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
                          TCP_NLA_PAD);
        nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
+       nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
 
        return stats;
 }
index fbc85ff7d71d71c05839820a99c3405068b95132..3d6156f07a8d7e95f9e3883d20d19cec7c895f16 100644 (file)
@@ -906,8 +906,8 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
                                       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
        }
 
-       tp->rack.reord = 1;
        /* This exciting event is worth to be remembered. 8) */
+       tp->reord_seen++;
        NET_INC_STATS(sock_net(sk),
                      ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
 }
@@ -1871,6 +1871,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
        tp->reordering = min_t(u32, tp->packets_out + addend,
                               sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+       tp->reord_seen++;
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
 }
 
index 71593e4400abe344969ed2a6d0f3461e6f8e9bb2..c81aadff769b2c3eee02e6de3a5545c27e8cbc38 100644 (file)
@@ -25,7 +25,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!tp->rack.reord) {
+       if (!tp->reord_seen) {
                /* If reordering has not been observed, be aggressive during
                 * the recovery or starting the recovery by DUPACK threshold.
                 */