tcp: add tcp_in_slow_start helper
author	Yuchung Cheng <ycheng@google.com>
Thu, 9 Jul 2015 20:16:29 +0000 (13:16 -0700)
committer	David S. Miller <davem@davemloft.net>
Thu, 9 Jul 2015 21:22:52 +0000 (14:22 -0700)
Add a helper to test the slow start condition in various congestion
control modules and other places. This is to prepare a slight improvement
in policy as to exactly when to slow start.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Nandita Dukkipati <nanditad@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c

index 950cfecaad3c0d01c646c4fd111eca8d0cf8aef3..dba22fc1b065a70b9604872b1460c7b2ea24db1d 100644 (file)
@@ -989,6 +989,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 #define TCP_INFINITE_SSTHRESH  0x7fffffff
 
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+       return tp->snd_cwnd <= tp->snd_ssthresh;
+}
+
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 {
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1070,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
        const struct tcp_sock *tp = tcp_sk(sk);
 
        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                return tp->snd_cwnd < 2 * tp->max_packets_out;
 
        return tp->is_cwnd_limited;
index c037644eafb7caadcb196b1c8b676bbc42abdb93..fd1405d37c149309882742fb12b07331e7282a95 100644 (file)
@@ -146,7 +146,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
index 84be008c945c654b692211b943f83e909a622516..654729a8cb23f724cca59130580dd6a1ab6a8416 100644 (file)
@@ -413,7 +413,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In "safe" area, increase. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
index 06d3d665a9fd1bfda5688907a284de83697273f6..28011fb1f4a2104a34f81fc0c9fb4a4382bdadac 100644 (file)
@@ -320,7 +320,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
                acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                ca->delay_min = delay;
 
        /* hystart triggers when cwnd is larger than some threshold */
-       if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+       if (hystart && tcp_in_slow_start(tp) &&
            tp->snd_cwnd >= hystart_low_window)
                hystart_update(sk, delay);
 }
index 882c08aae2f58d02bb78212a4eba4d25d7e9c123..db7842495a641829a8725cb436ed2fb3aa5d53e4 100644 (file)
@@ -116,7 +116,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                /* Update AIMD parameters.
index 58469fff6c18fd444c95366caa04ab60965d654a..82f0d9ed60f50f27854fdb62a95281beed9df819 100644 (file)
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                /* In dangerous area, increase slowly.
index f71002e4db0ba7fe8dfe35bb2196bbaae751ed59..2ab9bbb6faffb799560df98b093d4cbc1207d816 100644 (file)
@@ -268,7 +268,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In slow start */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 
        else {
index a51d63a43e33af5fc751e4f0f3369b9394776975..b3d64f61d922e1ec10aa31b4e19ea0fb6c6876be 100644 (file)
@@ -461,7 +461,7 @@ void tcp_update_metrics(struct sock *sk)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
-       } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+       } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
index 333bcb2415ffca51e06f3042ae3d94b8e21c0725..bf5ea9e9bbc1ed3c07c03f9db69b9848cf83ec8e 100644 (file)
@@ -22,7 +22,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else
                tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
index a6cea1d5e20d47f06eab95f3344a3e3b7c44da89..13951c4087d407b72cb5bc2ee75822203244e3f3 100644 (file)
@@ -225,7 +225,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                         */
                        diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-                       if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+                       if (diff > gamma && tcp_in_slow_start(tp)) {
                                /* Going too fast. Time to slow down
                                 * and switch to congestion avoidance.
                                 */
@@ -240,7 +240,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                                tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
                                tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
-                       } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+                       } else if (tcp_in_slow_start(tp)) {
                                /* Slow start.  */
                                tcp_slow_start(tp, acked);
                        } else {
@@ -281,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                vegas->minRTT = 0x7fffffff;
        }
        /* Use normal slow start */
-       else if (tp->snd_cwnd <= tp->snd_ssthresh)
+       else if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 }
 
index 112151eeee45bff0c37ac92d78d165ba92bd4d0a..0d094b995cd96f8c5150daf586cdde0f495843f5 100644 (file)
@@ -150,7 +150,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
                veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
-               if (tp->snd_cwnd <= tp->snd_ssthresh) {
+               if (tcp_in_slow_start(tp)) {
                        /* Slow start.  */
                        tcp_slow_start(tp, acked);
                } else {