net: sched: pie: add derandomization mechanism
authorMohit P. Tahiliani <tahiliani@nitk.edu.in>
Mon, 25 Feb 2019 19:10:00 +0000 (00:40 +0530)
committerDavid S. Miller <davem@davemloft.net>
Mon, 25 Feb 2019 22:21:03 +0000 (14:21 -0800)
Random dropping of packets to achieve latency control may
introduce outlier situations where packets are dropped too
close to each other or too far from each other. This can
cause the real drop percentage to temporarily deviate from
the intended drop probability. In certain scenarios, such
as a small number of simultaneous TCP flows, these
deviations can cause significant fluctuations in link
utilization and queuing latency.

RFC 8033 suggests using a derandomization mechanism to avoid
these deviations.

Signed-off-by: Mohit P. Tahiliani <tahiliani@nitk.edu.in>
Signed-off-by: Dhaval Khandla <dhavaljkhandla26@gmail.com>
Signed-off-by: Hrishikesh Hiraskar <hrishihiraskar@gmail.com>
Signed-off-by: Manish Kumar B <bmanish15597@gmail.com>
Signed-off-by: Sachin D. Patil <sdp.sachin@gmail.com>
Signed-off-by: Leslie Monis <lesliemonis@gmail.com>
Acked-by: Dave Taht <dave.taht@gmail.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/sch_pie.c

index 30f158582499b45210b430526dac728cbfccec32..916b878d349145e207fd6db992d865c659ba1477 100644 (file)
@@ -55,8 +55,10 @@ struct pie_vars {
        psched_time_t qdelay_old;
        u64 dq_count;           /* measured in bytes */
        psched_time_t dq_tstamp;        /* drain rate */
+       u64 accu_prob;          /* accumulated drop probability */
        u32 avg_dq_rate;        /* bytes per pschedtime tick,scaled */
        u32 qlen_old;           /* in bytes */
+       u8 accu_prob_overflows; /* overflows of accu_prob */
 };
 
 /* statistics gathering */
@@ -91,9 +93,11 @@ static void pie_params_init(struct pie_params *params)
 static void pie_vars_init(struct pie_vars *vars)
 {
        vars->dq_count = DQCOUNT_INVALID;
+       vars->accu_prob = 0;
        vars->avg_dq_rate = 0;
        /* default of 150 ms in pschedtime */
        vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
+       vars->accu_prob_overflows = 0;
 }
 
 static bool drop_early(struct Qdisc *sch, u32 packet_size)
@@ -128,9 +132,29 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
        else
                local_prob = q->vars.prob;
 
+       if (local_prob == 0) {
+               q->vars.accu_prob = 0;
+               q->vars.accu_prob_overflows = 0;
+       }
+
+       if (local_prob > MAX_PROB - q->vars.accu_prob)
+               q->vars.accu_prob_overflows++;
+
+       q->vars.accu_prob += local_prob;
+
+       if (q->vars.accu_prob_overflows == 0 &&
+           q->vars.accu_prob < (MAX_PROB / 100) * 85)
+               return false;
+       if (q->vars.accu_prob_overflows == 8 &&
+           q->vars.accu_prob >= MAX_PROB / 2)
+               return true;
+
        prandom_bytes(&rnd, 8);
-       if (rnd < local_prob)
+       if (rnd < local_prob) {
+               q->vars.accu_prob = 0;
+               q->vars.accu_prob_overflows = 0;
                return true;
+       }
 
        return false;
 }
@@ -168,6 +192,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 out:
        q->stats.dropped++;
+       q->vars.accu_prob = 0;
+       q->vars.accu_prob_overflows = 0;
        return qdisc_drop(skb, sch, to_free);
 }