net_sched: sch_ets: implement lockless ets_dump()
author Eric Dumazet <edumazet@google.com>
Thu, 18 Apr 2024 07:32:41 +0000 (07:32 +0000)
committer David S. Miller <davem@davemloft.net>
Fri, 19 Apr 2024 10:34:07 +0000 (11:34 +0100)
Instead of relying on RTNL, ets_dump() can use READ_ONCE()
annotations, paired with WRITE_ONCE() ones in ets_change().
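
For illustration only, the pattern boils down to the userspace sketch below.
It uses simplified READ_ONCE()/WRITE_ONCE() stand-ins (plain volatile
accesses, not the kernel's <linux/compiler.h> macros) and made-up names,
not the real sch_ets code:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros. */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

/* Hypothetical qdisc private data; one field is enough for the example. */
struct toy_sched {
	uint8_t nbands;
};

/* "change" path: runs with the qdisc tree lock held, annotates its writes. */
static void toy_change(struct toy_sched *q, uint8_t nbands)
{
	WRITE_ONCE(q->nbands, nbands);
}

/* "dump" path: runs locklessly, snapshots each field once into a local. */
static void toy_dump(struct toy_sched *q)
{
	uint8_t nbands = READ_ONCE(q->nbands);

	printf("nbands=%u\n", nbands);
}

int main(void)
{
	struct toy_sched q = { .nbands = 3 };

	toy_change(&q, 8);
	toy_dump(&q);
	return 0;
}

The point is that the dump side reads each field exactly once and then only
uses its local copy, so a concurrent change cannot make a single dump observe
two different values of the same field.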

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/sch_ets.c

index 835b4460b44854a803d3054e744702022b7551f4..f80bc05d4c5a5050226e6cfd30fa951c0b61029f 100644
@@ -646,7 +646,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 
        sch_tree_lock(sch);
 
-       q->nbands = nbands;
+       WRITE_ONCE(q->nbands, nbands);
        for (i = nstrict; i < q->nstrict; i++) {
                if (q->classes[i].qdisc->q.qlen) {
                        list_add_tail(&q->classes[i].alist, &q->active);
@@ -658,11 +658,11 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
                        list_del(&q->classes[i].alist);
                qdisc_tree_flush_backlog(q->classes[i].qdisc);
        }
-       q->nstrict = nstrict;
+       WRITE_ONCE(q->nstrict, nstrict);
        memcpy(q->prio2band, priomap, sizeof(priomap));
 
        for (i = 0; i < q->nbands; i++)
-               q->classes[i].quantum = quanta[i];
+               WRITE_ONCE(q->classes[i].quantum, quanta[i]);
 
        for (i = oldbands; i < q->nbands; i++) {
                q->classes[i].qdisc = queues[i];
@@ -676,7 +676,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
        for (i = q->nbands; i < oldbands; i++) {
                qdisc_put(q->classes[i].qdisc);
                q->classes[i].qdisc = NULL;
-               q->classes[i].quantum = 0;
+               WRITE_ONCE(q->classes[i].quantum, 0);
                q->classes[i].deficit = 0;
                gnet_stats_basic_sync_init(&q->classes[i].bstats);
                memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
@@ -733,6 +733,7 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct ets_sched *q = qdisc_priv(sch);
        struct nlattr *opts;
        struct nlattr *nest;
+       u8 nbands, nstrict;
        int band;
        int prio;
        int err;
@@ -745,21 +746,22 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (!opts)
                goto nla_err;
 
-       if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
+       nbands = READ_ONCE(q->nbands);
+       if (nla_put_u8(skb, TCA_ETS_NBANDS, nbands))
                goto nla_err;
 
-       if (q->nstrict &&
-           nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
+       nstrict = READ_ONCE(q->nstrict);
+       if (nstrict && nla_put_u8(skb, TCA_ETS_NSTRICT, nstrict))
                goto nla_err;
 
-       if (q->nbands > q->nstrict) {
+       if (nbands > nstrict) {
                nest = nla_nest_start(skb, TCA_ETS_QUANTA);
                if (!nest)
                        goto nla_err;
 
-               for (band = q->nstrict; band < q->nbands; band++) {
+               for (band = nstrict; band < nbands; band++) {
                        if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
-                                       q->classes[band].quantum))
+                                       READ_ONCE(q->classes[band].quantum)))
                                goto nla_err;
                }
 
@@ -771,7 +773,8 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_err;
 
        for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
-               if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
+               if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND,
+                              READ_ONCE(q->prio2band[prio])))
                        goto nla_err;
        }