net_sched: sch_skbprio: implement lockless skbprio_dump()
author Eric Dumazet <edumazet@google.com>
Thu, 18 Apr 2024 07:32:48 +0000 (07:32 +0000)
committer David S. Miller <davem@davemloft.net>
Fri, 19 Apr 2024 10:34:08 +0000 (11:34 +0100)
Instead of relying on RTNL, skbprio_dump() can use READ_ONCE()
annotation, paired with WRITE_ONCE() one in skbprio_change().

Also add a READ_ONCE(sch->limit) in skbprio_enqueue().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/sch_skbprio.c

index b4dd626c309c36725e6030a338d21d1fabcb6704..20ff7386b74bd89c00b50a8f0def91b6c5cce7f4 100644 (file)
@@ -79,7 +79,9 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        prio = min(skb->priority, max_priority);
 
        qdisc = &q->qdiscs[prio];
-       if (sch->q.qlen < sch->limit) {
+
+       /* sch->limit can change under us from skbprio_change() */
+       if (sch->q.qlen < READ_ONCE(sch->limit)) {
                __skb_queue_tail(qdisc, skb);
                qdisc_qstats_backlog_inc(sch, skb);
                q->qstats[prio].backlog += qdisc_pkt_len(skb);
@@ -172,7 +174,7 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
        if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
                return -EINVAL;
 
-       sch->limit = ctl->limit;
+       WRITE_ONCE(sch->limit, ctl->limit);
        return 0;
 }
 
@@ -200,7 +202,7 @@ static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct tc_skbprio_qopt opt;
 
-       opt.limit = sch->limit;
+       opt.limit = READ_ONCE(sch->limit);
 
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                return -1;