net/sched: Use nested-BH locking for sch_frag_data_storage
author Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Mon, 12 May 2025 09:27:32 +0000 (11:27 +0200)
committer Paolo Abeni <pabeni@redhat.com>
Thu, 15 May 2025 13:23:31 +0000 (15:23 +0200)
sch_frag_data_storage is a per-CPU variable and relies on disabled BH
for its locking. On PREEMPT_RT, local_bh_disable() does not provide
this per-CPU locking, so the data structure requires an explicit lock.

Add a local_lock_t to the struct and use local_lock_nested_bh() for
locking. This change adds only lockdep coverage and does not alter the
functional behaviour for !PREEMPT_RT.

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20250512092736.229935-12-bigeasy@linutronix.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/sched/sch_frag.c

index ce63414185fd6dcb7e3544b9302aeb41a49c19b3..d1d87dce7f3f72e33e3c8ec0c0eb35bdd9b5c9f1 100644
@@ -16,14 +16,18 @@ struct sch_frag_data {
        unsigned int l2_len;
        u8 l2_data[VLAN_ETH_HLEN];
        int (*xmit)(struct sk_buff *skb);
+       local_lock_t bh_lock;
 };
 
-static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage);
+static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage) = {
+       .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 
 static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);
 
+       lockdep_assert_held(&data->bh_lock);
        if (skb_cow_head(skb, data->l2_len) < 0) {
                kfree_skb(skb);
                return -ENOMEM;
@@ -95,6 +99,7 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
                struct rtable sch_frag_rt = { 0 };
                unsigned long orig_dst;
 
+               local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
                sch_frag_prepare_frag(skb, xmit);
                dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
@@ -105,11 +110,13 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
                IPCB(skb)->frag_max_size = mru;
 
                ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
+               local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
                refdst_drop(orig_dst);
        } else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
                unsigned long orig_dst;
                struct rt6_info sch_frag_rt;
 
+               local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
                sch_frag_prepare_frag(skb, xmit);
                memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
                dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
@@ -122,6 +129,7 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
 
                ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb,
                                               sch_frag_xmit);
+               local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
                refdst_drop(orig_dst);
        } else {
                net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",