net/sched: act_mirred: Create function tcf_mirred_to_dev and improve readability
author Victor Nogueira <victor@mojatatu.com>
Tue, 19 Dec 2023 18:16:21 +0000 (15:16 -0300)
committer David S. Miller <davem@davemloft.net>
Tue, 26 Dec 2023 21:20:08 +0000 (21:20 +0000)
In preparation for adding block ID support to mirred, separate the part of
mirred that redirects/mirrors to a dev into a dedicated function so that
blockcast can call it for each dev in a block.

Also improve readability, e.g. rename use_reinsert to dont_clone and skb2
to skb_to_send.
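
For illustration only, a rough sketch of how a follow-up blockcast caller
could loop over a block's ports and reuse the new helper. The
tcf_blockcast_mirror name, the block->ports xarray walk, and the
exception_ifindex parameter are assumptions here, not part of this patch:

    /* Hypothetical caller: send to every port in the block except
     * the one the skb arrived on.
     */
    static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
                                    struct tcf_block *block, int m_eaction,
                                    const u32 exception_ifindex, int retval)
    {
            struct net_device *dev;
            unsigned long index;

            xa_for_each(&block->ports, index, dev) {
                    if (dev->ifindex == exception_ifindex)
                            continue;
                    retval = tcf_mirred_to_dev(skb, m, dev,
                                               dev_is_mac_header_xmit(dev),
                                               m_eaction, retval);
            }

            return retval;
    }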

Co-developed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
Co-developed-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Victor Nogueira <victor@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/act_mirred.c

index 0a711c184c29bd8f0425a056ae16ffc7aa018e94..6f2544c1e3961d32ea697586519340434cd9cf60 100644
@@ -225,48 +225,26 @@ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
        return err;
 }
 
-TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
-                                    const struct tc_action *a,
-                                    struct tcf_result *res)
+static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
+                            struct net_device *dev,
+                            const bool m_mac_header_xmit, int m_eaction,
+                            int retval)
 {
-       struct tcf_mirred *m = to_mirred(a);
-       struct sk_buff *skb2 = skb;
-       bool m_mac_header_xmit;
-       struct net_device *dev;
-       unsigned int nest_level;
-       int retval, err = 0;
-       bool use_reinsert;
+       struct sk_buff *skb_to_send = skb;
        bool want_ingress;
        bool is_redirect;
        bool expects_nh;
        bool at_ingress;
-       int m_eaction;
+       bool dont_clone;
        int mac_len;
        bool at_nh;
+       int err;
 
-       nest_level = __this_cpu_inc_return(mirred_nest_level);
-       if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
-               net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
-                                    netdev_name(skb->dev));
-               __this_cpu_dec(mirred_nest_level);
-               return TC_ACT_SHOT;
-       }
-
-       tcf_lastuse_update(&m->tcf_tm);
-       tcf_action_update_bstats(&m->common, skb);
-
-       m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
-       m_eaction = READ_ONCE(m->tcfm_eaction);
-       retval = READ_ONCE(m->tcf_action);
-       dev = rcu_dereference_bh(m->tcfm_dev);
-       if (unlikely(!dev)) {
-               pr_notice_once("tc mirred: target device is gone\n");
-               goto out;
-       }
-
+       is_redirect = tcf_mirred_is_act_redirect(m_eaction);
        if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
+               err = -ENODEV;
                goto out;
        }
 
@@ -274,61 +252,98 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
         * since we can't easily detect the clsact caller, skip clone only for
         * ingress - that covers the TC S/W datapath.
         */
-       is_redirect = tcf_mirred_is_act_redirect(m_eaction);
        at_ingress = skb_at_tc_ingress(skb);
-       use_reinsert = at_ingress && is_redirect &&
-                      tcf_mirred_can_reinsert(retval);
-       if (!use_reinsert) {
-               skb2 = skb_clone(skb, GFP_ATOMIC);
-               if (!skb2)
+       dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
+               tcf_mirred_can_reinsert(retval);
+       if (!dont_clone) {
+               skb_to_send = skb_clone(skb, GFP_ATOMIC);
+               if (!skb_to_send) {
+                       err = -ENOMEM;
                        goto out;
+               }
        }
 
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
 
        /* All mirred/redirected skbs should clear previous ct info */
-       nf_reset_ct(skb2);
+       nf_reset_ct(skb_to_send);
        if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
-               skb_dst_drop(skb2);
+               skb_dst_drop(skb_to_send);
 
        expects_nh = want_ingress || !m_mac_header_xmit;
        at_nh = skb->data == skb_network_header(skb);
        if (at_nh != expects_nh) {
-               mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
+               mac_len = at_ingress ? skb->mac_len :
                          skb_network_offset(skb);
                if (expects_nh) {
                        /* target device/action expect data at nh */
-                       skb_pull_rcsum(skb2, mac_len);
+                       skb_pull_rcsum(skb_to_send, mac_len);
                } else {
                        /* target device/action expect data at mac */
-                       skb_push_rcsum(skb2, mac_len);
+                       skb_push_rcsum(skb_to_send, mac_len);
                }
        }
 
-       skb2->skb_iif = skb->dev->ifindex;
-       skb2->dev = dev;
+       skb_to_send->skb_iif = skb->dev->ifindex;
+       skb_to_send->dev = dev;
 
-       /* mirror is always swallowed */
        if (is_redirect) {
-               skb_set_redirected(skb2, skb2->tc_at_ingress);
-
-               /* let's the caller reinsert the packet, if possible */
-               if (use_reinsert) {
-                       err = tcf_mirred_forward(want_ingress, skb);
-                       if (err)
-                               tcf_action_inc_overlimit_qstats(&m->common);
-                       __this_cpu_dec(mirred_nest_level);
-                       return TC_ACT_CONSUMED;
-               }
+               if (skb == skb_to_send)
+                       retval = TC_ACT_CONSUMED;
+
+               skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
+
+               err = tcf_mirred_forward(want_ingress, skb_to_send);
+       } else {
+               err = tcf_mirred_forward(want_ingress, skb_to_send);
        }
 
-       err = tcf_mirred_forward(want_ingress, skb2);
        if (err) {
 out:
                tcf_action_inc_overlimit_qstats(&m->common);
-               if (tcf_mirred_is_act_redirect(m_eaction))
+               if (is_redirect)
                        retval = TC_ACT_SHOT;
        }
+
+       return retval;
+}
+
+TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+                                    const struct tc_action *a,
+                                    struct tcf_result *res)
+{
+       struct tcf_mirred *m = to_mirred(a);
+       int retval = READ_ONCE(m->tcf_action);
+       unsigned int nest_level;
+       bool m_mac_header_xmit;
+       struct net_device *dev;
+       int m_eaction;
+
+       nest_level = __this_cpu_inc_return(mirred_nest_level);
+       if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+               net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+                                    netdev_name(skb->dev));
+               retval = TC_ACT_SHOT;
+               goto dec_nest_level;
+       }
+
+       tcf_lastuse_update(&m->tcf_tm);
+       tcf_action_update_bstats(&m->common, skb);
+
+       dev = rcu_dereference_bh(m->tcfm_dev);
+       if (unlikely(!dev)) {
+               pr_notice_once("tc mirred: target device is gone\n");
+               tcf_action_inc_overlimit_qstats(&m->common);
+               goto dec_nest_level;
+       }
+
+       m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
+       m_eaction = READ_ONCE(m->tcfm_eaction);
+
+       retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
+                                  retval);
+
+dec_nest_level:
        __this_cpu_dec(mirred_nest_level);
 
        return retval;
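
A note on the helper's return contract, as a caller-side sketch (not part
of the diff): when the redirect path forwards the original skb without
cloning (skb == skb_to_send), the helper reports TC_ACT_CONSUMED so the
caller stops using the skb; otherwise it returns the configured verdict,
downgraded to TC_ACT_SHOT if a redirect failed to forward.

    /* Illustrative only; variable names mirror tcf_mirred_act() but
     * this snippet is not in the patch.
     */
    retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
                               retval);
    if (retval == TC_ACT_CONSUMED)
            return retval; /* original skb was handed off, do not touch it */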