net/sched: flower: Move filter handle initialization earlier
author: Paul Blakey <paulb@nvidia.com>
Fri, 17 Feb 2023 22:36:15 +0000 (00:36 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 21 Feb 2023 00:46:10 +0000 (16:46 -0800)
To support miss-to-action during hardware offload, the filter's
handle is needed when setting up the actions (tcf_exts_init())
and before offloading.

Move filter handle initialization earlier.

Signed-off-by: Paul Blakey <paulb@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/sched/cls_flower.c

index 885c95191ccfcc67fdd9a5e2c5cc29690ed32654..be01d39dd7b98c203fb4c704cb0040df990d4098 100644 (file)
@@ -2187,10 +2187,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        INIT_LIST_HEAD(&fnew->hw_list);
        refcount_set(&fnew->refcnt, 1);
 
-       err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
-       if (err < 0)
-               goto errout;
-
        if (tb[TCA_FLOWER_FLAGS]) {
                fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
@@ -2200,15 +2196,45 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                }
        }
 
+       if (!fold) {
+               spin_lock(&tp->lock);
+               if (!handle) {
+                       handle = 1;
+                       err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                           INT_MAX, GFP_ATOMIC);
+               } else {
+                       err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                           handle, GFP_ATOMIC);
+
+                       /* Filter with specified handle was concurrently
+                        * inserted after initial check in cls_api. This is not
+                        * necessarily an error if NLM_F_EXCL is not set in
+                        * message flags. Returning EAGAIN will cause cls_api to
+                        * try to update concurrently inserted rule.
+                        */
+                       if (err == -ENOSPC)
+                               err = -EAGAIN;
+               }
+               spin_unlock(&tp->lock);
+
+               if (err)
+                       goto errout;
+       }
+       fnew->handle = handle;
+
+       err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
+       if (err < 0)
+               goto errout_idr;
+
        err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
                           tp->chain->tmplt_priv, flags, fnew->flags,
                           extack);
        if (err)
-               goto errout;
+               goto errout_idr;
 
        err = fl_check_assign_mask(head, fnew, fold, mask);
        if (err)
-               goto errout;
+               goto errout_idr;
 
        err = fl_ht_insert_unique(fnew, fold, &in_ht);
        if (err)
@@ -2274,29 +2300,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                refcount_dec(&fold->refcnt);
                __fl_put(fold);
        } else {
-               if (handle) {
-                       /* user specifies a handle and it doesn't exist */
-                       err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                           handle, GFP_ATOMIC);
-
-                       /* Filter with specified handle was concurrently
-                        * inserted after initial check in cls_api. This is not
-                        * necessarily an error if NLM_F_EXCL is not set in
-                        * message flags. Returning EAGAIN will cause cls_api to
-                        * try to update concurrently inserted rule.
-                        */
-                       if (err == -ENOSPC)
-                               err = -EAGAIN;
-               } else {
-                       handle = 1;
-                       err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                           INT_MAX, GFP_ATOMIC);
-               }
-               if (err)
-                       goto errout_hw;
+               idr_replace(&head->handle_idr, fnew, fnew->handle);
 
                refcount_inc(&fnew->refcnt);
-               fnew->handle = handle;
                list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
                spin_unlock(&tp->lock);
        }
@@ -2319,6 +2325,8 @@ errout_hw:
                                       fnew->mask->filter_ht_params);
 errout_mask:
        fl_mask_put(head, fnew->mask);
+errout_idr:
+       idr_remove(&head->handle_idr, fnew->handle);
 errout:
        __fl_put(fnew);
 errout_tb: