mlxsw: spectrum: Implement common FID core
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"

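/* Central ACL state for one Spectrum instance. It owns the flexible-key
 * and flexible-action abstractions (afk/afa), the hash table of rulesets
 * and the global rule list walked by the activity poll. The trailing
 * priv[] area belongs to the ops backend (TCAM) and is sized by
 * ops->priv_size at allocation time.
 */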
struct mlxsw_sp_acl {
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_afk *afk;
        struct mlxsw_afa *afa;
        struct mlxsw_sp_fid *dummy_fid;
        const struct mlxsw_sp_acl_ops *ops;
        struct rhashtable ruleset_ht;
        struct list_head rules;
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
        } rule_activity_update;
        unsigned long priv[0];
        /* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
        return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
        struct net_device *dev; /* dev this ruleset is bound to */
        bool ingress;
        const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
        struct rhash_head ht_node; /* Member of acl HT */
        struct mlxsw_sp_acl_ruleset_ht_key ht_key;
        struct rhashtable rule_ht;
        unsigned int ref_count;
        unsigned long priv[0];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
        struct rhash_head ht_node; /* Member of rule HT */
        struct list_head list;
        unsigned long cookie; /* HT key */
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule_info *rulei;
        u64 last_used;
        u64 last_packets;
        u64 last_bytes;
        unsigned long priv[0];
        /* priv has to be always the last item */
};

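/* Rulesets are hashed by the {netdev, direction, profile ops} triplet,
 * so a second lookup for the same binding reuses the existing ruleset.
 * Rules inside a ruleset are hashed by the caller-supplied cookie (the
 * TC filter cookie in the flower offload case).
 */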
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
        .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
        .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
        .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
        .automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
        .key_len = sizeof(unsigned long),
        .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
        .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
        .automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_sp->acl->dummy_fid;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
                            const struct mlxsw_sp_acl_profile_ops *ops)
{
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset *ruleset;
        size_t alloc_size;
        int err;

        alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
        ruleset = kzalloc(alloc_size, GFP_KERNEL);
        if (!ruleset)
                return ERR_PTR(-ENOMEM);
        ruleset->ref_count = 1;
        ruleset->ht_key.ops = ops;

        err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
        if (err)
                goto err_rhashtable_init;

        err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
        if (err)
                goto err_ops_ruleset_add;

        return ruleset;

err_ops_ruleset_add:
        rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
        kfree(ruleset);
        return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_ruleset *ruleset)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

        ops->ruleset_del(mlxsw_sp, ruleset->priv);
        rhashtable_destroy(&ruleset->rule_ht);
        kfree(ruleset);
}

static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_ruleset *ruleset,
                                     struct net_device *dev, bool ingress)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        int err;

        ruleset->ht_key.dev = dev;
        ruleset->ht_key.ingress = ingress;
        err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
                                     mlxsw_sp_acl_ruleset_ht_params);
        if (err)
                return err;
        err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
        if (err)
                goto err_ops_ruleset_bind;
        return 0;

err_ops_ruleset_bind:
        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
                               mlxsw_sp_acl_ruleset_ht_params);
        return err;
}

static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_ruleset *ruleset)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

        ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
                               mlxsw_sp_acl_ruleset_ht_params);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
        ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_ruleset *ruleset)
{
        if (--ruleset->ref_count)
                return;
        mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
        mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
                         struct net_device *dev, bool ingress,
                         enum mlxsw_sp_acl_profile profile)
{
        const struct mlxsw_sp_acl_profile_ops *ops;
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset_ht_key ht_key;
        struct mlxsw_sp_acl_ruleset *ruleset;
        int err;

        ops = acl->ops->profile_ops(mlxsw_sp, profile);
        if (!ops)
                return ERR_PTR(-EINVAL);

        memset(&ht_key, 0, sizeof(ht_key));
        ht_key.dev = dev;
        ht_key.ingress = ingress;
        ht_key.ops = ops;
        ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
                                         mlxsw_sp_acl_ruleset_ht_params);
        if (ruleset) {
                mlxsw_sp_acl_ruleset_ref_inc(ruleset);
                return ruleset;
        }
        ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
        if (IS_ERR(ruleset))
                return ruleset;
        err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
        if (err)
                goto err_ruleset_bind;
        return ruleset;

err_ruleset_bind:
        mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
        return ERR_PTR(err);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_ruleset *ruleset)
{
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
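
/* Typical caller pattern, as a sketch only (the profile value shown is
 * illustrative and error handling is abbreviated):
 *
 *      ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
 *                                         MLXSW_SP_ACL_PROFILE_FLOWER);
 *      if (IS_ERR(ruleset))
 *              return PTR_ERR(ruleset);
 *      ...add or look up rules...
 *      mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 *
 * The get/put pair reference counts the ruleset; the last put unbinds
 * the ruleset from the device and destroys it.
 */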

static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule_info *rulei)
{
        int err;

        err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
        if (err)
                return err;
        rulei->counter_valid = true;
        return 0;
}

static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule_info *rulei)
{
        rulei->counter_valid = false;
        mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
        struct mlxsw_sp_acl_rule_info *rulei;
        int err;

        rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
        if (!rulei)
                return NULL;
        rulei->act_block = mlxsw_afa_block_create(acl->afa);
        if (IS_ERR(rulei->act_block)) {
                err = PTR_ERR(rulei->act_block);
                goto err_afa_block_create;
        }
        return rulei;

err_afa_block_create:
        kfree(rulei);
        return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
        mlxsw_afa_block_destroy(rulei->act_block);
        kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
                                 unsigned int priority)
{
        rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    u32 key_value, u32 mask_value)
{
        mlxsw_afk_values_add_u32(&rulei->values, element,
                                 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    const char *key_value,
                                    const char *mask_value, unsigned int len)
{
        mlxsw_afk_values_add_buf(&rulei->values, element,
                                 key_value, mask_value, len);
}

void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
        mlxsw_afa_block_continue(rulei->act_block);
}

void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
                                 u16 group_id)
{
        mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule_info *rulei,
                               struct net_device *out_dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 local_port;
        bool in_port;

        if (out_dev) {
                if (!mlxsw_sp_port_dev_check(out_dev))
                        return -EINVAL;
                mlxsw_sp_port = netdev_priv(out_dev);
                if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
                        return -EINVAL;
                local_port = mlxsw_sp_port->local_port;
                in_port = false;
        } else {
                /* If out_dev is NULL, the caller wants to forward
                 * to the ingress port.
                 */
                local_port = 0;
                in_port = true;
        }
        return mlxsw_afa_block_append_fwd(rulei->act_block,
                                          local_port, in_port);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule_info *rulei,
                                u32 action, u16 vid, u16 proto, u8 prio)
{
        u8 ethertype;

        if (action == TCA_VLAN_ACT_MODIFY) {
                switch (proto) {
                case ETH_P_8021Q:
                        ethertype = 0;
                        break;
                case ETH_P_8021AD:
                        ethertype = 1;
                        break;
                default:
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
                                proto);
                        return -EINVAL;
                }

                return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
                                                          vid, prio, ethertype);
        } else {
                dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
                return -EINVAL;
        }
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_append_counter(rulei->act_block,
                                              rulei->counter_index);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_rule_info *rulei,
                                   u16 fid)
{
        return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl_rule *rule;
        int err;

        mlxsw_sp_acl_ruleset_ref_inc(ruleset);
        rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
        if (!rule) {
                err = -ENOMEM;
                goto err_alloc;
        }
        rule->cookie = cookie;
        rule->ruleset = ruleset;

        rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
        if (IS_ERR(rule->rulei)) {
                err = PTR_ERR(rule->rulei);
                goto err_rulei_create;
        }

        err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
        if (err)
                goto err_counter_alloc;
        return rule;

err_counter_alloc:
        mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
        kfree(rule);
err_alloc:
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
        return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

        mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
        mlxsw_sp_acl_rulei_destroy(rule->rulei);
        kfree(rule);
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        int err;

        err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
        if (err)
                return err;

        err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
                                     mlxsw_sp_acl_rule_ht_params);
        if (err)
                goto err_rhashtable_insert;

        list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
        return 0;

err_rhashtable_insert:
        ops->rule_del(mlxsw_sp, rule->priv);
        return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

        list_del(&rule->list);
        rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
                               mlxsw_sp_acl_rule_ht_params);
        ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie)
{
        return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
                                      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
        return rule->rulei;
}
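
/* Typical rule lifecycle, sketching how a caller such as the flower
 * offload code is expected to drive this API (error handling omitted;
 * the match element and actions are examples only):
 *
 *      rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
 *      rulei = mlxsw_sp_acl_rule_rulei(rule);
 *      mlxsw_sp_acl_rulei_priority(rulei, prio);
 *      mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_PROTO,
 *                                     key, mask);
 *      mlxsw_sp_acl_rulei_act_drop(rulei);
 *      mlxsw_sp_acl_rulei_commit(rulei);
 *      mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 */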

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
                                             struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        bool active;
        int err;

        err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
        if (err)
                return err;
        if (active)
                rule->last_used = jiffies;
        return 0;
}
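
/* The hardware only reports a per-rule activity bit. The periodic poll
 * below folds that bit into a last_used timestamp in jiffies, which is
 * what mlxsw_sp_acl_rule_get_stats() later hands back to the caller.
 */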

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
        struct mlxsw_sp_acl_rule *rule;
        int err;

        /* Protect internal structures from changes */
        rtnl_lock();
        list_for_each_entry(rule, &acl->rules, list) {
                err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
                                                        rule);
                if (err)
                        goto err_rule_update;
        }
        rtnl_unlock();
        return 0;

err_rule_update:
        rtnl_unlock();
        return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
        unsigned long interval = acl->rule_activity_update.interval;

        mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
                               msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
        struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
                                                rule_activity_update.dw.work);
        int err;

        err = mlxsw_sp_acl_rules_activity_update(acl);
        if (err)
                dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

        mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule *rule,
                                u64 *packets, u64 *bytes, u64 *last_use)
{
        struct mlxsw_sp_acl_rule_info *rulei;
        u64 current_packets;
        u64 current_bytes;
        int err;

        rulei = mlxsw_sp_acl_rule_rulei(rule);
        err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
                                        &current_packets, &current_bytes);
        if (err)
                return err;

        *packets = current_packets - rule->last_packets;
        *bytes = current_bytes - rule->last_bytes;
        *last_use = rule->last_used;

        rule->last_bytes = current_bytes;
        rule->last_packets = current_packets;

        return 0;
}
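
/* Note that the packet and byte counts returned above are deltas since
 * the previous call: each read folds the hardware snapshot into
 * last_packets/last_bytes, matching the incremental way TC accumulates
 * hardware stats.
 */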

#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1

static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
                                     char *enc_actions, bool is_first)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        char pefa_pl[MLXSW_REG_PEFA_LEN];
        u32 kvdl_index;
        int err;

        /* The first action set of a TCAM entry is stored directly in TCAM,
         * not KVD linear area.
         */
        if (is_first)
                return 0;

        err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
                                  &kvdl_index);
        if (err)
                return err;
        mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
        if (err)
                goto err_pefa_write;
        *p_kvdl_index = kvdl_index;
        return 0;

err_pefa_write:
        mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
        return err;
}

static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
                                      bool is_first)
{
        struct mlxsw_sp *mlxsw_sp = priv;

        if (is_first)
                return;
        mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
                                           u8 local_port)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        char ppbs_pl[MLXSW_REG_PPBS_LEN];
        u32 kvdl_index;
        int err;

        err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
        if (err)
                return err;
        mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
        if (err)
                goto err_ppbs_write;
        *p_kvdl_index = kvdl_index;
        return 0;

err_ppbs_write:
        mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
        return err;
}

static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
        struct mlxsw_sp *mlxsw_sp = priv;

        mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
        .kvdl_set_add           = mlxsw_sp_act_kvdl_set_add,
        .kvdl_set_del           = mlxsw_sp_act_kvdl_set_del,
        .kvdl_fwd_entry_add     = mlxsw_sp_act_kvdl_fwd_entry_add,
        .kvdl_fwd_entry_del     = mlxsw_sp_act_kvdl_fwd_entry_del,
};
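
/* These callbacks let the generic flexible-action code
 * (core_acl_flex_actions.c) reach back into Spectrum-specific code to
 * place extended action sets and forwarding entries in the KVD linear
 * area.
 */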

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
        const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
        struct mlxsw_sp_fid *fid;
        struct mlxsw_sp_acl *acl;
        int err;

        acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
        if (!acl)
                return -ENOMEM;
        mlxsw_sp->acl = acl;
        acl->mlxsw_sp = mlxsw_sp;
        acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                       ACL_FLEX_KEYS),
                                    mlxsw_sp_afk_blocks,
                                    MLXSW_SP_AFK_BLOCKS_COUNT);
        if (!acl->afk) {
                err = -ENOMEM;
                goto err_afk_create;
        }

        acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                       ACL_ACTIONS_PER_SET),
                                    &mlxsw_sp_act_afa_ops, mlxsw_sp);
        if (IS_ERR(acl->afa)) {
                err = PTR_ERR(acl->afa);
                goto err_afa_create;
        }

        err = rhashtable_init(&acl->ruleset_ht,
                              &mlxsw_sp_acl_ruleset_ht_params);
        if (err)
                goto err_rhashtable_init;

        fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
        if (IS_ERR(fid)) {
                err = PTR_ERR(fid);
                goto err_fid_get;
        }
        acl->dummy_fid = fid;

        INIT_LIST_HEAD(&acl->rules);
        err = acl_ops->init(mlxsw_sp, acl->priv);
        if (err)
                goto err_acl_ops_init;

        acl->ops = acl_ops;

        /* Create the delayed work for rule activity update */
        INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
                          mlxsw_sp_acl_rule_activity_update_work);
        acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
        mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
        return 0;

err_acl_ops_init:
        mlxsw_sp_fid_put(fid);
err_fid_get:
        rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
        mlxsw_afa_destroy(acl->afa);
err_afa_create:
        mlxsw_afk_destroy(acl->afk);
err_afk_create:
        kfree(acl);
        return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

        cancel_delayed_work_sync(&acl->rule_activity_update.dw);
        acl_ops->fini(mlxsw_sp, acl->priv);
        WARN_ON(!list_empty(&acl->rules));
        mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
        mlxsw_afa_destroy(acl->afa);
        mlxsw_afk_destroy(acl->afk);
        kfree(acl);
}