Merge tag 'pinctrl-v6.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
[linux-block.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_acl_tcam.c
CommitLineData
9948a064
JP
1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
22a67766
JP
3
4#include <linux/kernel.h>
5#include <linux/slab.h>
6#include <linux/errno.h>
7#include <linux/bitops.h>
8#include <linux/list.h>
9#include <linux/rhashtable.h>
10#include <linux/netdevice.h>
5ec2ee28 11#include <linux/mutex.h>
1267f722 12#include <linux/refcount.h>
627f9c1b 13#include <linux/idr.h>
74cbc3c0 14#include <net/devlink.h>
3985de72 15#include <trace/events/mlxsw.h>
22a67766
JP
16
17#include "reg.h"
18#include "core.h"
19#include "resources.h"
20#include "spectrum.h"
64eccd00 21#include "spectrum_acl_tcam.h"
22a67766
JP
22#include "core_acl_flex_keys.h"
23
bab5c1cf
JP
24size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
25{
26 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
27
28 return ops->priv_size;
29}
22a67766 30
e5e7962e 31#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
98bbf70c 32#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
c9c9af91 33#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
e5e7962e 34
ea8b2e28
JP
35int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
36 struct mlxsw_sp_acl_rule_info *rulei,
37 u32 *priority, bool fillup_priority)
38{
39 u64 max_priority;
40
41 if (!fillup_priority) {
42 *priority = 0;
43 return 0;
44 }
45
46 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
47 return -EIO;
48
d7263ab3
ND
49 /* Priority range is 1..cap_kvd_size-1. */
50 max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
51 if (rulei->priority >= max_priority)
ea8b2e28
JP
52 return -EINVAL;
53
54 /* Unlike in TC, in HW, higher number means higher priority. */
55 *priority = max_priority - rulei->priority;
56 return 0;
57}
58
22a67766
JP
59static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
60 u16 *p_id)
61{
627f9c1b 62 int id;
22a67766 63
627f9c1b
IS
64 id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
65 GFP_KERNEL);
66 if (id < 0)
67 return id;
68
69 *p_id = id;
70
71 return 0;
22a67766
JP
72}
73
74static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
75 u16 id)
76{
627f9c1b 77 ida_free(&tcam->used_regions, id);
22a67766
JP
78}
79
80static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
81 u16 *p_id)
82{
627f9c1b 83 int id;
22a67766 84
627f9c1b
IS
85 id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
86 GFP_KERNEL);
87 if (id < 0)
88 return id;
89
90 *p_id = id;
91
92 return 0;
22a67766
JP
93}
94
95static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
96 u16 id)
97{
627f9c1b 98 ida_free(&tcam->used_groups, id);
22a67766
JP
99}
100
101struct mlxsw_sp_acl_tcam_pattern {
102 const enum mlxsw_afk_element *elements;
103 unsigned int elements_count;
104};
105
106struct mlxsw_sp_acl_tcam_group {
107 struct mlxsw_sp_acl_tcam *tcam;
108 u16 id;
5ec2ee28 109 struct mutex lock; /* guards region list updates */
2802aadf 110 struct list_head region_list;
22a67766 111 unsigned int region_count;
2802aadf
JP
112};
113
114struct mlxsw_sp_acl_tcam_vgroup {
115 struct mlxsw_sp_acl_tcam_group group;
116 struct list_head vregion_list;
b2d6b4d2 117 struct rhashtable vchunk_ht;
22a67766
JP
118 const struct mlxsw_sp_acl_tcam_pattern *patterns;
119 unsigned int patterns_count;
e2f2a1fd
JP
120 bool tmplt_elusage_set;
121 struct mlxsw_afk_element_usage tmplt_elusage;
6b861682 122 bool vregion_rehash_enabled;
593bb843
JP
123 unsigned int *p_min_prio;
124 unsigned int *p_max_prio;
22a67766
JP
125};
126
559c2768
JP
127struct mlxsw_sp_acl_tcam_rehash_ctx {
128 void *hints_priv;
220f4fba 129 bool this_is_rollback;
6f9579d4
JP
130 struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
131 * currently migrated.
132 */
133 struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
134 * migration from in
135 * a vchunk being
136 * currently migrated.
137 */
138 struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
139 * migration at
140 * a vchunk being
141 * currently migrated.
142 */
559c2768
JP
143};
144
0f54236d 145struct mlxsw_sp_acl_tcam_vregion {
1263a9ab
JP
146 struct mutex lock; /* Protects consistency of region, region2 pointers
147 * and vchunk_list.
148 */
0f54236d 149 struct mlxsw_sp_acl_tcam_region *region;
e5e7962e 150 struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
0f54236d 151 struct list_head list; /* Member of a TCAM group */
e5e7962e 152 struct list_head tlist; /* Member of a TCAM */
b2d6b4d2 153 struct list_head vchunk_list; /* List of vchunks under this vregion */
0f54236d 154 struct mlxsw_afk_key_info *key_info;
e5e7962e 155 struct mlxsw_sp_acl_tcam *tcam;
6b861682 156 struct mlxsw_sp_acl_tcam_vgroup *vgroup;
f9b274ce
JP
157 struct {
158 struct delayed_work dw;
559c2768 159 struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
f9b274ce 160 } rehash;
e5e7962e 161 struct mlxsw_sp *mlxsw_sp;
1267f722 162 refcount_t ref_count;
0f54236d
JP
163};
164
b2d6b4d2
JP
165struct mlxsw_sp_acl_tcam_vchunk;
166
22a67766 167struct mlxsw_sp_acl_tcam_chunk {
b2d6b4d2 168 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
e5e7962e 169 struct mlxsw_sp_acl_tcam_region *region;
e99f8e7f 170 unsigned long priv[];
b2d6b4d2
JP
171 /* priv has to be always the last item */
172};
173
174struct mlxsw_sp_acl_tcam_vchunk {
175 struct mlxsw_sp_acl_tcam_chunk *chunk;
e5e7962e 176 struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
0f54236d 177 struct list_head list; /* Member of a TCAM vregion */
22a67766 178 struct rhash_head ht_node; /* Member of a chunk HT */
e5e7962e 179 struct list_head ventry_list;
0f54236d 180 unsigned int priority; /* Priority within the vregion and group */
2802aadf 181 struct mlxsw_sp_acl_tcam_vgroup *vgroup;
0f54236d 182 struct mlxsw_sp_acl_tcam_vregion *vregion;
1267f722 183 refcount_t ref_count;
22a67766
JP
184};
185
186struct mlxsw_sp_acl_tcam_entry {
c4c2dc54 187 struct mlxsw_sp_acl_tcam_ventry *ventry;
e5e7962e 188 struct mlxsw_sp_acl_tcam_chunk *chunk;
e99f8e7f 189 unsigned long priv[];
64eccd00 190 /* priv has to be always the last item */
22a67766
JP
191};
192
c4c2dc54
JP
193struct mlxsw_sp_acl_tcam_ventry {
194 struct mlxsw_sp_acl_tcam_entry *entry;
e5e7962e 195 struct list_head list; /* Member of a TCAM vchunk */
c4c2dc54 196 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
e5e7962e 197 struct mlxsw_sp_acl_rule_info *rulei;
c4c2dc54
JP
198};
199
b2d6b4d2 200static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
22a67766 201 .key_len = sizeof(unsigned int),
b2d6b4d2
JP
202 .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
203 .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
22a67766
JP
204 .automatic_shrinking = true,
205};
206
207static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
208 struct mlxsw_sp_acl_tcam_group *group)
209{
2802aadf 210 struct mlxsw_sp_acl_tcam_region *region;
22a67766
JP
211 char pagt_pl[MLXSW_REG_PAGT_LEN];
212 int acl_index = 0;
213
214 mlxsw_reg_pagt_pack(pagt_pl, group->id);
2802aadf
JP
215 list_for_each_entry(region, &group->region_list, list) {
216 bool multi = false;
217
218 /* Check if the next entry in the list has the same vregion. */
219 if (region->list.next != &group->region_list &&
220 list_next_entry(region, list)->vregion == region->vregion)
221 multi = true;
0f54236d 222 mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
2802aadf 223 region->id, multi);
e5e7962e 224 }
22a67766
JP
225 mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
226 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
227}
228
229static int
2802aadf
JP
230mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
231 struct mlxsw_sp_acl_tcam_group *group)
22a67766
JP
232{
233 int err;
234
235 group->tcam = tcam;
2802aadf
JP
236 INIT_LIST_HEAD(&group->region_list);
237
238 err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
239 if (err)
240 return err;
241
72865028
IS
242 mutex_init(&group->lock);
243
2802aadf
JP
244 return 0;
245}
246
247static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
248{
249 struct mlxsw_sp_acl_tcam *tcam = group->tcam;
250
5ec2ee28 251 mutex_destroy(&group->lock);
2802aadf
JP
252 mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
253 WARN_ON(!list_empty(&group->region_list));
254}
255
256static int
257mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
258 struct mlxsw_sp_acl_tcam *tcam,
259 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
260 const struct mlxsw_sp_acl_tcam_pattern *patterns,
261 unsigned int patterns_count,
6b861682 262 struct mlxsw_afk_element_usage *tmplt_elusage,
593bb843
JP
263 bool vregion_rehash_enabled,
264 unsigned int *p_min_prio,
265 unsigned int *p_max_prio)
2802aadf
JP
266{
267 int err;
268
269 vgroup->patterns = patterns;
270 vgroup->patterns_count = patterns_count;
6b861682 271 vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
593bb843
JP
272 vgroup->p_min_prio = p_min_prio;
273 vgroup->p_max_prio = p_max_prio;
6b861682 274
e2f2a1fd 275 if (tmplt_elusage) {
2802aadf
JP
276 vgroup->tmplt_elusage_set = true;
277 memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
278 sizeof(vgroup->tmplt_elusage));
e2f2a1fd 279 }
2802aadf
JP
280 INIT_LIST_HEAD(&vgroup->vregion_list);
281
282 err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
22a67766
JP
283 if (err)
284 return err;
285
2802aadf 286 err = rhashtable_init(&vgroup->vchunk_ht,
b2d6b4d2 287 &mlxsw_sp_acl_tcam_vchunk_ht_params);
22a67766
JP
288 if (err)
289 goto err_rhashtable_init;
290
291 return 0;
292
293err_rhashtable_init:
2802aadf 294 mlxsw_sp_acl_tcam_group_del(&vgroup->group);
22a67766
JP
295 return err;
296}
297
2802aadf
JP
298static void
299mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
22a67766 300{
2802aadf
JP
301 rhashtable_destroy(&vgroup->vchunk_ht);
302 mlxsw_sp_acl_tcam_group_del(&vgroup->group);
303 WARN_ON(!list_empty(&vgroup->vregion_list));
22a67766
JP
304}
305
306static int
307mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
308 struct mlxsw_sp_acl_tcam_group *group,
4b23258d
JP
309 struct mlxsw_sp_port *mlxsw_sp_port,
310 bool ingress)
22a67766 311{
22a67766
JP
312 char ppbt_pl[MLXSW_REG_PPBT_LEN];
313
02caf499
JP
314 mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
315 MLXSW_REG_PXBT_E_EACL,
316 MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
22a67766
JP
317 group->id);
318 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
319}
320
321static void
322mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
02caf499 323 struct mlxsw_sp_acl_tcam_group *group,
4b23258d
JP
324 struct mlxsw_sp_port *mlxsw_sp_port,
325 bool ingress)
22a67766
JP
326{
327 char ppbt_pl[MLXSW_REG_PPBT_LEN];
328
02caf499
JP
329 mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
330 MLXSW_REG_PXBT_E_EACL,
331 MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
22a67766
JP
332 group->id);
333 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
334}
335
0ade3b64
JP
336static u16
337mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
338{
339 return group->id;
340}
341
22a67766 342static unsigned int
0f54236d 343mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
22a67766 344{
b2d6b4d2 345 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
22a67766 346
b2d6b4d2 347 if (list_empty(&vregion->vchunk_list))
22a67766 348 return 0;
b2d6b4d2
JP
349 /* As a priority of a vregion, return priority of the first vchunk */
350 vchunk = list_first_entry(&vregion->vchunk_list,
351 typeof(*vchunk), list);
352 return vchunk->priority;
22a67766
JP
353}
354
355static unsigned int
0f54236d 356mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
22a67766 357{
b2d6b4d2 358 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
22a67766 359
b2d6b4d2 360 if (list_empty(&vregion->vchunk_list))
22a67766 361 return 0;
b2d6b4d2
JP
362 vchunk = list_last_entry(&vregion->vchunk_list,
363 typeof(*vchunk), list);
364 return vchunk->priority;
22a67766
JP
365}
366
593bb843
JP
367static void
368mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
369{
370 struct mlxsw_sp_acl_tcam_vregion *vregion;
371
372 if (list_empty(&vgroup->vregion_list))
373 return;
374 vregion = list_first_entry(&vgroup->vregion_list,
375 typeof(*vregion), list);
376 *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
377 vregion = list_last_entry(&vgroup->vregion_list,
378 typeof(*vregion), list);
379 *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
380}
381
0f54236d
JP
382static int
383mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
2802aadf
JP
384 struct mlxsw_sp_acl_tcam_group *group,
385 struct mlxsw_sp_acl_tcam_region *region,
79604b6e 386 unsigned int priority,
2802aadf 387 struct mlxsw_sp_acl_tcam_region *next_region)
22a67766 388{
2802aadf
JP
389 struct mlxsw_sp_acl_tcam_region *region2;
390 struct list_head *pos;
0f54236d
JP
391 int err;
392
5ec2ee28
JP
393 mutex_lock(&group->lock);
394 if (group->region_count == group->tcam->max_group_size) {
395 err = -ENOBUFS;
396 goto err_region_count_check;
397 }
0f54236d 398
2802aadf
JP
399 if (next_region) {
400 /* If the next region is defined, place the new one
401 * before it. The next one is a sibling.
402 */
403 pos = &next_region->list;
404 } else {
405 /* Position the region inside the list according to priority */
406 list_for_each(pos, &group->region_list) {
407 region2 = list_entry(pos, typeof(*region2), list);
408 if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
79604b6e 409 priority)
2802aadf
JP
410 break;
411 }
412 }
413 list_add_tail(&region->list, pos);
414 region->group = group;
415
0f54236d
JP
416 err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
417 if (err)
2802aadf 418 goto err_group_update;
22a67766 419
22a67766 420 group->region_count++;
5ec2ee28 421 mutex_unlock(&group->lock);
0f54236d 422 return 0;
2802aadf
JP
423
424err_group_update:
425 list_del(&region->list);
5ec2ee28
JP
426err_region_count_check:
427 mutex_unlock(&group->lock);
2802aadf 428 return err;
22a67766
JP
429}
430
431static void
0f54236d
JP
432mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
433 struct mlxsw_sp_acl_tcam_region *region)
22a67766 434{
2802aadf 435 struct mlxsw_sp_acl_tcam_group *group = region->group;
0f54236d 436
5ec2ee28 437 mutex_lock(&group->lock);
2802aadf 438 list_del(&region->list);
22a67766 439 group->region_count--;
0f54236d 440 mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
5ec2ee28 441 mutex_unlock(&group->lock);
22a67766
JP
442}
443
444static int
2802aadf
JP
445mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
446 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
79604b6e
JP
447 struct mlxsw_sp_acl_tcam_vregion *vregion,
448 unsigned int priority)
22a67766 449{
0f54236d
JP
450 struct mlxsw_sp_acl_tcam_vregion *vregion2;
451 struct list_head *pos;
22a67766
JP
452 int err;
453
0f54236d 454 /* Position the vregion inside the list according to priority */
2802aadf 455 list_for_each(pos, &vgroup->vregion_list) {
0f54236d 456 vregion2 = list_entry(pos, typeof(*vregion2), list);
79604b6e 457 if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
0f54236d
JP
458 break;
459 }
460 list_add_tail(&vregion->list, pos);
22a67766 461
2802aadf 462 err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
79604b6e
JP
463 vregion->region,
464 priority, NULL);
22a67766 465 if (err)
0f54236d 466 goto err_region_attach;
22a67766
JP
467
468 return 0;
469
0f54236d
JP
470err_region_attach:
471 list_del(&vregion->list);
22a67766
JP
472 return err;
473}
474
475static void
2802aadf
JP
476mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
477 struct mlxsw_sp_acl_tcam_vregion *vregion)
22a67766 478{
0f54236d 479 list_del(&vregion->list);
e5e7962e
JP
480 if (vregion->region2)
481 mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
482 vregion->region2);
0f54236d 483 mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
22a67766
JP
484}
485
0f54236d 486static struct mlxsw_sp_acl_tcam_vregion *
2802aadf
JP
487mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
488 unsigned int priority,
489 struct mlxsw_afk_element_usage *elusage,
490 bool *p_need_split)
22a67766 491{
0f54236d 492 struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
22a67766
JP
493 struct list_head *pos;
494 bool issubset;
495
2802aadf 496 list_for_each(pos, &vgroup->vregion_list) {
0f54236d 497 vregion = list_entry(pos, typeof(*vregion), list);
22a67766
JP
498
499 /* First, check if the requested priority does not rather belong
0f54236d 500 * under some of the next vregions.
22a67766 501 */
2802aadf 502 if (pos->next != &vgroup->vregion_list) { /* not last */
0f54236d
JP
503 vregion2 = list_entry(pos->next, typeof(*vregion2),
504 list);
505 if (priority >=
506 mlxsw_sp_acl_tcam_vregion_prio(vregion2))
22a67766
JP
507 continue;
508 }
509
0f54236d
JP
510 issubset = mlxsw_afk_key_info_subset(vregion->key_info,
511 elusage);
22a67766
JP
512
513 /* If requested element usage would not fit and the priority
0f54236d
JP
514 * is lower than the currently inspected vregion we cannot
515 * use this region, so return NULL to indicate new vregion has
22a67766
JP
516 * to be created.
517 */
518 if (!issubset &&
0f54236d 519 priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
22a67766
JP
520 return NULL;
521
522 /* If requested element usage would not fit and the priority
0f54236d
JP
523 * is higher than the currently inspected vregion we cannot
524 * use this vregion. There is still some hope that the next
525 * vregion would be the fit. So let it be processed and
22a67766
JP
526 * eventually break at the check right above this.
527 */
528 if (!issubset &&
0f54236d 529 priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
22a67766
JP
530 continue;
531
0f54236d 532 /* Indicate if the vregion needs to be split in order to add
22a67766 533 * the requested priority. Split is needed when requested
0f54236d 534 * element usage won't fit into the found vregion.
22a67766
JP
535 */
536 *p_need_split = !issubset;
0f54236d 537 return vregion;
22a67766 538 }
0f54236d 539 return NULL; /* New vregion has to be created. */
22a67766
JP
540}
541
542static void
2802aadf
JP
543mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
544 struct mlxsw_afk_element_usage *elusage,
545 struct mlxsw_afk_element_usage *out)
22a67766
JP
546{
547 const struct mlxsw_sp_acl_tcam_pattern *pattern;
548 int i;
549
e2f2a1fd
JP
550 /* In case the template is set, we don't have to look up the pattern
551 * and just use the template.
552 */
2802aadf
JP
553 if (vgroup->tmplt_elusage_set) {
554 memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
e2f2a1fd
JP
555 WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
556 return;
557 }
558
2802aadf
JP
559 for (i = 0; i < vgroup->patterns_count; i++) {
560 pattern = &vgroup->patterns[i];
22a67766
JP
561 mlxsw_afk_element_usage_fill(out, pattern->elements,
562 pattern->elements_count);
563 if (mlxsw_afk_element_usage_subset(elusage, out))
564 return;
565 }
566 memcpy(out, elusage, sizeof(*out));
567}
568
22a67766
JP
569static int
570mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
571 struct mlxsw_sp_acl_tcam_region *region)
572{
573 struct mlxsw_afk_key_info *key_info = region->key_info;
574 char ptar_pl[MLXSW_REG_PTAR_LEN];
575 unsigned int encodings_count;
576 int i;
577 int err;
578
579 mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
45e0620d 580 region->key_type,
22a67766
JP
581 MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
582 region->id, region->tcam_region_info);
583 encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
584 for (i = 0; i < encodings_count; i++) {
585 u16 encoding;
586
587 encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
588 mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
589 }
590 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
591 if (err)
592 return err;
593 mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
594 return 0;
595}
596
597static void
598mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
599 struct mlxsw_sp_acl_tcam_region *region)
600{
601 char ptar_pl[MLXSW_REG_PTAR_LEN];
602
45e0620d
JP
603 mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
604 region->key_type, 0, region->id,
22a67766
JP
605 region->tcam_region_info);
606 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
607}
608
22a67766
JP
609static int
610mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
611 struct mlxsw_sp_acl_tcam_region *region)
612{
613 char pacl_pl[MLXSW_REG_PACL_LEN];
614
615 mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
616 region->tcam_region_info);
617 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
618}
619
620static void
621mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
622 struct mlxsw_sp_acl_tcam_region *region)
623{
624 char pacl_pl[MLXSW_REG_PACL_LEN];
625
626 mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
627 region->tcam_region_info);
628 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
629}
630
22a67766
JP
631static struct mlxsw_sp_acl_tcam_region *
632mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
633 struct mlxsw_sp_acl_tcam *tcam,
e5e7962e
JP
634 struct mlxsw_sp_acl_tcam_vregion *vregion,
635 void *hints_priv)
22a67766 636{
64eccd00 637 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
22a67766
JP
638 struct mlxsw_sp_acl_tcam_region *region;
639 int err;
640
64eccd00 641 region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
22a67766
JP
642 if (!region)
643 return ERR_PTR(-ENOMEM);
22a67766 644 region->mlxsw_sp = mlxsw_sp;
0f54236d
JP
645 region->vregion = vregion;
646 region->key_info = vregion->key_info;
22a67766
JP
647
648 err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
649 if (err)
650 goto err_region_id_get;
651
a6b9c87d
IS
652 err = ops->region_associate(mlxsw_sp, region);
653 if (err)
654 goto err_tcam_region_associate;
655
64eccd00 656 region->key_type = ops->key_type;
22a67766
JP
657 err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
658 if (err)
659 goto err_tcam_region_alloc;
660
661 err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
662 if (err)
663 goto err_tcam_region_enable;
664
a339bf8a 665 err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
e5e7962e 666 region, hints_priv);
22a67766 667 if (err)
64eccd00 668 goto err_tcam_region_init;
22a67766
JP
669
670 return region;
671
64eccd00 672err_tcam_region_init:
22a67766
JP
673 mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
674err_tcam_region_enable:
675 mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
676err_tcam_region_alloc:
a6b9c87d 677err_tcam_region_associate:
22a67766
JP
678 mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
679err_region_id_get:
22a67766
JP
680 kfree(region);
681 return ERR_PTR(err);
682}
683
684static void
685mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
686 struct mlxsw_sp_acl_tcam_region *region)
687{
efeb7dfe 688 struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
64eccd00
JP
689 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
690
691 ops->region_fini(mlxsw_sp, region->priv);
22a67766
JP
692 mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
693 mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
efeb7dfe 694 mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
22a67766
JP
695 kfree(region);
696}
697
e5e7962e
JP
698static void
699mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
700{
701 unsigned long interval = vregion->tcam->vregion_rehash_intrvl;
702
703 if (!interval)
704 return;
f9b274ce 705 mlxsw_core_schedule_dw(&vregion->rehash.dw,
e5e7962e
JP
706 msecs_to_jiffies(interval));
707}
708
b2c091ce 709static void
e5e7962e 710mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
c9c9af91
JP
711 struct mlxsw_sp_acl_tcam_vregion *vregion,
712 int *credits);
e5e7962e
JP
713
714static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
715{
716 struct mlxsw_sp_acl_tcam_vregion *vregion =
717 container_of(work, struct mlxsw_sp_acl_tcam_vregion,
f9b274ce 718 rehash.dw.work);
c9c9af91 719 int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
e5e7962e 720
d90cfe20 721 mutex_lock(&vregion->lock);
b2c091ce 722 mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
d90cfe20 723 mutex_unlock(&vregion->lock);
c9c9af91
JP
724 if (credits < 0)
725 /* Rehash gone out of credits so it was interrupted.
726 * Schedule the work as soon as possible to continue.
727 */
728 mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
729 else
730 mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
e5e7962e
JP
731}
732
743edc85
IS
733static void
734mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
735{
736 /* The entry markers are relative to the current chunk and therefore
737 * needs to be reset together with the chunk marker.
738 */
739 ctx->current_vchunk = NULL;
740 ctx->start_ventry = NULL;
741 ctx->stop_ventry = NULL;
742}
743
6f9579d4
JP
744static void
745mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
746{
747 struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
748
749 /* If a rule was added or deleted from vchunk which is currently
750 * under rehash migration, we have to reset the ventry pointers
751 * to make sure all rules are properly migrated.
752 */
753 if (vregion->rehash.ctx.current_vchunk == vchunk) {
754 vregion->rehash.ctx.start_ventry = NULL;
755 vregion->rehash.ctx.stop_ventry = NULL;
756 }
757}
758
759static void
760mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
761{
762 /* If a chunk was added or deleted from vregion we have to reset
763 * the current chunk pointer to make sure all chunks
764 * are properly migrated.
765 */
743edc85 766 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
6f9579d4
JP
767}
768
0f54236d
JP
769static struct mlxsw_sp_acl_tcam_vregion *
770mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
79604b6e
JP
771 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
772 unsigned int priority,
0f54236d
JP
773 struct mlxsw_afk_element_usage *elusage)
774{
e5e7962e 775 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
0f54236d 776 struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
79604b6e 777 struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
0f54236d
JP
778 struct mlxsw_sp_acl_tcam_vregion *vregion;
779 int err;
780
781 vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
782 if (!vregion)
783 return ERR_PTR(-ENOMEM);
b2d6b4d2 784 INIT_LIST_HEAD(&vregion->vchunk_list);
1263a9ab 785 mutex_init(&vregion->lock);
e5e7962e
JP
786 vregion->tcam = tcam;
787 vregion->mlxsw_sp = mlxsw_sp;
6b861682 788 vregion->vgroup = vgroup;
1267f722 789 refcount_set(&vregion->ref_count, 1);
0f54236d
JP
790
791 vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
792 if (IS_ERR(vregion->key_info)) {
793 err = PTR_ERR(vregion->key_info);
794 goto err_key_info_get;
795 }
796
797 vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
e5e7962e 798 vregion, NULL);
0f54236d
JP
799 if (IS_ERR(vregion->region)) {
800 err = PTR_ERR(vregion->region);
801 goto err_region_create;
802 }
803
79604b6e
JP
804 err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
805 priority);
806 if (err)
807 goto err_vgroup_vregion_attach;
808
6b861682 809 if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
e5e7962e 810 /* Create the delayed work for vregion periodic rehash */
f9b274ce 811 INIT_DELAYED_WORK(&vregion->rehash.dw,
e5e7962e
JP
812 mlxsw_sp_acl_tcam_vregion_rehash_work);
813 mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
7b0f62ee 814 mutex_lock(&tcam->lock);
6b861682 815 list_add_tail(&vregion->tlist, &tcam->vregion_list);
7b0f62ee 816 mutex_unlock(&tcam->lock);
e5e7962e
JP
817 }
818
0f54236d
JP
819 return vregion;
820
79604b6e
JP
821err_vgroup_vregion_attach:
822 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
0f54236d
JP
823err_region_create:
824 mlxsw_afk_key_info_put(vregion->key_info);
825err_key_info_get:
826 kfree(vregion);
827 return ERR_PTR(err);
828}
829
830static void
831mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
832 struct mlxsw_sp_acl_tcam_vregion *vregion)
833{
e5e7962e 834 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
6b861682 835 struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
7b0f62ee 836 struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
e5e7962e 837
6b861682 838 if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
fb4e2b70
IS
839 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
840
7b0f62ee 841 mutex_lock(&tcam->lock);
6b861682 842 list_del(&vregion->tlist);
7b0f62ee 843 mutex_unlock(&tcam->lock);
fb4e2b70
IS
844 if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
845 ctx->hints_priv)
846 ops->region_rehash_hints_put(ctx->hints_priv);
6b861682 847 }
79604b6e 848 mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
e5e7962e
JP
849 if (vregion->region2)
850 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
0f54236d
JP
851 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
852 mlxsw_afk_key_info_put(vregion->key_info);
1263a9ab 853 mutex_destroy(&vregion->lock);
0f54236d
JP
854 kfree(vregion);
855}
856
79604b6e
JP
857static struct mlxsw_sp_acl_tcam_vregion *
858mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
859 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
860 unsigned int priority,
861 struct mlxsw_afk_element_usage *elusage)
22a67766 862{
79604b6e 863 struct mlxsw_afk_element_usage vregion_elusage;
0f54236d 864 struct mlxsw_sp_acl_tcam_vregion *vregion;
22a67766 865 bool need_split;
22a67766 866
2802aadf
JP
867 vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
868 elusage, &need_split);
79604b6e
JP
869 if (vregion) {
870 if (need_split) {
871 /* According to priority, new vchunk should belong to
872 * an existing vregion. However, this vchunk needs
873 * elements that vregion does not contain. We need
874 * to split the existing vregion into two and create
875 * a new vregion for the new vchunk in between.
876 * This is not supported now.
877 */
878 return ERR_PTR(-EOPNOTSUPP);
879 }
1267f722 880 refcount_inc(&vregion->ref_count);
79604b6e 881 return vregion;
22a67766
JP
882 }
883
79604b6e
JP
884 mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
885 &vregion_elusage);
22a67766 886
79604b6e
JP
887 return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
888 &vregion_elusage);
22a67766
JP
889}
890
891static void
79604b6e
JP
892mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
893 struct mlxsw_sp_acl_tcam_vregion *vregion)
22a67766 894{
1267f722 895 if (!refcount_dec_and_test(&vregion->ref_count))
79604b6e
JP
896 return;
897 mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
22a67766
JP
898}
899
900static struct mlxsw_sp_acl_tcam_chunk *
901mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
b2d6b4d2
JP
902 struct mlxsw_sp_acl_tcam_vchunk *vchunk,
903 struct mlxsw_sp_acl_tcam_region *region)
22a67766 904{
64eccd00 905 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
22a67766 906 struct mlxsw_sp_acl_tcam_chunk *chunk;
b2d6b4d2
JP
907
908 chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
909 if (!chunk)
910 return ERR_PTR(-ENOMEM);
911 chunk->vchunk = vchunk;
e5e7962e 912 chunk->region = region;
b2d6b4d2
JP
913
914 ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
915 return chunk;
916}
917
918static void
919mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
920 struct mlxsw_sp_acl_tcam_chunk *chunk)
921{
922 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
923
924 ops->chunk_fini(chunk->priv);
925 kfree(chunk);
926}
927
928static struct mlxsw_sp_acl_tcam_vchunk *
929mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
2802aadf 930 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
b2d6b4d2
JP
931 unsigned int priority,
932 struct mlxsw_afk_element_usage *elusage)
933{
6ef4889f 934 struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
79604b6e 935 struct mlxsw_sp_acl_tcam_vregion *vregion;
6ef4889f 936 struct list_head *pos;
22a67766
JP
937 int err;
938
939 if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
940 return ERR_PTR(-EINVAL);
941
b2d6b4d2
JP
942 vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
943 if (!vchunk)
22a67766 944 return ERR_PTR(-ENOMEM);
e5e7962e 945 INIT_LIST_HEAD(&vchunk->ventry_list);
b2d6b4d2 946 vchunk->priority = priority;
2802aadf 947 vchunk->vgroup = vgroup;
1267f722 948 refcount_set(&vchunk->ref_count, 1);
22a67766 949
79604b6e
JP
950 vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
951 priority, elusage);
952 if (IS_ERR(vregion)) {
953 err = PTR_ERR(vregion);
954 goto err_vregion_get;
955 }
956
957 vchunk->vregion = vregion;
22a67766 958
2802aadf 959 err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
b2d6b4d2 960 mlxsw_sp_acl_tcam_vchunk_ht_params);
22a67766
JP
961 if (err)
962 goto err_rhashtable_insert;
963
1263a9ab 964 mutex_lock(&vregion->lock);
b2d6b4d2
JP
965 vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
966 vchunk->vregion->region);
967 if (IS_ERR(vchunk->chunk)) {
1263a9ab 968 mutex_unlock(&vregion->lock);
b2d6b4d2
JP
969 err = PTR_ERR(vchunk->chunk);
970 goto err_chunk_create;
971 }
972
6f9579d4 973 mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
6ef4889f
JP
974
975 /* Position the vchunk inside the list according to priority */
976 list_for_each(pos, &vregion->vchunk_list) {
977 vchunk2 = list_entry(pos, typeof(*vchunk2), list);
978 if (vchunk2->priority > priority)
979 break;
980 }
981 list_add_tail(&vchunk->list, pos);
1263a9ab 982 mutex_unlock(&vregion->lock);
593bb843 983 mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
79604b6e 984
b2d6b4d2 985 return vchunk;
22a67766 986
b2d6b4d2 987err_chunk_create:
2802aadf 988 rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
b2d6b4d2 989 mlxsw_sp_acl_tcam_vchunk_ht_params);
22a67766 990err_rhashtable_insert:
79604b6e
JP
991 mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
992err_vregion_get:
b2d6b4d2 993 kfree(vchunk);
22a67766
JP
994 return ERR_PTR(err);
995}
996
997static void
b2d6b4d2
JP
998mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
999 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
22a67766 1000{
1263a9ab 1001 struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
2802aadf 1002 struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
22a67766 1003
1263a9ab 1004 mutex_lock(&vregion->lock);
6f9579d4 1005 mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
79604b6e 1006 list_del(&vchunk->list);
e5e7962e
JP
1007 if (vchunk->chunk2)
1008 mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
b2d6b4d2 1009 mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
1263a9ab 1010 mutex_unlock(&vregion->lock);
2802aadf 1011 rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
b2d6b4d2 1012 mlxsw_sp_acl_tcam_vchunk_ht_params);
79604b6e 1013 mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
b2d6b4d2 1014 kfree(vchunk);
593bb843 1015 mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
22a67766
JP
1016}
1017
b2d6b4d2
JP
1018static struct mlxsw_sp_acl_tcam_vchunk *
1019mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
2802aadf 1020 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
b2d6b4d2
JP
1021 unsigned int priority,
1022 struct mlxsw_afk_element_usage *elusage)
22a67766 1023{
b2d6b4d2 1024 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
22a67766 1025
2802aadf 1026 vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
b2d6b4d2
JP
1027 mlxsw_sp_acl_tcam_vchunk_ht_params);
1028 if (vchunk) {
1029 if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
22a67766
JP
1030 elusage)))
1031 return ERR_PTR(-EINVAL);
1267f722 1032 refcount_inc(&vchunk->ref_count);
b2d6b4d2 1033 return vchunk;
22a67766 1034 }
2802aadf 1035 return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
b2d6b4d2 1036 priority, elusage);
22a67766
JP
1037}
1038
b2d6b4d2
JP
1039static void
1040mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
1041 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
22a67766 1042{
1267f722 1043 if (!refcount_dec_and_test(&vchunk->ref_count))
22a67766 1044 return;
b2d6b4d2 1045 mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
22a67766
JP
1046}
1047
c4c2dc54
JP
1048static struct mlxsw_sp_acl_tcam_entry *
1049mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
1050 struct mlxsw_sp_acl_tcam_ventry *ventry,
e5e7962e 1051 struct mlxsw_sp_acl_tcam_chunk *chunk)
22a67766 1052{
64eccd00 1053 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
c4c2dc54 1054 struct mlxsw_sp_acl_tcam_entry *entry;
22a67766
JP
1055 int err;
1056
c4c2dc54
JP
1057 entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
1058 if (!entry)
1059 return ERR_PTR(-ENOMEM);
1060 entry->ventry = ventry;
e5e7962e 1061 entry->chunk = chunk;
22a67766 1062
e5e7962e
JP
1063 err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
1064 entry->priv, ventry->rulei);
22a67766 1065 if (err)
64eccd00 1066 goto err_entry_add;
22a67766 1067
c4c2dc54 1068 return entry;
22a67766 1069
64eccd00 1070err_entry_add:
c4c2dc54
JP
1071 kfree(entry);
1072 return ERR_PTR(err);
22a67766
JP
1073}
1074
c4c2dc54 1075static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
c4c2dc54 1076 struct mlxsw_sp_acl_tcam_entry *entry)
22a67766 1077{
64eccd00 1078 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
22a67766 1079
e5e7962e
JP
1080 ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
1081 entry->chunk->priv, entry->priv);
c4c2dc54 1082 kfree(entry);
22a67766
JP
1083}
1084
2507a64c
ND
1085static int
1086mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
c4c2dc54 1087 struct mlxsw_sp_acl_tcam_region *region,
2507a64c
ND
1088 struct mlxsw_sp_acl_tcam_entry *entry,
1089 struct mlxsw_sp_acl_rule_info *rulei)
1090{
1091 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
2507a64c 1092
42d704e0 1093 return ops->entry_action_replace(mlxsw_sp, region->priv,
2507a64c
ND
1094 entry->priv, rulei);
1095}
1096
7fd056c2
AS
1097static int
1098mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
1099 struct mlxsw_sp_acl_tcam_entry *entry,
1100 bool *activity)
1101{
64eccd00 1102 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
7fd056c2 1103
e5e7962e 1104 return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
64eccd00 1105 entry->priv, activity);
7fd056c2
AS
1106}
1107
c4c2dc54 1108static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
2802aadf 1109 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
c4c2dc54
JP
1110 struct mlxsw_sp_acl_tcam_ventry *ventry,
1111 struct mlxsw_sp_acl_rule_info *rulei)
1112{
1263a9ab 1113 struct mlxsw_sp_acl_tcam_vregion *vregion;
c4c2dc54
JP
1114 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1115 int err;
1116
2802aadf 1117 vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
c4c2dc54
JP
1118 &rulei->values.elusage);
1119 if (IS_ERR(vchunk))
1120 return PTR_ERR(vchunk);
1121
1122 ventry->vchunk = vchunk;
e5e7962e 1123 ventry->rulei = rulei;
1263a9ab
JP
1124 vregion = vchunk->vregion;
1125
1126 mutex_lock(&vregion->lock);
c4c2dc54 1127 ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
e5e7962e 1128 vchunk->chunk);
c4c2dc54 1129 if (IS_ERR(ventry->entry)) {
1263a9ab 1130 mutex_unlock(&vregion->lock);
c4c2dc54
JP
1131 err = PTR_ERR(ventry->entry);
1132 goto err_entry_create;
1133 }
1134
e5e7962e 1135 list_add_tail(&ventry->list, &vchunk->ventry_list);
6f9579d4 1136 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
1263a9ab 1137 mutex_unlock(&vregion->lock);
e5e7962e 1138
c4c2dc54
JP
1139 return 0;
1140
1141err_entry_create:
1142 mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
1143 return err;
1144}
1145
1146static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
1147 struct mlxsw_sp_acl_tcam_ventry *ventry)
1148{
1149 struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
1263a9ab 1150 struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
c4c2dc54 1151
1263a9ab 1152 mutex_lock(&vregion->lock);
6f9579d4 1153 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
e5e7962e
JP
1154 list_del(&ventry->list);
1155 mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
1263a9ab 1156 mutex_unlock(&vregion->lock);
c4c2dc54
JP
1157 mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
1158}
1159
1160static int
1161mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
1162 struct mlxsw_sp_acl_tcam_ventry *ventry,
1163 struct mlxsw_sp_acl_rule_info *rulei)
1164{
1165 struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
1166
1167 return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
1168 vchunk->vregion->region,
1169 ventry->entry, rulei);
1170}
1171
1172static int
1173mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
1174 struct mlxsw_sp_acl_tcam_ventry *ventry,
1175 bool *activity)
1176{
79b5b4b1
IS
1177 struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
1178 int err;
1179
1180 mutex_lock(&vregion->lock);
1181 err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
1182 activity);
1183 mutex_unlock(&vregion->lock);
1184 return err;
c4c2dc54
JP
1185}
1186
e5e7962e
JP
1187static int
1188mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
1189 struct mlxsw_sp_acl_tcam_ventry *ventry,
c9c9af91
JP
1190 struct mlxsw_sp_acl_tcam_chunk *chunk,
1191 int *credits)
e5e7962e 1192{
2c331593 1193 struct mlxsw_sp_acl_tcam_entry *new_entry;
e5e7962e 1194
6ca219e7 1195 /* First check if the entry is not already where we want it to be. */
2c331593 1196 if (ventry->entry->chunk == chunk)
6ca219e7
JP
1197 return 0;
1198
c9c9af91
JP
1199 if (--(*credits) < 0)
1200 return 0;
1201
2c331593
JP
1202 new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
1203 if (IS_ERR(new_entry))
1204 return PTR_ERR(new_entry);
e5e7962e 1205 mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
2c331593 1206 ventry->entry = new_entry;
e5e7962e
JP
1207 return 0;
1208}
1209
1210static int
844f01da
JP
1211mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
1212 struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1213 struct mlxsw_sp_acl_tcam_region *region,
1214 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
e5e7962e 1215{
e1d2f7a9 1216 struct mlxsw_sp_acl_tcam_chunk *new_chunk;
e5e7962e 1217
8ca3f7a7
IS
1218 WARN_ON(vchunk->chunk2);
1219
e1d2f7a9 1220 new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
44fd86cb 1221 if (IS_ERR(new_chunk))
e1d2f7a9 1222 return PTR_ERR(new_chunk);
e1d2f7a9
JP
1223 vchunk->chunk2 = vchunk->chunk;
1224 vchunk->chunk = new_chunk;
6f9579d4
JP
1225 ctx->current_vchunk = vchunk;
1226 ctx->start_ventry = NULL;
1227 ctx->stop_ventry = NULL;
844f01da
JP
1228 return 0;
1229}
1230
1231static void
1232mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
6f9579d4
JP
1233 struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1234 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
844f01da
JP
1235{
1236 mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
1237 vchunk->chunk2 = NULL;
743edc85 1238 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
844f01da
JP
1239}
1240
1241static int
1242mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
1243 struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1244 struct mlxsw_sp_acl_tcam_region *region,
c9c9af91
JP
1245 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1246 int *credits)
844f01da
JP
1247{
1248 struct mlxsw_sp_acl_tcam_ventry *ventry;
1249 int err;
844f01da 1250
84350051
JP
1251 if (vchunk->chunk->region != region) {
1252 err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
1253 region, ctx);
1254 if (err)
1255 return err;
1256 } else if (!vchunk->chunk2) {
1257 /* The chunk is already as it should be, nothing to do. */
1258 return 0;
1259 }
e1d2f7a9 1260
b377add0
IS
1261 if (list_empty(&vchunk->ventry_list))
1262 goto out;
1263
6f9579d4
JP
1264 /* If the migration got interrupted, we have the ventry to start from
1265 * stored in context.
1266 */
1267 if (ctx->start_ventry)
1268 ventry = ctx->start_ventry;
1269 else
1270 ventry = list_first_entry(&vchunk->ventry_list,
1271 typeof(*ventry), list);
1272
743edc85
IS
1273 WARN_ON(ventry->vchunk != vchunk);
1274
6f9579d4
JP
1275 list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
1276 /* During rollback, once we reach the ventry that failed
1277 * to migrate, we are done.
1278 */
1279 if (ventry == ctx->stop_ventry)
1280 break;
1281
e5e7962e 1282 err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
c9c9af91 1283 vchunk->chunk, credits);
e5e7962e 1284 if (err) {
7c33c72b
JP
1285 if (ctx->this_is_rollback) {
1286 /* Save the ventry which we ended with and try
1287 * to continue later on.
1288 */
1289 ctx->start_ventry = ventry;
e5e7962e 1290 return err;
7c33c72b 1291 }
84350051
JP
1292 /* Swap the chunk and chunk2 pointers so the follow-up
1293 * rollback call will see the original chunk pointer
1294 * in vchunk->chunk.
1295 */
1296 swap(vchunk->chunk, vchunk->chunk2);
6f9579d4
JP
1297 /* The rollback has to be done from beginning of the
1298 * chunk, that is why we have to null the start_ventry.
1299 * However, we know where to stop the rollback,
1300 * at the current ventry.
1301 */
1302 ctx->start_ventry = NULL;
1303 ctx->stop_ventry = ventry;
84350051 1304 return err;
c9c9af91
JP
1305 } else if (*credits < 0) {
1306 /* We are out of credits, the rest of the ventries
6f9579d4
JP
1307 * will be migrated later. Save the ventry
1308 * which we ended with.
c9c9af91 1309 */
6f9579d4 1310 ctx->start_ventry = ventry;
c9c9af91 1311 return 0;
e5e7962e
JP
1312 }
1313 }
844f01da 1314
b377add0 1315out:
6f9579d4 1316 mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
e5e7962e 1317 return 0;
e5e7962e
JP
1318}
1319
1320static int
1321mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
220f4fba 1322 struct mlxsw_sp_acl_tcam_vregion *vregion,
c9c9af91
JP
1323 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1324 int *credits)
e5e7962e
JP
1325{
1326 struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1327 int err;
1328
b377add0
IS
1329 if (list_empty(&vregion->vchunk_list))
1330 return 0;
1331
6f9579d4
JP
1332 /* If the migration got interrupted, we have the vchunk
1333 * we are working on stored in context.
1334 */
1335 if (ctx->current_vchunk)
1336 vchunk = ctx->current_vchunk;
1337 else
1338 vchunk = list_first_entry(&vregion->vchunk_list,
1339 typeof(*vchunk), list);
1340
1341 list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
e5e7962e 1342 err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
a86838e4 1343 vregion->region,
c9c9af91
JP
1344 ctx, credits);
1345 if (err || *credits < 0)
84350051 1346 return err;
e5e7962e
JP
1347 }
1348 return 0;
e5e7962e
JP
1349}
1350
1351static int
1352mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
1353 struct mlxsw_sp_acl_tcam_vregion *vregion,
c9c9af91
JP
1354 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1355 int *credits)
e5e7962e 1356{
84350051 1357 int err, err2;
e5e7962e 1358
3985de72 1359 trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
c9c9af91
JP
1360 err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1361 ctx, credits);
84350051 1362 if (err) {
8ca3f7a7
IS
1363 if (ctx->this_is_rollback)
1364 return err;
84350051
JP
1365 /* In case migration was not successful, we need to swap
1366 * so the original region pointer is assigned again
1367 * to vregion->region.
1368 */
1369 swap(vregion->region, vregion->region2);
743edc85 1370 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
84350051 1371 ctx->this_is_rollback = true;
c9c9af91
JP
1372 err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1373 ctx, credits);
f3d4ef1a 1374 if (err2) {
a4e76ba6
JP
1375 trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
1376 vregion);
f3d4ef1a 1377 dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
7c33c72b 1378 /* Let the rollback to be continued later on. */
f3d4ef1a 1379 }
84350051 1380 }
6375da3d 1381 trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
e5e7962e
JP
1382 return err;
1383}
1384
c9c9af91
JP
1385static bool
1386mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1387{
1388 return ctx->hints_priv;
1389}
1390
e5e7962e 1391static int
1667f766
JP
1392mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
1393 struct mlxsw_sp_acl_tcam_vregion *vregion,
1394 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
e5e7962e
JP
1395{
1396 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
a9550d0f 1397 unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
a86838e4 1398 struct mlxsw_sp_acl_tcam_region *new_region;
e5e7962e 1399 void *hints_priv;
a9550d0f 1400 int err;
e5e7962e 1401
3985de72 1402 trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
e5e7962e
JP
1403
1404 hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
1667f766
JP
1405 if (IS_ERR(hints_priv))
1406 return PTR_ERR(hints_priv);
1407
a86838e4
JP
1408 new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
1409 vregion, hints_priv);
1410 if (IS_ERR(new_region)) {
1411 err = PTR_ERR(new_region);
a9550d0f
JP
1412 goto err_region_create;
1413 }
1414
a86838e4
JP
1415 /* vregion->region contains the pointer to the new region
1416 * we are going to migrate to.
1417 */
1418 vregion->region2 = vregion->region;
1419 vregion->region = new_region;
a9550d0f 1420 err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
a86838e4
JP
1421 vregion->region2->group,
1422 new_region, priority,
1423 vregion->region2);
a9550d0f
JP
1424 if (err)
1425 goto err_group_region_attach;
1426
1667f766 1427 ctx->hints_priv = hints_priv;
220f4fba 1428 ctx->this_is_rollback = false;
743edc85 1429 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
1667f766
JP
1430
1431 return 0;
a9550d0f
JP
1432
1433err_group_region_attach:
a86838e4 1434 vregion->region = vregion->region2;
a9550d0f 1435 vregion->region2 = NULL;
a86838e4 1436 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
a9550d0f
JP
1437err_region_create:
1438 ops->region_rehash_hints_put(hints_priv);
1439 return err;
1667f766
JP
1440}
1441
1442static void
1443mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
1444 struct mlxsw_sp_acl_tcam_vregion *vregion,
1445 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1446{
a9550d0f 1447 struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
1667f766
JP
1448 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1449
7c33c72b
JP
1450 vregion->region2 = NULL;
1451 mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
1452 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
1667f766
JP
1453 ops->region_rehash_hints_put(ctx->hints_priv);
1454 ctx->hints_priv = NULL;
1455}
1456
b2c091ce 1457static void
1667f766 1458mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
c9c9af91
JP
1459 struct mlxsw_sp_acl_tcam_vregion *vregion,
1460 int *credits)
1667f766
JP
1461{
1462 struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
1463 int err;
1464
c9c9af91
JP
1465 /* Check if the previous rehash work was interrupted
1466 * which means we have to continue it now.
1467 * If not, start a new rehash.
1468 */
1469 if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
1470 err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
1471 vregion, ctx);
1472 if (err) {
1473 if (err != -EAGAIN)
1474 dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
b2c091ce 1475 return;
c9c9af91 1476 }
e5e7962e
JP
1477 }
1478
c9c9af91
JP
1479 err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
1480 ctx, credits);
e5e7962e 1481 if (err) {
5bcf9255 1482 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
54225988 1483 return;
e5e7962e
JP
1484 }
1485
c9c9af91
JP
1486 if (*credits >= 0)
1487 mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
e5e7962e
JP
1488}
1489
74cbc3c0
IS
1490static int
1491mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
1492 struct devlink_param_gset_ctx *ctx)
1493{
1494 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1495 struct mlxsw_sp_acl_tcam *tcam;
1496 struct mlxsw_sp *mlxsw_sp;
1497
1498 mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1499 tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
1500 ctx->val.vu32 = tcam->vregion_rehash_intrvl;
1501
1502 return 0;
1503}
1504
1505static int
1506mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
1507 struct devlink_param_gset_ctx *ctx)
1508{
1509 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1510 struct mlxsw_sp_acl_tcam_vregion *vregion;
1511 struct mlxsw_sp_acl_tcam *tcam;
1512 struct mlxsw_sp *mlxsw_sp;
1513 u32 val = ctx->val.vu32;
1514
1515 if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
1516 return -EINVAL;
1517
1518 mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1519 tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
1520 tcam->vregion_rehash_intrvl = val;
1521 mutex_lock(&tcam->lock);
1522 list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
1523 if (val)
1524 mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
1525 else
1526 cancel_delayed_work_sync(&vregion->rehash.dw);
1527 }
1528 mutex_unlock(&tcam->lock);
1529 return 0;
1530}
1531
1532static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
1533 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
1534 "acl_region_rehash_interval",
1535 DEVLINK_PARAM_TYPE_U32,
1536 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1537 mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
1538 mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
1539 NULL),
1540};
1541
1542static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
1543{
1544 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1545
1546 if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
1547 return 0;
1548
1549 return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
1550 ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
1551}
1552
1553static void
1554mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
1555{
1556 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1557
1558 if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
1559 return;
1560
1561 devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
1562 ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
1563}

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
	if (err)
		goto err_rehash_params_register;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	ida_init(&tcam->used_regions);
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	ida_init(&tcam->used_groups);
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
				     MLXSW_REG_PAGT_ACL_MAX_NUM);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	ida_destroy(&tcam->used_groups);
	ida_destroy(&tcam->used_regions);
	mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
	mutex_destroy(&tcam->lock);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->fini(mlxsw_sp, tcam->priv);
	ida_destroy(&tcam->used_groups);
	ida_destroy(&tcam->used_regions);
	mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
	mutex_destroy(&tcam->lock);
}

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

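/* Editor's illustrative sketch, not part of the driver: the patterns above
 * are candidate key-element sets, ordered by preference. A caller can pick
 * the first pattern whose elements are a superset of what a rule actually
 * uses, which is roughly what the group code earlier in this file does.
 * The helper name example_pick_pattern is hypothetical; it assumes the
 * mlxsw_afk_element_usage_fill() and mlxsw_afk_element_usage_subset()
 * helpers declared in core_acl_flex_keys.h.
 */
#if 0	/* illustrative sketch only */
static const struct mlxsw_sp_acl_tcam_pattern *
example_pick_pattern(struct mlxsw_afk_element_usage *rule_elusage)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	struct mlxsw_afk_element_usage pattern_elusage;
	int i;

	for (i = 0; i < MLXSW_SP_ACL_TCAM_PATTERNS_COUNT; i++) {
		pattern = &mlxsw_sp_acl_tcam_patterns[i];
		mlxsw_afk_element_usage_fill(&pattern_elusage,
					     pattern->elements,
					     pattern->elements_count);
		/* The rule's element usage must fit entirely within the
		 * pattern for the pattern to be usable.
		 */
		if (mlxsw_afk_element_usage_subset(rule_elusage,
						   &pattern_elusage))
			return pattern;
	}
	return NULL; /* no pattern covers the rule's element usage */
}
#endif
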
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage,
				     unsigned int *p_min_prio,
				     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true,
					    p_min_prio, p_max_prio);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage,
				 unsigned int *p_min_prio,
				 unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false,
					   p_min_prio, p_max_prio);
	if (err)
		return err;

	/* For most TCAM clients it would make sense to take a TCAM chunk
	 * only when the first rule is written. This is not the case for the
	 * multicast router, which must be bound to a specific ACL group ID
	 * that already exists in HW before the multicast router is
	 * initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	*activity = false;

	return 0;
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}
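
/* Editor's illustrative sketch, not part of the driver: the profile ops
 * looked up above are consumed by the generic spectrum ACL code. Roughly,
 * a consumer resolves the ops for a profile once and then calls through
 * them. The helper name acl_example_bind_port and its parameters are
 * hypothetical, and ruleset_priv would really be memory sized by
 * ops->ruleset_priv_size inside the ruleset object.
 */
#if 0	/* illustrative sketch only */
static int acl_example_bind_port(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 void *ruleset_priv, bool ingress)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp,
					    MLXSW_SP_ACL_PROFILE_FLOWER);
	if (!ops)
		return -EINVAL;

	/* For the flower profile this binds the ruleset's ACL group to the
	 * port; the MR profile implements ruleset_bind as a no-op because
	 * binding happens when the multicast router is initialized.
	 */
	return ops->ruleset_bind(mlxsw_sp, ruleset_priv, mlxsw_sp_port,
				 ingress);
}
#endif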