1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/slab.h>
6 #include <linux/errno.h>
7 #include <linux/bitops.h>
8 #include <linux/list.h>
9 #include <linux/rhashtable.h>
10 #include <linux/netdevice.h>
14 #include "resources.h"
16 #include "spectrum_acl_tcam.h"
17 #include "core_acl_flex_keys.h"
19 size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
21 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
23 return ops->priv_size;
26 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
27 struct mlxsw_sp_acl_tcam *tcam)
29 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
36 max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
37 ACL_MAX_TCAM_REGIONS);
38 max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
40 /* Use 1:1 mapping between ACL region and TCAM region */
41 if (max_tcam_regions < max_regions)
42 max_regions = max_tcam_regions;
44 alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
45 tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
46 if (!tcam->used_regions)
48 tcam->max_regions = max_regions;
50 max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
51 alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
52 tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
53 if (!tcam->used_groups) {
55 goto err_alloc_used_groups;
57 tcam->max_groups = max_groups;
58 tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
61 err = ops->init(mlxsw_sp, tcam->priv, tcam);
68 kfree(tcam->used_groups);
69 err_alloc_used_groups:
70 kfree(tcam->used_regions);
74 void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
75 struct mlxsw_sp_acl_tcam *tcam)
77 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
79 ops->fini(mlxsw_sp, tcam->priv);
80 kfree(tcam->used_groups);
81 kfree(tcam->used_regions);
84 int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
85 struct mlxsw_sp_acl_rule_info *rulei,
86 u32 *priority, bool fillup_priority)
90 if (!fillup_priority) {
95 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
98 /* Priority range is 1..cap_kvd_size-1. */
99 max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
100 if (rulei->priority >= max_priority)
103 /* Unlike in TC, in HW, higher number means higher priority. */
104 *priority = max_priority - rulei->priority;
108 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
113 id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
114 if (id < tcam->max_regions) {
115 __set_bit(id, tcam->used_regions);
122 static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
125 __clear_bit(id, tcam->used_regions);
128 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
133 id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
134 if (id < tcam->max_groups) {
135 __set_bit(id, tcam->used_groups);
142 static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
145 __clear_bit(id, tcam->used_groups);
/* A key-element pattern: a candidate set of flex-key elements consulted
 * when choosing the key layout for a newly created region.
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
/* An ACL group: a priority-ordered list of regions published to HW under a
 * single group ID. Rule chunks are looked up by priority via chunk_ht.
 */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	struct list_head region_list; /* regions, ordered by priority */
	unsigned int region_count;
	struct rhashtable chunk_ht; /* chunks keyed by priority */
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set; /* true when a fixed template is in force */
	struct mlxsw_afk_element_usage tmplt_elusage;
/* A chunk groups all entries that share one priority within a region;
 * it is refcounted by the rules using that priority.
 */
struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count; /* number of users of this priority */
	unsigned long priv[0]; /* flavour-specific trailing data */
	/* priv has to be always the last item */
/* One TCAM rule entry; flavour-specific state lives in the trailing priv[]. */
struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[0];
	/* priv has to be always the last item */
/* Hashtable parameters: chunks are keyed by their (unsigned int) priority. */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
190 static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
191 struct mlxsw_sp_acl_tcam_group *group)
193 struct mlxsw_sp_acl_tcam_region *region;
194 char pagt_pl[MLXSW_REG_PAGT_LEN];
197 mlxsw_reg_pagt_pack(pagt_pl, group->id);
198 list_for_each_entry(region, &group->region_list, list)
199 mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
200 mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
201 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
205 mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
206 struct mlxsw_sp_acl_tcam *tcam,
207 struct mlxsw_sp_acl_tcam_group *group,
208 const struct mlxsw_sp_acl_tcam_pattern *patterns,
209 unsigned int patterns_count,
210 struct mlxsw_afk_element_usage *tmplt_elusage)
215 group->patterns = patterns;
216 group->patterns_count = patterns_count;
218 group->tmplt_elusage_set = true;
219 memcpy(&group->tmplt_elusage, tmplt_elusage,
220 sizeof(group->tmplt_elusage));
222 INIT_LIST_HEAD(&group->region_list);
223 err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
227 err = rhashtable_init(&group->chunk_ht,
228 &mlxsw_sp_acl_tcam_chunk_ht_params);
230 goto err_rhashtable_init;
235 mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
/* Tear down a group: destroy the chunk hashtable and release the group ID.
 * All regions are expected to be detached already (warn otherwise).
 */
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
/* Bind the group to a port's ingress or egress ACL binding point via the
 * PPBT register. Returns the register-write result.
 */
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	/* E_IACL selects the ingress binding point, E_EACL the egress one. */
	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
			    MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
/* Unbind the group from a port's ingress/egress ACL binding point.
 * The register-write result is intentionally ignored (teardown path).
 */
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
			    MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
/* Return the HW ACL group ID assigned to this group. */
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
286 mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
288 struct mlxsw_sp_acl_tcam_chunk *chunk;
290 if (list_empty(®ion->chunk_list))
292 /* As a priority of a region, return priority of the first chunk */
293 chunk = list_first_entry(®ion->chunk_list, typeof(*chunk), list);
294 return chunk->priority;
298 mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
300 struct mlxsw_sp_acl_tcam_chunk *chunk;
302 if (list_empty(®ion->chunk_list))
304 chunk = list_last_entry(®ion->chunk_list, typeof(*chunk), list);
305 return chunk->priority;
309 mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
310 struct mlxsw_sp_acl_tcam_region *region)
312 struct mlxsw_sp_acl_tcam_region *region2;
313 struct list_head *pos;
315 /* Position the region inside the list according to priority */
316 list_for_each(pos, &group->region_list) {
317 region2 = list_entry(pos, typeof(*region2), list);
318 if (mlxsw_sp_acl_tcam_region_prio(region2) >
319 mlxsw_sp_acl_tcam_region_prio(region))
322 list_add_tail(®ion->list, pos);
323 group->region_count++;
327 mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
328 struct mlxsw_sp_acl_tcam_region *region)
330 group->region_count--;
331 list_del(®ion->list);
/* Add a region to the group (priority-ordered) and publish the updated
 * region list to HW; rolls the list change back if the HW update fails.
 * Fails outright when the group already holds max_group_size regions.
 */
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)

	if (group->region_count == group->tcam->max_group_size)

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
		goto err_group_update;
	region->group = group;

	/* Error rollback: undo the list insertion and re-sync HW. */
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
/* Remove a region from its group and push the shrunk region list to HW.
 * The HW update result is not checked on this teardown path.
 */
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
/* Find the existing region within the group that should host a chunk of the
 * given priority/element usage. Returns the region (setting *p_need_split
 * when the region's key cannot express elusage), or NULL when a brand new
 * region must be created.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);
		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);
		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 */
			priority < mlxsw_sp_acl_tcam_region_prio(region))
		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
			priority > mlxsw_sp_acl_tcam_region_max_prio(region))
		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
	return NULL; /* New region has to be created. */
/* Pick the key-element usage for a new region: the group template when set,
 * otherwise the first predefined pattern that covers elusage, otherwise
 * elusage itself verbatim.
 */
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
	const struct mlxsw_sp_acl_tcam_pattern *pattern;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (group->tmplt_elusage_set) {
		memcpy(out, &group->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
	/* No pattern covers the request — fall back to the raw usage. */
	memcpy(out, elusage, sizeof(*out));
/* Allocate the region in HW via the PTAR register, packing one key block
 * encoding per flex-key block, and read back the opaque tcam_region_info
 * that later register calls need.
 */
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	/* HW fills tcam_region_info; unpack it for later use. */
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
/* Free the HW region via PTAR. Write result ignored (teardown path). */
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
/* Enable the region for lookups via the PACL register. */
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
/* Disable region lookups via PACL. Write result ignored (teardown path). */
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
511 static struct mlxsw_sp_acl_tcam_region *
512 mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
513 struct mlxsw_sp_acl_tcam *tcam,
514 struct mlxsw_afk_element_usage *elusage)
516 const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
517 struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
518 struct mlxsw_sp_acl_tcam_region *region;
521 region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
523 return ERR_PTR(-ENOMEM);
524 INIT_LIST_HEAD(®ion->chunk_list);
525 region->mlxsw_sp = mlxsw_sp;
527 region->key_info = mlxsw_afk_key_info_get(afk, elusage);
528 if (IS_ERR(region->key_info)) {
529 err = PTR_ERR(region->key_info);
530 goto err_key_info_get;
533 err = mlxsw_sp_acl_tcam_region_id_get(tcam, ®ion->id);
535 goto err_region_id_get;
537 err = ops->region_associate(mlxsw_sp, region);
539 goto err_tcam_region_associate;
541 region->key_type = ops->key_type;
542 err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
544 goto err_tcam_region_alloc;
546 err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
548 goto err_tcam_region_enable;
550 err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
552 goto err_tcam_region_init;
556 err_tcam_region_init:
557 mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
558 err_tcam_region_enable:
559 mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
560 err_tcam_region_alloc:
561 err_tcam_region_associate:
562 mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
564 mlxsw_afk_key_info_put(region->key_info);
/* Destroy a region: flavour-specific fini, disable and free it in HW,
 * then release its ID and key info (reverse order of region_create).
 */
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
585 mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
586 struct mlxsw_sp_acl_tcam_group *group,
587 unsigned int priority,
588 struct mlxsw_afk_element_usage *elusage,
589 struct mlxsw_sp_acl_tcam_chunk *chunk)
591 struct mlxsw_sp_acl_tcam_region *region;
592 bool region_created = false;
596 region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
598 if (region && need_split) {
599 /* According to priority, the chunk should belong to an
600 * existing region. However, this chunk needs elements
601 * that region does not contain. We need to split the existing
602 * region into two and create a new region for this chunk
603 * in between. This is not supported now.
608 struct mlxsw_afk_element_usage region_elusage;
610 mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
612 region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
615 return PTR_ERR(region);
616 region_created = true;
619 chunk->region = region;
620 list_add_tail(&chunk->list, ®ion->chunk_list);
625 err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
627 goto err_group_region_attach;
631 err_group_region_attach:
632 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
637 mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
638 struct mlxsw_sp_acl_tcam_chunk *chunk)
640 struct mlxsw_sp_acl_tcam_region *region = chunk->region;
642 list_del(&chunk->list);
643 if (list_empty(®ion->chunk_list)) {
644 mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
645 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
/* Create a chunk for the given priority: associate it with (or create) a
 * region, run flavour-specific chunk init and insert it into the group's
 * priority hashtable. Returns the chunk or an ERR_PTR.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	/* The catch-all priority is reserved and may not be used by rules. */
	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1; /* creator holds the first reference */

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
		goto err_chunk_assoc;

	ops->chunk_init(chunk->region->priv, chunk->priv, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
		goto err_rhashtable_insert;

err_rhashtable_insert:
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
/* Destroy a chunk: remove it from the group hashtable, run flavour-specific
 * fini and detach it from its region (reverse order of chunk_create).
 */
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
/* Look up an existing chunk by priority (taking a reference), sanity-check
 * that its region's key covers the requested element usage, or create a
 * fresh chunk when none exists. Returns the chunk or an ERR_PTR.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
		/* Reusing a chunk whose region key cannot express elusage
		 * would be a driver bug — warn and bail out.
		 */
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
			return ERR_PTR(-EINVAL);
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
/* Drop a chunk reference; destroy the chunk when the count hits zero. */
static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_chunk *chunk)
	if (--chunk->ref_count)
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
/* Return the flavour-specific per-entry private area size. */
static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_priv_size;
/* Add a rule entry: take (or create) the chunk matching the rule priority
 * and element usage, then let the flavour-specific code program the entry
 * into the chunk's region. Drops the chunk reference on failure.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
		return PTR_ERR(chunk);

	region = chunk->region;

	err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
	entry->chunk = chunk;

	/* Error path: release the chunk reference taken above. */
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
/* Remove a rule entry from HW and release its chunk reference. */
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
/* Replace the action set of an existing entry in place, delegating to the
 * flavour-specific implementation. Returns its result.
 */
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv,
/* Query whether the entry was hit in HW since the last check; the result is
 * reported through *activity by the flavour-specific implementation.
 */
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	return ops->entry_activity_get(mlxsw_sp, region->priv,
				       entry->priv, activity);
/* Flex-key elements offered for IPv4-oriented regions (L2 + IPv4 + L4). */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
/* Flex-key elements offered for IPv6-oriented regions (full 128-bit
 * addresses split into 32-bit element slices, plus L4 ports).
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
/* Predefined key patterns, tried in order when a new region is created. */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
	.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
	.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
/* Per-profile private ruleset/rule wrappers embedding the generic group,
 * chunk and entry objects. The MR (multicast router) ruleset pins one
 * chunk for its whole lifetime (see mlxsw_sp_acl_tcam_mr_ruleset_add).
 */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_group group;

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
/* Flower profile: create the ruleset's group using the standard patterns
 * and the optional template element usage.
 */
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
/* Flower profile: destroy the ruleset's group. */
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
/* Flower profile: bind the group to the port's ingress/egress ACL point. */
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
					    mlxsw_sp_port, ingress);
/* Flower profile: unbind the group from the port's ACL binding point. */
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
				       mlxsw_sp_port, ingress);
/* Flower profile: expose the underlying HW group ID. */
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
/* Flower profile: rule private size = wrapper + flavour entry priv. */
static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
	return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
	       mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
/* Flower profile: add a rule entry into the ruleset's group. */
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
					   &rule->entry, rulei);
/* Flower profile: delete a rule entry. */
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
/* Flower profile: in-place action replace. NOTE(review): the body is not
 * visible here — presumably it rejects the operation for this profile;
 * confirm against the full source.
 */
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule_info *rulei)
/* Flower profile: query rule hit activity from HW. */
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
/* Profile ops vtable wiring the flower (TC) client to the generic TCAM. */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= mlxsw_sp_acl_tcam_flower_rule_priv_size,
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
/* MR profile: create the group and immediately pin one chunk, so the group
 * ID exists in HW before the multicast router binds to it.
 */
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					  mlxsw_sp_acl_tcam_patterns,
					  MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
	/* For most of the TCAM clients it would make sense to take a tcam chunk
	 * only when the first rule is written. This is not the case for
	 * multicast router as it is required to bind the multicast router to a
	 * specific ACL Group ID which must exist in HW before multicast router
	 */
	ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group,
	if (IS_ERR(ruleset->chunk)) {
		err = PTR_ERR(ruleset->chunk);

	/* Error path: drop the group created above. */
	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
/* MR profile: release the pinned chunk, then destroy the group. */
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk);
	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
/* MR profile: no per-port binding here. */
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
	/* Binding is done when initializing multicast router */
/* MR profile: no per-port unbinding (see the bind counterpart). */
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_port *mlxsw_sp_port,
/* MR profile: expose the underlying HW group ID. */
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
/* MR profile: rule private size = wrapper + flavour entry priv. */
static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
	return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) +
	       mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
/* MR profile: add a rule entry into the ruleset's group. */
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
					   &rule->entry, rulei);
/* MR profile: delete a rule entry. */
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
/* MR profile: replace a rule's action set in place. */
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *ruleset_priv, void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group,
						      &rule->entry, rulei);
/* MR profile: query rule hit activity from HW. */
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
/* Profile ops vtable wiring the multicast-router client to the generic TCAM. */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size		= mlxsw_sp_acl_tcam_mr_rule_priv_size,
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
/* Profile -> ops lookup table, indexed by enum mlxsw_sp_acl_profile. */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
1127 const struct mlxsw_sp_acl_profile_ops *
1128 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1129 enum mlxsw_sp_acl_profile profile)
1131 const struct mlxsw_sp_acl_profile_ops *ops;
1133 if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1135 ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];