Merge remote-tracking branch 'asoc/topic/pcm5102a' into asoc-next
[linux-block.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_acl_tcam.c
CommitLineData
22a67766
JP
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
34
35#include <linux/kernel.h>
36#include <linux/slab.h>
37#include <linux/errno.h>
38#include <linux/bitops.h>
39#include <linux/list.h>
40#include <linux/rhashtable.h>
41#include <linux/netdevice.h>
42#include <linux/parman.h>
43
44#include "reg.h"
45#include "core.h"
46#include "resources.h"
47#include "spectrum.h"
48#include "core_acl_flex_keys.h"
49
/* Top-level TCAM state: tracks which region and group IDs are in use. */
struct mlxsw_sp_acl_tcam {
	unsigned long *used_regions; /* bit array, one bit per region ID */
	unsigned int max_regions;
	unsigned long *used_groups;  /* bit array, one bit per group ID */
	unsigned int max_groups;
	unsigned int max_group_size; /* max number of regions in one group */
};
57
/* Allocate the region/group ID bitmaps according to the device-reported
 * resource limits. Returns 0 on success or -ENOMEM.
 */
static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	return 0;

err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
97
/* Release the ID bitmaps allocated by mlxsw_sp_acl_tcam_init(). */
static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;

	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}
105
106static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
107 u16 *p_id)
108{
109 u16 id;
110
111 id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
112 if (id < tcam->max_regions) {
113 __set_bit(id, tcam->used_regions);
114 *p_id = id;
115 return 0;
116 }
117 return -ENOBUFS;
118}
119
/* Return a region ID to the free pool. */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
125
126static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
127 u16 *p_id)
128{
129 u16 id;
130
131 id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
132 if (id < tcam->max_groups) {
133 __set_bit(id, tcam->used_groups);
134 *p_id = id;
135 return 0;
136 }
137 return -ENOBUFS;
138}
139
/* Return a group ID to the free pool. */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
145
/* A fixed set of flexible-key elements a region may be created with. */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
150
/* A group of TCAM regions; lookups visit the member regions in list order. */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list; /* Regions ordered by priority */
	unsigned int region_count;
	struct rhashtable chunk_ht;   /* Chunks hashed by priority */
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
};
161
/* One hardware TCAM region. Entry placement inside the region is managed
 * by parman; a lowest-priority catch-all entry terminates every lookup.
 */
struct mlxsw_sp_acl_tcam_region {
	struct list_head list; /* Member of a TCAM group */
	struct list_head chunk_list; /* List of chunks under this region */
	struct parman *parman;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_acl_tcam_group *group;
	u16 id; /* ACL ID and region ID - they are same */
	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
	struct mlxsw_afk_key_info *key_info;
	struct {
		/* Default "continue" entry at the lowest priority */
		struct parman_prio parman_prio;
		struct parman_item parman_item;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};
177
/* All entries of one priority within a region; reference counted so
 * multiple rules of the same priority share a chunk.
 */
struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct parman_prio parman_prio;
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count;
};
187
/* A single installed TCAM rule; its slot is tracked by the parman item. */
struct mlxsw_sp_acl_tcam_entry {
	struct parman_item parman_item;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
};
192
/* Chunk hashtable keyed by the chunk's priority. */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};
199
/* Push the group's current ordered region list to hardware via the PAGT
 * register, so lookups traverse regions in list (priority) order.
 */
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
213
/* Initialize a group: claim an ID, write the (initially empty) region
 * list to hardware and set up the chunk hashtable.
 */
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
err_group_update:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}
247
/* Tear down a group. All regions must already be detached (warned on). */
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}
257
/* Bind the group to a port's ingress or egress ACL lookup via PPBT. */
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
			    MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
272
/* Unbind the group from a port's ingress or egress ACL lookup.
 * The write status is intentionally ignored on this teardown path.
 */
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
			    MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
287
0ade3b64
JP
/* Accessor for the group's hardware ID. */
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}
293
22a67766
JP
/* Lowest (first) chunk priority of a region; 0 if the region is empty. */
static unsigned int
mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	/* As a priority of a region, return priority of the first chunk */
	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}
305
/* Highest (last) chunk priority of a region; 0 if the region is empty. */
static unsigned int
mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}
316
/* Insert a region into the group's list, keeping the list sorted by the
 * regions' first-chunk priority.
 */
static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;

	/* Position the region inside the list according to priority */
	list_for_each(pos, &group->region_list) {
		region2 = list_entry(pos, typeof(*region2), list);
		if (mlxsw_sp_acl_tcam_region_prio(region2) >
		    mlxsw_sp_acl_tcam_region_prio(region))
			break;
	}
	list_add_tail(&region->list, pos);
	group->region_count++;
}
334
/* Remove a region from the group's list and update the count. */
static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	group->region_count--;
	list_del(&region->list);
}
342
/* Add a region to a group and push the new region order to hardware.
 * Fails with -ENOBUFS when the group is already at its size limit; on a
 * hardware-update failure the list change is rolled back.
 */
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	/* Best effort: restore the previous hardware state. */
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}
367
/* Remove a region from its group and push the shrunk list to hardware. */
static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}
377
/* Find an existing region that could host a chunk of the given priority
 * and element usage. Returns NULL when a new region must be created.
 * When a region is returned, *p_need_split says whether the region lacks
 * the needed elements and would have to be split first.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}
430
/* Pick the first predefined pattern that covers the requested element
 * usage and copy it to @out; fall back to the raw usage when none fits.
 * Using a pattern makes the region reusable by similar future rules.
 */
static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
448
449#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
450#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
451
/* Allocate the hardware TCAM region (PTAR), programming one key-block
 * encoding per block of the region's key info. On success the firmware-
 * assigned region info is stored back into region->tcam_region_info.
 */
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}
478
/* Free the hardware TCAM region allocated by ..._region_alloc(). */
static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
489
/* Resize the hardware region to hold @new_size entries. */
static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region,
				u16 new_size)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
			    new_size, region->id, region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
501
/* Enable lookups in the region via the PACL register. */
static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
512
/* Disable lookups in the region via the PACL register. */
static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
523
/* Write one TCAM entry (key, mask and first action set) at @offset in the
 * region via PTCE2.
 */
static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset,
				      struct mlxsw_sp_acl_rule_info *rulei)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	char *mask;
	char *key;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
547
/* Invalidate the TCAM entry at @offset (PTCE2 write with valid=false). */
static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
559
7fd056c2
AS
/* Query (and clear) the hardware activity bit of the entry at @offset. */
static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_region *region,
					    unsigned int offset,
					    bool *activity)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	int err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
			     region->tcam_region_info, offset);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
	if (err)
		return err;
	*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
	return 0;
}
577
9bcdef32 578#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
22a67766
JP
579
580static int
581mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
582 struct mlxsw_sp_acl_tcam_region *region)
583{
584 struct parman_prio *parman_prio = &region->catchall.parman_prio;
585 struct parman_item *parman_item = &region->catchall.parman_item;
586 struct mlxsw_sp_acl_rule_info *rulei;
587 int err;
588
589 parman_prio_init(region->parman, parman_prio,
590 MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
591 err = parman_item_add(region->parman, parman_prio, parman_item);
592 if (err)
593 goto err_parman_item_add;
594
595 rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
596 if (IS_ERR(rulei)) {
597 err = PTR_ERR(rulei);
598 goto err_rulei_create;
599 }
600
2a52a8c6
JP
601 err = mlxsw_sp_acl_rulei_act_continue(rulei);
602 if (WARN_ON(err))
603 goto err_rulei_act_continue;
604
22a67766
JP
605 err = mlxsw_sp_acl_rulei_commit(rulei);
606 if (err)
607 goto err_rulei_commit;
608
609 err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
610 parman_item->index, rulei);
611 region->catchall.rulei = rulei;
612 if (err)
613 goto err_rule_insert;
614
615 return 0;
616
617err_rule_insert:
618err_rulei_commit:
2a52a8c6 619err_rulei_act_continue:
22a67766
JP
620 mlxsw_sp_acl_rulei_destroy(rulei);
621err_rulei_create:
622 parman_item_remove(region->parman, parman_prio, parman_item);
623err_parman_item_add:
624 parman_prio_fini(parman_prio);
625 return err;
626}
627
/* Remove the region's catch-all entry and release its resources, in the
 * reverse order of ..._catchall_add().
 */
static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      parman_item->index);
	mlxsw_sp_acl_rulei_destroy(rulei);
	parman_item_remove(region->parman, parman_prio, parman_item);
	parman_prio_fini(parman_prio);
}
642
/* Move @size consecutive entries within the region (PRCR), used by parman
 * when reshuffling priorities.
 */
static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region,
			      u16 src_offset, u16 dst_offset, u16 size)
{
	char prcr_pl[MLXSW_REG_PRCR_LEN];

	mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
			    region->tcam_region_info, src_offset,
			    region->tcam_region_info, dst_offset, size);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}
655
/* parman resize callback: grow/shrink the hardware region, capped by the
 * device's maximum rule count.
 */
static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
						  unsigned long new_count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}
668
/* parman move callback: relocate entries within the hardware region. */
static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
						 unsigned long from_index,
						 unsigned long to_index,
						 unsigned long count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

	mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
				      from_index, to_index, count);
}
680
/* parman configuration: linear-sort placement, fixed base size and step. */
static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_acl_tcam_region_parman_resize,
	.move		= mlxsw_sp_acl_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};
688
/* Create a region for the given element usage: set up parman, resolve the
 * key info, claim an ID, allocate and enable the hardware region and
 * install the catch-all entry. Unwinds in reverse order on failure.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
				       region);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_catchall_add;

	return region;

err_tcam_region_catchall_add:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	parman_destroy(region->parman);
err_parman_create:
	kfree(region);
	return ERR_PTR(err);
}
749
/* Destroy a region, mirroring ..._region_create() in reverse order. */
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	parman_destroy(region->parman);
	kfree(region);
}
762
/* Associate a chunk with a region: reuse a matching existing region, or
 * create and attach a new one (widened to a predefined pattern).
 * Splitting an existing region is not supported and yields -EOPNOTSUPP.
 */
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}
814
/* Unlink a chunk from its region; destroy the region when it was the
 * region's last chunk.
 */
static void
mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	list_del(&chunk->list);
	if (list_empty(&region->chunk_list)) {
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	}
}
827
/* Create a chunk for @priority, associate it with a region and insert it
 * into the group's chunk hashtable. The catch-all priority is reserved.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	/* The catch-all priority is reserved for internal use. */
	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}
868
/* Destroy a chunk, mirroring ..._chunk_create() in reverse order. */
static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}
881
/* Get (or create) a chunk for the given priority. An existing chunk is
 * only reusable when its region's key covers the requested element usage;
 * anything else indicates a caller bug (WARN_ON).
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
	if (chunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		chunk->ref_count++;
		return chunk;
	}
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
					      priority, elusage);
}
902
/* Drop one reference; destroy the chunk when the last reference is gone. */
static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	if (--chunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}
910
/* Add a rule entry: find/create the priority chunk, let parman pick a
 * slot in the chunk's region and program the entry into hardware.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;
	err = parman_item_add(region->parman, &chunk->parman_prio,
			      &entry->parman_item);
	if (err)
		goto err_parman_item_add;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    entry->parman_item.index,
						    rulei);
	if (err)
		goto err_rule_insert;
	entry->chunk = chunk;

	return 0;

err_rule_insert:
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
err_parman_item_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}
947
/* Remove a rule entry, mirroring ..._entry_add() in reverse order. */
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      entry->parman_item.index);
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
960
7fd056c2
AS
/* Report (and clear) the entry's hardware activity bit. */
static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
							   entry->parman_item.index,
							   activity);
}
973
22a67766
JP
/* Key elements used for IPv4 (and L2) match patterns. */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC,
	MLXSW_AFK_ELEMENT_SMAC,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP4,
	MLXSW_AFK_ELEMENT_DST_IP4,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};
991
/* Key elements used for IPv6 match patterns. */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_IP6_HI,
	MLXSW_AFK_ELEMENT_DST_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
1002
/* Predefined region patterns, tried in order by ..._use_patterns(). */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};
1013
/* Number of supported key patterns (currently IPv4 and IPv6). */
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
1016
/* Per-ruleset private state for the flower profile; lives in the
 * ruleset_priv area sized via .ruleset_priv_size in the profile ops.
 */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};
1020
/* Per-rule private state for the flower profile; lives in the
 * rule_priv area sized via .rule_priv_size in the profile ops.
 */
struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
1024
1025static int
1026mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1027 void *priv, void *ruleset_priv)
1028{
1029 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1030 struct mlxsw_sp_acl_tcam *tcam = priv;
1031
1032 return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
1033 mlxsw_sp_acl_tcam_patterns,
1034 MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
1035}
1036
1037static void
1038mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
1039 void *ruleset_priv)
1040{
1041 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1042
1043 mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
1044}
1045
1046static int
1047mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
1048 void *ruleset_priv,
4b23258d
JP
1049 struct mlxsw_sp_port *mlxsw_sp_port,
1050 bool ingress)
22a67766
JP
1051{
1052 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1053
1054 return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
4b23258d 1055 mlxsw_sp_port, ingress);
22a67766
JP
1056}
1057
1058static void
1059mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
02caf499 1060 void *ruleset_priv,
4b23258d
JP
1061 struct mlxsw_sp_port *mlxsw_sp_port,
1062 bool ingress)
22a67766
JP
1063{
1064 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1065
4b23258d
JP
1066 mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
1067 mlxsw_sp_port, ingress);
22a67766
JP
1068}
1069
0ade3b64
JP
1070static u16
1071mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
1072{
1073 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1074
1075 return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
1076}
1077
22a67766
JP
1078static int
1079mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
1080 void *ruleset_priv, void *rule_priv,
1081 struct mlxsw_sp_acl_rule_info *rulei)
1082{
1083 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1084 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1085
1086 return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
1087 &rule->entry, rulei);
1088}
1089
1090static void
1091mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1092{
1093 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1094
1095 mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
1096}
1097
7fd056c2
AS
1098static int
1099mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1100 void *rule_priv, bool *activity)
1101{
1102 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1103
1104 return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
1105 activity);
1106}
1107
22a67766
JP
/* Profile-ops vtable wiring the flower callbacks into the generic ACL
 * core; looked up through mlxsw_sp_acl_tcam_profile_ops(). The two
 * *_priv_size fields tell the core how much private storage to allocate
 * per ruleset/rule.
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
1120
/* Profile -> ops lookup table, indexed by enum mlxsw_sp_acl_profile.
 * Unlisted profiles are implicitly NULL and rejected at lookup time.
 */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
1125
1126static const struct mlxsw_sp_acl_profile_ops *
1127mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1128 enum mlxsw_sp_acl_profile profile)
1129{
1130 const struct mlxsw_sp_acl_profile_ops *ops;
1131
1132 if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1133 return NULL;
1134 ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1135 if (WARN_ON(!ops))
1136 return NULL;
1137 return ops;
1138}
1139
/* Top-level ACL backend ops exported to the Spectrum ACL core.
 * NOTE(review): mlxsw_sp_acl_tcam_init/fini are defined earlier in this
 * file (outside this excerpt).
 */
const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_acl_tcam),
	.init = mlxsw_sp_acl_tcam_init,
	.fini = mlxsw_sp_acl_tcam_fini,
	.profile_ops = mlxsw_sp_acl_tcam_profile_ops,
};