/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "spectrum_mr_tcam.h"
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"

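/* A multicast routing TCAM region. Each region is keyed by an RTAR key type
 * and is managed by a priority array manager (parman) instance, which maps
 * route priorities to TCAM indices.
 */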
struct mlxsw_sp_mr_tcam_region {
        struct mlxsw_sp *mlxsw_sp;
        enum mlxsw_reg_rtar_key_type rtar_key_type;
        struct parman *parman;
        struct parman_prio *parman_prios;
};

struct mlxsw_sp_mr_tcam {
        struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
};

/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
        struct list_head list;
        u32 rigr2_kvdl_index;
        int num_erifs;
        u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
        bool synced;
};

struct mlxsw_sp_mr_tcam_erif_list {
        struct list_head erif_sublists;
        u32 kvdl_index;
};

static bool
mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
        int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                   MC_ERIF_LIST_ENTRIES);

        return erif_sublist->num_erifs == erif_list_entries;
}

static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
        INIT_LIST_HEAD(&erif_list->erif_sublists);
}

#define MLXSW_SP_KVDL_RIGR2_SIZE 1

static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
        struct mlxsw_sp_mr_erif_sublist *erif_sublist;
        int err;

        erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
        if (!erif_sublist)
                return ERR_PTR(-ENOMEM);
        err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
                                  &erif_sublist->rigr2_kvdl_index);
        if (err) {
                kfree(erif_sublist);
                return ERR_PTR(err);
        }

        list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
        return erif_sublist;
}

static void
mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
        list_del(&erif_sublist->list);
        mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
        kfree(erif_sublist);
}

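/* Append an eRIF to the list, opening a new sublist when the list is empty
 * or the current tail sublist is full. Touching the tail clears its synced
 * flag so that the next commit rewrites it in hardware.
 */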
static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_mr_tcam_erif_list *erif_list,
                          u16 erif_index)
{
        struct mlxsw_sp_mr_erif_sublist *sublist;

        /* If there is no sublist yet, or the last one is full, allocate a
         * new one.
         */
        if (list_empty(&erif_list->erif_sublists)) {
                sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
                if (IS_ERR(sublist))
                        return PTR_ERR(sublist);
                erif_list->kvdl_index = sublist->rigr2_kvdl_index;
        } else {
                sublist = list_last_entry(&erif_list->erif_sublists,
                                          struct mlxsw_sp_mr_erif_sublist,
                                          list);
                sublist->synced = false;
                if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
                        sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
                                                                  erif_list);
                        if (IS_ERR(sublist))
                                return PTR_ERR(sublist);
                }
        }

        /* Add the eRIF to the last sublist's next free index */
        sublist->erif_indices[sublist->num_erifs++] = erif_index;
        return 0;
}

static void
mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
        struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;

        list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
                                 list)
                mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
}

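/* Write all unsynced sublists to hardware using the RIGR2 register. The
 * sublists are chained: each packed entry except the last carries the KVDL
 * index of the next one:
 *
 *   kvdl_index -> [sublist] -> [sublist] -> ... -> [sublist] (no next)
 */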
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
        struct mlxsw_sp_mr_erif_sublist *curr_sublist;
        char rigr2_pl[MLXSW_REG_RIGR2_LEN];
        int err;
        int i;

        list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
                if (curr_sublist->synced)
                        continue;

                /* If the sublist is not the last one, pack the index of the
                 * next sublist; the last one is packed without a next index.
                 */
                if (list_is_last(&curr_sublist->list,
                                 &erif_list->erif_sublists)) {
                        mlxsw_reg_rigr2_pack(rigr2_pl,
                                             curr_sublist->rigr2_kvdl_index,
                                             false, 0);
                } else {
                        struct mlxsw_sp_mr_erif_sublist *next_sublist;

                        next_sublist = list_next_entry(curr_sublist, list);
                        mlxsw_reg_rigr2_pack(rigr2_pl,
                                             curr_sublist->rigr2_kvdl_index,
                                             true,
                                             next_sublist->rigr2_kvdl_index);
                }

                /* Pack all the eRIFs */
                for (i = 0; i < curr_sublist->num_erifs; i++) {
                        u16 erif_index = curr_sublist->erif_indices[i];

                        mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
                                                        erif_index);
                }

                /* Write the entry */
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
                                      rigr2_pl);
                if (err)
                        /* No rollback is needed here because this hardware
                         * entry is not yet pointed to by any TCAM rule.
                         */
                        return err;
                curr_sublist->synced = true;
        }
        return 0;
}

static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
                                       struct mlxsw_sp_mr_tcam_erif_list *from)
{
        list_splice(&from->erif_sublists, &to->erif_sublists);
        to->kvdl_index = from->kvdl_index;
}

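/* Per-route state: the TCAM slot (parman item), the flexible action block
 * executed on a lookup hit, the bound flow counter and the egress RIF list.
 */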
struct mlxsw_sp_mr_tcam_route {
        struct mlxsw_sp_mr_tcam_erif_list erif_list;
        struct mlxsw_afa_block *afa_block;
        u32 counter_index;
        struct parman_item parman_item;
        struct parman_prio *parman_prio;
        enum mlxsw_sp_mr_route_action action;
        struct mlxsw_sp_mr_route_key key;
        u16 irif_index;
        u16 min_mtu;
};

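/* Build the flexible action block for a route: bind the already-allocated
 * flow counter, then append a trap, a forward to the committed erif_list,
 * or both, according to the route action.
 */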
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
                                  enum mlxsw_sp_mr_route_action route_action,
                                  u16 irif_index, u32 counter_index,
                                  u16 min_mtu,
                                  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
        struct mlxsw_afa_block *afa_block;
        int err;

        afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
        if (!afa_block)
                return ERR_PTR(-ENOMEM);

        err = mlxsw_afa_block_append_allocated_counter(afa_block,
                                                       counter_index);
        if (err)
                goto err;

        switch (route_action) {
        case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
                err = mlxsw_afa_block_append_trap(afa_block,
                                                  MLXSW_TRAP_ID_ACL1);
                if (err)
                        goto err;
                break;
        case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
        case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
                /* If we are about to append a multicast router action, commit
                 * the erif_list.
                 */
                err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
                if (err)
                        goto err;

                err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
                                                      min_mtu, false,
                                                      erif_list->kvdl_index);
                if (err)
                        goto err;

                if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
                        err = mlxsw_afa_block_append_trap_and_forward(afa_block,
                                                                      MLXSW_TRAP_ID_ACL2);
                        if (err)
                                goto err;
                }
                break;
        default:
                err = -EINVAL;
                goto err;
        }

        err = mlxsw_afa_block_commit(afa_block);
        if (err)
                goto err;
        return afa_block;
err:
        mlxsw_afa_block_destroy(afa_block);
        return ERR_PTR(err);
}

static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
        mlxsw_afa_block_destroy(afa_block);
}

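/* Write the route to the TCAM index chosen by parman, using the RMFT2
 * register. Only IPv4 keys are handled at this point.
 */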
static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
                                          struct parman_item *parman_item,
                                          struct mlxsw_sp_mr_route_key *key,
                                          struct mlxsw_afa_block *afa_block)
{
        char rmft2_pl[MLXSW_REG_RMFT2_LEN];

        switch (key->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
                                          key->vrid,
                                          MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
                                          ntohl(key->group.addr4),
                                          ntohl(key->group_mask.addr4),
                                          ntohl(key->source.addr4),
                                          ntohl(key->source_mask.addr4),
                                          mlxsw_afa_block_first_set(afa_block));
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
        default:
                WARN_ON_ONCE(1);
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
                                         struct parman_item *parman_item)
{
        char rmft2_pl[MLXSW_REG_RMFT2_LEN];

        mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
                                  0, 0, 0, 0, 0, 0, NULL);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_mr_tcam_erif_list *erif_list,
                               struct mlxsw_sp_mr_route_info *route_info)
{
        int err;
        int i;

        for (i = 0; i < route_info->erif_num; i++) {
                u16 erif_index = route_info->erif_indices[i];

                err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
                                                erif_index);
                if (err)
                        return err;
        }
        return 0;
}

static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
                                       struct mlxsw_sp_mr_tcam_route *route,
                                       enum mlxsw_sp_mr_route_prio prio)
{
        struct parman_prio *parman_prio = NULL;
        int err;

        switch (route->key.proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
                err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
                                      parman_prio, &route->parman_item);
                if (err)
                        return err;
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
        default:
                WARN_ON_ONCE(1);
        }
        route->parman_prio = parman_prio;
        return 0;
}

static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
                                          struct mlxsw_sp_mr_tcam_route *route)
{
        switch (route->key.proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
                                   route->parman_prio, &route->parman_item);
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
        default:
                WARN_ON_ONCE(1);
        }
}

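/* Route creation: populate the egress RIF list, allocate a flow counter,
 * build the action block, reserve a TCAM slot and finally write the route.
 * The error path unwinds these steps in reverse order.
 */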
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
                              void *route_priv,
                              struct mlxsw_sp_mr_route_params *route_params)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;
        int err;

        route->key = route_params->key;
        route->irif_index = route_params->value.irif_index;
        route->min_mtu = route_params->value.min_mtu;
        route->action = route_params->value.route_action;

        /* Create the egress RIFs list */
        mlxsw_sp_mr_erif_list_init(&route->erif_list);
        err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
                                             &route_params->value);
        if (err)
                goto err_erif_populate;

        /* Create the flow counter */
        err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
        if (err)
                goto err_counter_alloc;

        /* Create the flexible action block */
        route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
                                                             route->action,
                                                             route->irif_index,
                                                             route->counter_index,
                                                             route->min_mtu,
                                                             &route->erif_list);
        if (IS_ERR(route->afa_block)) {
                err = PTR_ERR(route->afa_block);
                goto err_afa_block_create;
        }

        /* Allocate a place in the TCAM */
        err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
                                                     route_params->prio);
        if (err)
                goto err_parman_item_add;

        /* Write the route to the TCAM */
        err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
                                             &route->key, route->afa_block);
        if (err)
                goto err_route_replace;
        return 0;

err_route_replace:
        mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
err_parman_item_add:
        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
        mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_erif_populate:
err_counter_alloc:
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
        return err;
}

static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
                                           void *priv, void *route_priv)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;

        mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
                                      &route->parman_item);
        mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
        mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}

static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
                                        void *route_priv, u64 *packets,
                                        u64 *bytes)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;

        return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
                                         packets, bytes);
}

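/* The update helpers below are make-before-break: a new action block is
 * built and the TCAM entry is re-pointed at it before the old block is
 * destroyed, so the route keeps working throughout the update.
 */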
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
                                     void *route_priv,
                                     enum mlxsw_sp_mr_route_action route_action)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_afa_block *afa_block;
        int err;

        /* Create a new flexible action block */
        afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
                                                      route->irif_index,
                                                      route->counter_index,
                                                      route->min_mtu,
                                                      &route->erif_list);
        if (IS_ERR(afa_block))
                return PTR_ERR(afa_block);

        /* Update the TCAM route entry */
        err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
                                             &route->key, afa_block);
        if (err)
                goto err;

        /* Delete the old action block */
        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
        route->afa_block = afa_block;
        route->action = route_action;
        return 0;
err:
        mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
        return err;
}

static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
                                                 void *route_priv, u16 min_mtu)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_afa_block *afa_block;
        int err;

        /* Create a new flexible action block */
        afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
                                                      route->action,
                                                      route->irif_index,
                                                      route->counter_index,
                                                      min_mtu,
                                                      &route->erif_list);
        if (IS_ERR(afa_block))
                return PTR_ERR(afa_block);

        /* Update the TCAM route entry */
        err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
                                             &route->key, afa_block);
        if (err)
                goto err;

        /* Delete the old action block */
        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
        route->afa_block = afa_block;
        route->min_mtu = min_mtu;
        return 0;
err:
        mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
        return err;
}

static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
                                              void *route_priv, u16 irif_index)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;

        /* The iRIF can only be updated in place while the route traps to the
         * CPU; forwarding routes embed the iRIF in the action block.
         */
        if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
                return -EINVAL;
        route->irif_index = irif_index;
        return 0;
}

static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
                                           void *route_priv, u16 erif_index)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        int err;

        err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
                                        erif_index);
        if (err)
                return err;

        /* Commit the erif_list to hardware only if the route action is not
         * TRAP, since only forwarding actions reference it.
         */
        if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
                return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
                                                    &route->erif_list);
        return 0;
}

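/* An eRIF cannot be removed from the list in place: build a copy of the
 * erif_list without the deleted entry, point a new action block at it,
 * replace the TCAM entry and only then release the old list and block.
 */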
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
                                           void *route_priv, u16 erif_index)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_erif_sublist *erif_sublist;
        struct mlxsw_sp_mr_tcam_erif_list erif_list;
        struct mlxsw_afa_block *afa_block;
        int err;
        int i;

        /* Create a copy of the original erif_list without the deleted entry */
        mlxsw_sp_mr_erif_list_init(&erif_list);
        list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists,
                            list) {
                for (i = 0; i < erif_sublist->num_erifs; i++) {
                        u16 curr_erif = erif_sublist->erif_indices[i];

                        if (curr_erif == erif_index)
                                continue;
                        err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
                                                        curr_erif);
                        if (err)
                                goto err_erif_list_add;
                }
        }

        /* Create the flexible action block pointing to the new erif_list */
        afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
                                                      route->irif_index,
                                                      route->counter_index,
                                                      route->min_mtu,
                                                      &erif_list);
        if (IS_ERR(afa_block)) {
                err = PTR_ERR(afa_block);
                goto err_afa_block_create;
        }

        /* Update the TCAM route entry */
        err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
                                             &route->key, afa_block);
        if (err)
                goto err_route_write;

        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
        route->afa_block = afa_block;
        mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
        return 0;

err_route_write:
        mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
        return err;
}

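/* A full route update uses the same copy-and-swap scheme as eRIF deletion,
 * with the action, iRIF and minimum MTU taken from the new route info.
 */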
static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
                              struct mlxsw_sp_mr_route_info *route_info)
{
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_tcam_erif_list erif_list;
        struct mlxsw_afa_block *afa_block;
        int err;

        /* Create a new erif_list */
        mlxsw_sp_mr_erif_list_init(&erif_list);
        err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
        if (err)
                goto err_erif_populate;

        /* Create the flexible action block pointing to the new erif_list */
        afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
                                                      route_info->route_action,
                                                      route_info->irif_index,
                                                      route->counter_index,
                                                      route_info->min_mtu,
                                                      &erif_list);
        if (IS_ERR(afa_block)) {
                err = PTR_ERR(afa_block);
                goto err_afa_block_create;
        }

        /* Update the TCAM route entry */
        err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
                                             &route->key, afa_block);
        if (err)
                goto err_route_write;

        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
        route->afa_block = afa_block;
        mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
        route->action = route_info->route_action;
        route->irif_index = route_info->irif_index;
        route->min_mtu = route_info->min_mtu;
        return 0;

err_route_write:
        mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
        return err;
}

#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16

static int
mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
        struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
        char rtar_pl[MLXSW_REG_RTAR_LEN];

        mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
                            mr_tcam_region->rtar_key_type,
                            MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void
mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
        struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
        char rtar_pl[MLXSW_REG_RTAR_LEN];

        mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
                            mr_tcam_region->rtar_key_type, 0);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
                                                 unsigned long new_count)
{
        struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
        struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
        char rtar_pl[MLXSW_REG_RTAR_LEN];
        u64 max_tcam_rules;

        max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
        if (new_count > max_tcam_rules)
                return -EINVAL;
        mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
                            mr_tcam_region->rtar_key_type, new_count);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
                                                unsigned long from_index,
                                                unsigned long to_index,
                                                unsigned long count)
{
        struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
        struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
        char rrcr_pl[MLXSW_REG_RRCR_LEN];

        mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
                            from_index, count,
                            mr_tcam_region->rtar_key_type, to_index);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}

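/* parman callbacks: resizing reallocates the TCAM region through RTAR,
 * bounded by the ACL_MAX_TCAM_RULES resource, and moving copies rules
 * between TCAM indices through RRCR. The LSORT algorithm keeps entries
 * ordered by priority within the region.
 */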
static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
        .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
        .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
        .resize = mlxsw_sp_mr_tcam_region_parman_resize,
        .move = mlxsw_sp_mr_tcam_region_parman_move,
        .algo = PARMAN_ALGO_TYPE_LSORT,
};

static int
mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
                             enum mlxsw_reg_rtar_key_type rtar_key_type)
{
        struct parman_prio *parman_prios;
        struct parman *parman;
        int err;
        int i;

        mr_tcam_region->rtar_key_type = rtar_key_type;
        mr_tcam_region->mlxsw_sp = mlxsw_sp;

        err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
        if (err)
                return err;

        parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
                               mr_tcam_region);
        if (!parman) {
                err = -ENOMEM;
                goto err_parman_create;
        }
        mr_tcam_region->parman = parman;

        parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
                                     sizeof(*parman_prios), GFP_KERNEL);
        if (!parman_prios) {
                err = -ENOMEM;
                goto err_parman_prios_alloc;
        }
        mr_tcam_region->parman_prios = parman_prios;

        for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
                parman_prio_init(mr_tcam_region->parman,
                                 &mr_tcam_region->parman_prios[i], i);
        return 0;

err_parman_prios_alloc:
        parman_destroy(parman);
err_parman_create:
        mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
        return err;
}

static void
mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
        int i;

        for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
                parman_prio_fini(&mr_tcam_region->parman_prios[i]);
        kfree(mr_tcam_region->parman_prios);
        parman_destroy(mr_tcam_region->parman);
        mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
}

static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
            !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
                return -EIO;

        return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
                                            &mr_tcam->ipv4_tcam_region,
                                            MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
}

static void mlxsw_sp_mr_tcam_fini(void *priv)
{
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;

        mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
}

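/* The multicast routing ops exposed to spectrum_mr; route manipulation is
 * funneled through the TCAM and parman machinery above.
 */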
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
        .priv_size = sizeof(struct mlxsw_sp_mr_tcam),
        .route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
        .init = mlxsw_sp_mr_tcam_init,
        .route_create = mlxsw_sp_mr_tcam_route_create,
        .route_update = mlxsw_sp_mr_tcam_route_update,
        .route_stats = mlxsw_sp_mr_tcam_route_stats,
        .route_action_update = mlxsw_sp_mr_tcam_route_action_update,
        .route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
        .route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
        .route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
        .route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
        .route_destroy = mlxsw_sp_mr_tcam_route_destroy,
        .fini = mlxsw_sp_mr_tcam_fini,
};