drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/rhashtable.h>
#include <net/ipv6.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
	const struct mlxsw_sp_mr_ops *mr_ops;
	void *catchall_route_priv;
	struct delayed_work stats_update_dw;
	struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
	unsigned long priv[0];
	/* priv has to always be the last item */
};

struct mlxsw_sp_mr_vif;
struct mlxsw_sp_mr_vif_ops {
	bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
};

struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
	unsigned long vif_flags;

	/* A list of route_vif_entry structs that point to routes in which
	 * the VIF instance is used as one of the egress VIFs
	 */
	struct list_head route_evif_list;

	/* A list of route_vif_entry structs that point to routes in which
	 * the VIF instance is used as the ingress VIF
	 */
	struct list_head route_ivif_list;

	/* Protocol-specific operations for a VIF */
	const struct mlxsw_sp_mr_vif_ops *ops;
};

struct mlxsw_sp_mr_route_vif_entry {
	struct list_head vif_node;
	struct list_head route_node;
	struct mlxsw_sp_mr_vif *mr_vif;
	struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table;
struct mlxsw_sp_mr_table_ops {
	bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mr_mfc *mfc);
	void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
			   struct mlxsw_sp_mr_route_key *key,
			   struct mr_mfc *mfc);
	bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mlxsw_sp_mr_route *mr_route);
};

struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp *mlxsw_sp;
	u32 vr_id;
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct rhashtable route_ht;
	const struct mlxsw_sp_mr_table_ops *ops;
	char catchall_route_priv[0];
	/* catchall_route_priv has to always be the last item */
};

struct mlxsw_sp_mr_route {
	struct list_head node;
	struct rhash_head ht_node;
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mr_mfc *mfc;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
	struct list_head evif_list;
	/* A route_vif_entry struct that points to the ingress VIF */
	struct mlxsw_sp_mr_route_vif_entry ivif;
};

static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_mr_route_key),
	.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
	.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
	.automatic_shrinking = true,
};

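/* A VIF is considered valid only when it is a regular interface (e.g. not a
 * tunnel or a pimreg device) and has both a netdevice and a RIF bound to it.
 */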
static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->dev;
}

static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
	vifi_t ivif = mr_route->mfc->mfc_parent;

	return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}

static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	int valid_evifs;

	valid_evifs = 0;
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
			valid_evifs++;
	return valid_evifs;
}

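/* Compute the action a route should be programmed with: forward in hardware,
 * trap to the CPU, or trap-and-forward when some egress VIFs still require
 * routing in software.
 */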
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* If the ingress VIF is not regular and resolved, trap the route */
	if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* The kernel does not match a (*,G) route whose ingress interface is
	 * not one of the egress interfaces, so trap these kinds of routes.
	 */
	if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						    mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If the route has no valid eVIFs, trap it. */
	if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If one of the eVIFs has no RIF, trap-and-forward the route as there
	 * is more routing to do in software as well.
	 */
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
			return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

	return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						       mr_route) ?
		MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
				       struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	rve = kzalloc(sizeof(*rve), GFP_KERNEL);
	if (!rve)
		return -ENOMEM;
	rve->mr_route = mr_route;
	rve->mr_vif = mr_vif;
	list_add_tail(&rve->route_node, &mr_route->evif_list);
	list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
	return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
	list_del(&rve->route_node);
	list_del(&rve->vif_node);
	kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
					struct mlxsw_sp_mr_vif *mr_vif)
{
	mr_route->ivif.mr_route = mr_route;
	mr_route->ivif.mr_vif = mr_vif;
	list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
	list_del(&mr_route->ivif.vif_node);
}

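/* Collect the information needed to program a route to the hardware: the
 * ingress RIF index (or 0 when the ingress VIF is not resolved) and an array
 * of all currently valid egress RIF indices. The caller must release the
 * eRIF array with mlxsw_sp_mr_route_info_destroy().
 */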
static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
			      struct mlxsw_sp_mr_route *mr_route,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	u16 *erif_indices;
	u16 irif_index;
	u16 erif = 0;

	erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
				     GFP_KERNEL);
	if (!erif_indices)
		return -ENOMEM;

	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
			u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

			erif_indices[erif++] = rifi;
		}
	}

	if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
	else
		irif_index = 0;

	route_info->irif_index = irif_index;
	route_info->erif_indices = erif_indices;
	route_info->min_mtu = mr_route->min_mtu;
	route_info->route_action = mr_route->route_action;
	route_info->erif_num = erif;
	return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
	kfree(route_info->erif_indices);
}

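/* Write a route to the hardware. When 'replace' is false a new hardware route
 * is created and fresh route_priv is allocated for it; otherwise the existing
 * hardware route is updated in place with the new route information.
 */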
static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route *mr_route,
				   bool replace)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_info route_info;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	int err;

	err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
	if (err)
		return err;

	if (!replace) {
		struct mlxsw_sp_mr_route_params route_params;

		mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
					       GFP_KERNEL);
		if (!mr_route->route_priv) {
			err = -ENOMEM;
			goto out;
		}

		route_params.key = mr_route->key;
		route_params.value = route_info;
		route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
		err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
					       mr_route->route_priv,
					       &route_params);
		if (err)
			kfree(mr_route->route_priv);
	} else {
		err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
					       &route_info);
	}
out:
	mlxsw_sp_mr_route_info_destroy(&route_info);
	return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
	kfree(mr_route->route_priv);
}

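/* Allocate a driver route object for an MFC cache entry: take a reference on
 * the MFC entry, build the lookup key, link the ingress and egress VIFs and
 * compute the initial minimum MTU and route action.
 */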
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
			 struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
	int err = 0;
	int i;

	/* Allocate and init a new route and fill it with parameters */
	mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);

	/* Find min_mtu and link iVIF and eVIFs */
	mr_route->min_mtu = ETH_MAX_MTU;
	mr_cache_hold(mfc);
	mr_route->mfc = mfc;
	mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);

	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
				goto err;
			if (mr_table->vifs[i].dev &&
			    mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
				mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route,
				    &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	mr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

	mlxsw_sp_mr_route_ivif_unlink(mr_route);
	mr_cache_put(mr_route->mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
}

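/* Reflect the offload state of a route in the kernel MFC entry so that user
 * space dumps show whether the route is handled by the hardware.
 */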
static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
					bool offload)
{
	if (offload)
		mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
	else
		mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
	bool offload;

	offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	mlxsw_sp_mr_mfc_offload_set(mr_route, false);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
	list_del(&mr_route->node);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}

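/* Add or replace an MFC entry in the hardware. In the replace case the new
 * route takes over the existing hardware state (route_priv) and the original
 * route object is destroyed once the hardware has been updated.
 */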
int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	if (!mr_table->ops->is_route_valid(mr_table, mfc))
		return -EINVAL;

	/* Create a new route */
	mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

	/* Find any route with a matching key */
	mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
					       &mr_route->key,
					       mlxsw_sp_mr_route_ht_params);
	if (replace) {
		/* In the replace case, make the new route use the original
		 * route's route_priv.
		 */
		if (WARN_ON(!mr_orig_route)) {
			err = -ENOENT;
			goto err_no_orig_route;
		}
		mr_route->route_priv = mr_orig_route->route_priv;
	} else if (mr_orig_route) {
		/* In the non-replace case, if another route with the same key
		 * was found, abort, as duplicate routes are used for proxy
		 * routes.
		 */
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		err = -EINVAL;
		goto err_duplicate_route;
	}

	/* Put it in the table data-structures */
	list_add_tail(&mr_route->node, &mr_table->route_list);
	err = rhashtable_insert_fast(&mr_table->route_ht,
				     &mr_route->ht_node,
				     mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Write the route to the hardware */
	err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
	if (err)
		goto err_mr_route_write;

	/* Destroy the original route */
	if (replace) {
		rhashtable_remove_fast(&mr_table->route_ht,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
	return 0;

err_mr_route_write:
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
	list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
	return err;
}

void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
			   struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route *mr_route;
	struct mlxsw_sp_mr_route_key key;

	mr_table->ops->key_create(mr_table, &key, mfc);
	mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
					  mlxsw_sp_mr_route_ht_params);
	if (mr_route)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
}

/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 irif_index;
	int err;

	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return 0;

	/* rve->mr_vif->rif is guaranteed to be valid at this stage */
	irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
	err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
					    irif_index);
	if (err)
		return err;

	err = mr->mr_ops->route_action_update(mlxsw_sp,
					      rve->mr_route->route_priv,
					      route_action);
	if (err)
		/* No need to rollback here because the iRIF change only takes
		 * place after the action has been updated.
		 */
		return err;

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
					MLXSW_SP_MR_ROUTE_ACTION_TRAP);
	rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 erif_index = 0;
	int err;

	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
	 * device which will require updating the action.
	 */
	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action) {
		err = mr->mr_ops->route_action_update(mlxsw_sp,
						      rve->mr_route->route_priv,
						      route_action);
		if (err)
			return err;
	}

	/* Add the eRIF */
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
		err = mr->mr_ops->route_erif_add(mlxsw_sp,
						 rve->mr_route->route_priv,
						 erif_index);
		if (err)
			goto err_route_erif_add;
	}

	/* Update the minimum MTU */
	if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
		rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
		err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
						       rve->mr_route->route_priv,
						       rve->mr_route->min_mtu);
		if (err)
			goto err_route_min_mtu_update;
	}

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;

err_route_min_mtu_update:
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
					   erif_index);
err_route_erif_add:
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						rve->mr_route->route_action);
	return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 rifi;

	/* If the unresolved RIF was not valid, no need to delete it */
	if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
		return;

	/* Update the route action: if there is only one valid eVIF in the
	 * route, set the action to trap as the VIF deletion will lead to zero
	 * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
	 * determine the route action.
	 */
	if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
		route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	else
		route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						route_action);

	/* Delete the eRIF from the route */
	rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
	mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

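/* Bind a netdevice and RIF to a VIF and resolve every route in which the VIF
 * is used as an ingress or egress interface. On failure, previously resolved
 * routes are rolled back to their unresolved state.
 */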
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
				   struct net_device *dev,
				   struct mlxsw_sp_mr_vif *mr_vif,
				   unsigned long vif_flags,
				   const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
	int err;

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = rif;
	mr_vif->vif_flags = vif_flags;

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
		err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
		if (err)
			goto err_irif_unresolve;
	}

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
		err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
		if (err)
			goto err_erif_unresolve;
	}
	return 0;

err_erif_unresolve:
	list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
					     vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
	list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
					     vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
	mr_vif->rif = NULL;
	return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				      struct net_device *dev,
				      struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
			struct net_device *dev, vifi_t vif_index,
			unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];

	if (WARN_ON(vif_index >= MAXVIFS))
		return -EINVAL;
	if (mr_vif->dev)
		return -EEXIST;
	return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
	struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];

	if (WARN_ON(vif_index >= MAXVIFS))
		return;
	if (WARN_ON(!mr_vif->dev))
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}

static struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
			   const struct net_device *dev)
{
	vifi_t vif_index;

	for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
		if (mr_table->vifs[vif_index].dev == dev)
			return &mr_table->vifs[vif_index];
	return NULL;
}

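/* When a RIF is added or removed for a netdevice that also backs a multicast
 * VIF, resolve or unresolve that VIF so the routes using it are updated.
 */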
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
			const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return 0;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return 0;
	return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
				       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
			 const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
				const struct mlxsw_sp_rif *rif, int mtu)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_vif_entry *rve;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	/* Search for a VIF that uses that RIF */
	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;

	/* Update all the routes that use that VIF as an eVIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
		if (mtu < rve->mr_route->min_mtu) {
			rve->mr_route->min_mtu = mtu;
			mr->mr_ops->route_min_mtu_update(mlxsw_sp,
							 rve->mr_route->route_priv,
							 mtu);
		}
	}
}

/* Protocol specific functions */
static bool
mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc_cache *mfc = (struct mfc_cache *) c;

	/* If the route is a (*,*) route, abort, as these kinds of routes are
	 * used for proxy routes.
	 */
	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

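/* Build the lookup key for an IPv4 route. The group address is always matched
 * exactly (full mask); for a (*,G) route the source mask is left all-zero so
 * that any source address matches.
 */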
static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc_cache *mfc = (struct mfc_cache *) c;
	bool starg;

	starg = (mfc->mfc_origin == htonl(INADDR_ANY));

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV4;
	key->group.addr4 = mfc->mfc_mcastgrp;
	key->group_mask.addr4 = htonl(0xffffffff);
	key->source.addr4 = mfc->mfc_origin;
	key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
}

static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
}

static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool
mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	/* If the route is a (*,*) route, abort, as these kinds of routes are
	 * used for proxy routes.
	 */
	if (ipv6_addr_any(&mfc->mf6c_origin) &&
	    ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV6;
	key->group.addr6 = mfc->mf6c_mcastgrp;
	memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
	key->source.addr6 = mfc->mf6c_origin;
	if (!ipv6_addr_any(&mfc->mf6c_origin))
		memset(&key->source_mask.addr6, 0xff,
		       sizeof(key->source_mask.addr6));
}

static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return ipv6_addr_any(&mr_route->key.source_mask.addr6);
}

static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & MIFF_REGISTER);
}

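/* Per-protocol operations; both arrays are indexed by the table's
 * enum mlxsw_sp_l3proto value, with the IPv4 entry first and the IPv6 entry
 * second.
 */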
static struct
mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
	{
		.is_regular = mlxsw_sp_mr_vif4_is_regular,
	},
	{
		.is_regular = mlxsw_sp_mr_vif6_is_regular,
	},
};

static struct
mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
	{
		.is_route_valid = mlxsw_sp_mr_route4_validate,
		.key_create = mlxsw_sp_mr_route4_key,
		.is_route_starg = mlxsw_sp_mr_route4_starg,
	},
	{
		.is_route_valid = mlxsw_sp_mr_route6_validate,
		.key_create = mlxsw_sp_mr_route6_key,
		.is_route_starg = mlxsw_sp_mr_route6_starg,
	},
};

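/* Create a multicast routing table for a virtual router. A lowest-priority
 * catch-all route is installed so that multicast packets which do not match
 * any offloaded route are trapped to the CPU.
 */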
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_mr_route_params catchall_route_params = {
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
			.proto = proto,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
		}
	};
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_table *mr_table;
	int err;
	int i;

	mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
			   GFP_KERNEL);
	if (!mr_table)
		return ERR_PTR(-ENOMEM);

	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
	INIT_LIST_HEAD(&mr_table->route_list);

	err = rhashtable_init(&mr_table->route_ht,
			      &mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_route_rhashtable_init;

	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
		mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_table->catchall_route_priv,
				       &catchall_route_params);
	if (err)
		goto err_ops_route_create;
	list_add_tail(&mr_table->node, &mr->table_list);
	return mr_table;

err_ops_route_create:
	rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
	kfree(mr_table);
	return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
	list_del(&mr_table->node);
	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
				  &mr_table->catchall_route_priv);
	rhashtable_destroy(&mr_table->route_ht);
	kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp_mr_route *mr_route, *tmp;
	int i;

	list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);

	for (i = 0; i < MAXVIFS; i++) {
		mr_table->vifs[i].dev = NULL;
		mr_table->vifs[i].rif = NULL;
	}
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
	int i;

	for (i = 0; i < MAXVIFS; i++)
		if (mr_table->vifs[i].dev)
			return false;
	return list_empty(&mr_table->route_list);
}

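/* Copy the packet and byte counters of an offloaded route from the hardware
 * into the kernel MFC entry, updating its last-use time when the packet count
 * changes.
 */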
static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u64 packets, bytes;

	if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return;

	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	if (mr_route->mfc->mfc_un.res.pkt != packets)
		mr_route->mfc->mfc_un.res.lastuse = jiffies;
	mr_route->mfc->mfc_un.res.pkt = packets;
	mr_route->mfc->mfc_un.res.bytes = bytes;
}

static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
	struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
					      stats_update_dw.work);
	struct mlxsw_sp_mr_table *mr_table;
	struct mlxsw_sp_mr_route *mr_route;
	unsigned long interval;

	rtnl_lock();
	list_for_each_entry(mr_table, &mr->table_list, node)
		list_for_each_entry(mr_route, &mr_table->route_list, node)
			mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
						       mr_route);
	rtnl_unlock();

	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops)
{
	struct mlxsw_sp_mr *mr;
	unsigned long interval;
	int err;

	mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
	if (!mr)
		return -ENOMEM;
	mr->mr_ops = mr_ops;
	mlxsw_sp->mr = mr;
	INIT_LIST_HEAD(&mr->table_list);

	err = mr_ops->init(mlxsw_sp, mr->priv);
	if (err)
		goto err;

	/* Create the delayed work for counter updates */
	INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
	return 0;
err:
	kfree(mr);
	return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	cancel_delayed_work_sync(&mr->stats_update_dw);
	mr->mr_ops->fini(mlxsw_sp, mr->priv);
	kfree(mr);
}