1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
53 struct list_head nexthop_list;
54 struct list_head neigh_list;
55 struct net_device *dev; /* NULL for underlay RIF */
56 struct mlxsw_sp_fid *fid;
57 unsigned char addr[ETH_ALEN];
61 const struct mlxsw_sp_rif_ops *ops;
62 struct mlxsw_sp *mlxsw_sp;
64 unsigned int counter_ingress;
65 bool counter_ingress_valid;
66 unsigned int counter_egress;
67 bool counter_egress_valid;
70 struct mlxsw_sp_rif_params {
71 struct net_device *dev;
80 struct mlxsw_sp_rif_subport {
81 struct mlxsw_sp_rif common;
91 struct mlxsw_sp_rif_ipip_lb {
92 struct mlxsw_sp_rif common;
93 struct mlxsw_sp_rif_ipip_lb_config lb_config;
94 u16 ul_vr_id; /* Reserved for Spectrum-2. */
95 u16 ul_rif_id; /* Reserved for Spectrum. */
98 struct mlxsw_sp_rif_params_ipip_lb {
99 struct mlxsw_sp_rif_params common;
100 struct mlxsw_sp_rif_ipip_lb_config lb_config;
103 struct mlxsw_sp_rif_ops {
104 enum mlxsw_sp_rif_type type;
107 void (*setup)(struct mlxsw_sp_rif *rif,
108 const struct mlxsw_sp_rif_params *params);
109 int (*configure)(struct mlxsw_sp_rif *rif);
110 void (*deconfigure)(struct mlxsw_sp_rif *rif);
111 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
112 struct netlink_ext_ack *extack);
113 void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
116 static struct mlxsw_sp_rif *
117 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
118 const struct net_device *dev);
119 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
120 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
121 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
122 struct mlxsw_sp_lpm_tree *lpm_tree);
123 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
124 const struct mlxsw_sp_fib *fib,
126 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
127 const struct mlxsw_sp_fib *fib);
129 static unsigned int *
130 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
131 enum mlxsw_sp_rif_counter_dir dir)
134 case MLXSW_SP_RIF_COUNTER_EGRESS:
135 return &rif->counter_egress;
136 case MLXSW_SP_RIF_COUNTER_INGRESS:
137 return &rif->counter_ingress;
143 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
144 enum mlxsw_sp_rif_counter_dir dir)
147 case MLXSW_SP_RIF_COUNTER_EGRESS:
148 return rif->counter_egress_valid;
149 case MLXSW_SP_RIF_COUNTER_INGRESS:
150 return rif->counter_ingress_valid;
156 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
157 enum mlxsw_sp_rif_counter_dir dir,
161 case MLXSW_SP_RIF_COUNTER_EGRESS:
162 rif->counter_egress_valid = valid;
164 case MLXSW_SP_RIF_COUNTER_INGRESS:
165 rif->counter_ingress_valid = valid;
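/* Bind or unbind (per 'enable') an already allocated counter to a RIF in the
 * given direction, by querying the RIF's RITR record and re-writing it with
 * the updated counter fields.
 */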
170 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
171 unsigned int counter_index, bool enable,
172 enum mlxsw_sp_rif_counter_dir dir)
174 char ritr_pl[MLXSW_REG_RITR_LEN];
175 bool is_egress = false;
178 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
180 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
181 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
185 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
187 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
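/* Read the current value of a RIF counter via the RICNT register. Fails if no
 * counter is allocated for the requested direction; only the good unicast
 * packets field is reported.
 */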
190 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
191 struct mlxsw_sp_rif *rif,
192 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
194 char ricnt_pl[MLXSW_REG_RICNT_LEN];
195 unsigned int *p_counter_index;
199 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
203 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
204 if (!p_counter_index)
206 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
207 MLXSW_REG_RICNT_OPCODE_NOP);
208 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
211 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
215 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
216 unsigned int counter_index)
218 char ricnt_pl[MLXSW_REG_RICNT_LEN];
220 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
221 MLXSW_REG_RICNT_OPCODE_CLEAR);
222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
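/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the given direction. On failure the counter is released again
 * and the RIF is left unchanged.
 */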
225 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
226 struct mlxsw_sp_rif *rif,
227 enum mlxsw_sp_rif_counter_dir dir)
229 unsigned int *p_counter_index;
232 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
233 if (!p_counter_index)
235 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
240 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
242 goto err_counter_clear;
244 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
245 *p_counter_index, true, dir);
247 goto err_counter_edit;
248 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
253 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
258 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
259 struct mlxsw_sp_rif *rif,
260 enum mlxsw_sp_rif_counter_dir dir)
262 unsigned int *p_counter_index;
264 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
267 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
268 if (WARN_ON(!p_counter_index))
270 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
271 *p_counter_index, false, dir);
272 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
274 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
277 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
279 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
280 struct devlink *devlink;
282 devlink = priv_to_devlink(mlxsw_sp->core);
283 if (!devlink_dpipe_table_counter_enabled(devlink,
284 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
286 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
289 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
291 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
293 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
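/* 129 possible prefix lengths: /0 through /128, sized for IPv6, the widest
 * supported protocol; IPv4 simply never uses bits above /32.
 */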
296 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
298 struct mlxsw_sp_prefix_usage {
299 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
302 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
303 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
306 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
307 struct mlxsw_sp_prefix_usage *prefix_usage2)
309 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
313 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
314 struct mlxsw_sp_prefix_usage *prefix_usage2)
316 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
320 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
321 unsigned char prefix_len)
323 set_bit(prefix_len, prefix_usage->b);
327 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
328 unsigned char prefix_len)
330 clear_bit(prefix_len, prefix_usage->b);
333 struct mlxsw_sp_fib_key {
334 unsigned char addr[sizeof(struct in6_addr)];
335 unsigned char prefix_len;
338 enum mlxsw_sp_fib_entry_type {
339 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
340 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
341 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
342 MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
343 MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
345 /* This is a special case of local delivery, where a packet should be
346 * decapsulated on reception. Note that there is no corresponding ENCAP,
347 * because that's a type of next hop, not of FIB entry. (There can be
348 * several next hops in a REMOTE entry, and some of them may be
349 * encapsulating entries.)
351 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
352 MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
355 struct mlxsw_sp_nexthop_group;
356 struct mlxsw_sp_fib_entry;
358 struct mlxsw_sp_fib_node {
359 struct mlxsw_sp_fib_entry *fib_entry;
360 struct list_head list;
361 struct rhash_head ht_node;
362 struct mlxsw_sp_fib *fib;
363 struct mlxsw_sp_fib_key key;
366 struct mlxsw_sp_fib_entry_decap {
367 struct mlxsw_sp_ipip_entry *ipip_entry;
371 struct mlxsw_sp_fib_entry {
372 struct mlxsw_sp_fib_node *fib_node;
373 enum mlxsw_sp_fib_entry_type type;
374 struct list_head nexthop_group_node;
375 struct mlxsw_sp_nexthop_group *nh_group;
376 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
379 struct mlxsw_sp_fib4_entry {
380 struct mlxsw_sp_fib_entry common;
387 struct mlxsw_sp_fib6_entry {
388 struct mlxsw_sp_fib_entry common;
389 struct list_head rt6_list;
393 struct mlxsw_sp_rt6 {
394 struct list_head list;
395 struct fib6_info *rt;
398 struct mlxsw_sp_lpm_tree {
400 unsigned int ref_count;
401 enum mlxsw_sp_l3proto proto;
402 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
403 struct mlxsw_sp_prefix_usage prefix_usage;
406 struct mlxsw_sp_fib {
407 struct rhashtable ht;
408 struct list_head node_list;
409 struct mlxsw_sp_vr *vr;
410 struct mlxsw_sp_lpm_tree *lpm_tree;
411 enum mlxsw_sp_l3proto proto;
415 u16 id; /* virtual router ID */
416 u32 tb_id; /* kernel fib table id */
417 unsigned int rif_count;
418 struct mlxsw_sp_fib *fib4;
419 struct mlxsw_sp_fib *fib6;
420 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
421 struct mlxsw_sp_rif *ul_rif;
422 refcount_t ul_rif_refcnt;
425 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
427 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
428 struct mlxsw_sp_vr *vr,
429 enum mlxsw_sp_l3proto proto)
431 struct mlxsw_sp_lpm_tree *lpm_tree;
432 struct mlxsw_sp_fib *fib;
435 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
436 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
438 return ERR_PTR(-ENOMEM);
439 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
441 goto err_rhashtable_init;
442 INIT_LIST_HEAD(&fib->node_list);
445 fib->lpm_tree = lpm_tree;
446 mlxsw_sp_lpm_tree_hold(lpm_tree);
447 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
449 goto err_lpm_tree_bind;
453 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
459 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
460 struct mlxsw_sp_fib *fib)
462 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
463 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
464 WARN_ON(!list_empty(&fib->node_list));
465 rhashtable_destroy(&fib->ht);
469 static struct mlxsw_sp_lpm_tree *
470 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
472 struct mlxsw_sp_lpm_tree *lpm_tree;
475 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
476 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
477 if (lpm_tree->ref_count == 0)
483 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
484 struct mlxsw_sp_lpm_tree *lpm_tree)
486 char ralta_pl[MLXSW_REG_RALTA_LEN];
488 mlxsw_reg_ralta_pack(ralta_pl, true,
489 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
491 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
494 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
495 struct mlxsw_sp_lpm_tree *lpm_tree)
497 char ralta_pl[MLXSW_REG_RALTA_LEN];
499 mlxsw_reg_ralta_pack(ralta_pl, false,
500 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
502 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
506 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
507 struct mlxsw_sp_prefix_usage *prefix_usage,
508 struct mlxsw_sp_lpm_tree *lpm_tree)
510 char ralst_pl[MLXSW_REG_RALST_LEN];
513 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
515 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
518 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
519 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
522 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
523 MLXSW_REG_RALST_BIN_NO_CHILD);
524 last_prefix = prefix;
526 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
529 static struct mlxsw_sp_lpm_tree *
530 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
531 struct mlxsw_sp_prefix_usage *prefix_usage,
532 enum mlxsw_sp_l3proto proto)
534 struct mlxsw_sp_lpm_tree *lpm_tree;
537 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
539 return ERR_PTR(-EBUSY);
540 lpm_tree->proto = proto;
541 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
545 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
548 goto err_left_struct_set;
549 memcpy(&lpm_tree->prefix_usage, prefix_usage,
550 sizeof(lpm_tree->prefix_usage));
551 memset(&lpm_tree->prefix_ref_count, 0,
552 sizeof(lpm_tree->prefix_ref_count));
553 lpm_tree->ref_count = 1;
557 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
561 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
562 struct mlxsw_sp_lpm_tree *lpm_tree)
564 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
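/* Get an LPM tree that already describes the requested prefix usage for this
 * protocol, taking another reference on it, or create a new tree if no such
 * tree exists.
 */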
567 static struct mlxsw_sp_lpm_tree *
568 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
569 struct mlxsw_sp_prefix_usage *prefix_usage,
570 enum mlxsw_sp_l3proto proto)
572 struct mlxsw_sp_lpm_tree *lpm_tree;
575 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
576 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
577 if (lpm_tree->ref_count != 0 &&
578 lpm_tree->proto == proto &&
579 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
581 mlxsw_sp_lpm_tree_hold(lpm_tree);
585 return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
588 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
590 lpm_tree->ref_count++;
593 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
594 struct mlxsw_sp_lpm_tree *lpm_tree)
596 if (--lpm_tree->ref_count == 0)
597 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
600 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
602 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
604 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
605 struct mlxsw_sp_lpm_tree *lpm_tree;
609 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
612 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
613 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
614 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
615 sizeof(struct mlxsw_sp_lpm_tree),
617 if (!mlxsw_sp->router->lpm.trees)
620 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
621 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
622 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
625 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
626 MLXSW_SP_L3_PROTO_IPV4);
627 if (IS_ERR(lpm_tree)) {
628 err = PTR_ERR(lpm_tree);
629 goto err_ipv4_tree_get;
631 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
633 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
634 MLXSW_SP_L3_PROTO_IPV6);
635 if (IS_ERR(lpm_tree)) {
636 err = PTR_ERR(lpm_tree);
637 goto err_ipv6_tree_get;
639 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
644 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
645 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
647 kfree(mlxsw_sp->router->lpm.trees);
651 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
653 struct mlxsw_sp_lpm_tree *lpm_tree;
655 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
656 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
658 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
659 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
661 kfree(mlxsw_sp->router->lpm.trees);
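/* A virtual router is considered in use as long as any of its FIBs or
 * multicast routing tables are still instantiated.
 */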
664 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
666 return !!vr->fib4 || !!vr->fib6 ||
667 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
668 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
671 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
673 struct mlxsw_sp_vr *vr;
676 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
677 vr = &mlxsw_sp->router->vrs[i];
678 if (!mlxsw_sp_vr_is_used(vr))
684 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
685 const struct mlxsw_sp_fib *fib, u8 tree_id)
687 char raltb_pl[MLXSW_REG_RALTB_LEN];
689 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
690 (enum mlxsw_reg_ralxx_protocol) fib->proto,
692 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
695 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
696 const struct mlxsw_sp_fib *fib)
698 char raltb_pl[MLXSW_REG_RALTB_LEN];
700 /* Bind to tree 0 which is default */
701 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
702 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
703 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
706 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
708 /* For our purpose, squash main, default and local tables into one */
709 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
710 tb_id = RT_TABLE_MAIN;
714 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
717 struct mlxsw_sp_vr *vr;
720 tb_id = mlxsw_sp_fix_tb_id(tb_id);
722 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
723 vr = &mlxsw_sp->router->vrs[i];
724 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
730 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
733 struct mlxsw_sp_vr *vr;
736 mutex_lock(&mlxsw_sp->router->lock);
737 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
744 mutex_unlock(&mlxsw_sp->router->lock);
748 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
749 enum mlxsw_sp_l3proto proto)
752 case MLXSW_SP_L3_PROTO_IPV4:
754 case MLXSW_SP_L3_PROTO_IPV6:
760 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
762 struct netlink_ext_ack *extack)
764 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
765 struct mlxsw_sp_fib *fib4;
766 struct mlxsw_sp_fib *fib6;
767 struct mlxsw_sp_vr *vr;
770 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
772 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
773 return ERR_PTR(-EBUSY);
775 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
777 return ERR_CAST(fib4);
778 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
781 goto err_fib6_create;
783 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
784 MLXSW_SP_L3_PROTO_IPV4);
785 if (IS_ERR(mr4_table)) {
786 err = PTR_ERR(mr4_table);
787 goto err_mr4_table_create;
789 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
790 MLXSW_SP_L3_PROTO_IPV6);
791 if (IS_ERR(mr6_table)) {
792 err = PTR_ERR(mr6_table);
793 goto err_mr6_table_create;
798 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
799 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
803 err_mr6_table_create:
804 mlxsw_sp_mr_table_destroy(mr4_table);
805 err_mr4_table_create:
806 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
808 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
812 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
813 struct mlxsw_sp_vr *vr)
815 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
816 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
817 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
818 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
819 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
821 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
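/* Look up the VR serving the given kernel table, creating it on first use. */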
825 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
826 struct netlink_ext_ack *extack)
828 struct mlxsw_sp_vr *vr;
830 tb_id = mlxsw_sp_fix_tb_id(tb_id);
831 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
833 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
837 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
839 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
840 list_empty(&vr->fib6->node_list) &&
841 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
842 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
843 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
847 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
848 enum mlxsw_sp_l3proto proto, u8 tree_id)
850 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
852 if (!mlxsw_sp_vr_is_used(vr))
854 if (fib->lpm_tree->id == tree_id)
859 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
860 struct mlxsw_sp_fib *fib,
861 struct mlxsw_sp_lpm_tree *new_tree)
863 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
866 fib->lpm_tree = new_tree;
867 mlxsw_sp_lpm_tree_hold(new_tree);
868 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
871 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
875 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
876 fib->lpm_tree = old_tree;
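/* Rebind every VR that currently uses the protocol's default LPM tree to
 * 'new_tree'. Prefix reference counts are carried over and the operation is
 * rolled back if binding any of the VRs fails.
 */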
880 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
881 struct mlxsw_sp_fib *fib,
882 struct mlxsw_sp_lpm_tree *new_tree)
884 enum mlxsw_sp_l3proto proto = fib->proto;
885 struct mlxsw_sp_lpm_tree *old_tree;
886 u8 old_id, new_id = new_tree->id;
887 struct mlxsw_sp_vr *vr;
890 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
891 old_id = old_tree->id;
893 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
894 vr = &mlxsw_sp->router->vrs[i];
895 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
897 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
898 mlxsw_sp_vr_fib(vr, proto),
901 goto err_tree_replace;
904 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
905 sizeof(new_tree->prefix_ref_count));
906 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
907 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
912 for (i--; i >= 0; i--) {
913 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
915 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
916 mlxsw_sp_vr_fib(vr, proto),
922 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
924 struct mlxsw_sp_vr *vr;
928 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
931 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
932 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
934 if (!mlxsw_sp->router->vrs)
937 for (i = 0; i < max_vrs; i++) {
938 vr = &mlxsw_sp->router->vrs[i];
945 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
947 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
949 /* At this stage we're guaranteed not to have new incoming
950 * FIB notifications and the work queue is free from FIBs
951 * sitting on top of mlxsw netdevs. However, we can still
952 * have other FIBs queued. Flush the queue before flushing
953 * the device's tables. No need for locks, as we're the only writer. */
956 mlxsw_core_flush_owq();
957 mlxsw_sp_router_fib_flush(mlxsw_sp);
958 kfree(mlxsw_sp->router->vrs);
961 static struct net_device *
962 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
964 struct ip_tunnel *tun = netdev_priv(ol_dev);
965 struct net *net = dev_net(ol_dev);
967 return dev_get_by_index_rcu(net, tun->parms.link);
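/* The underlay table of a tunnel is the FIB table of the L3 master of its
 * bound (underlay) device, or the main table when there is no such device or
 * no L3 master.
 */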
970 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
972 struct net_device *d;
976 d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
978 tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
980 tb_id = RT_TABLE_MAIN;
986 static struct mlxsw_sp_rif *
987 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
988 const struct mlxsw_sp_rif_params *params,
989 struct netlink_ext_ack *extack);
991 static struct mlxsw_sp_rif_ipip_lb *
992 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
993 enum mlxsw_sp_ipip_type ipipt,
994 struct net_device *ol_dev,
995 struct netlink_ext_ack *extack)
997 struct mlxsw_sp_rif_params_ipip_lb lb_params;
998 const struct mlxsw_sp_ipip_ops *ipip_ops;
999 struct mlxsw_sp_rif *rif;
1001 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1002 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1003 .common.dev = ol_dev,
1004 .common.lag = false,
1005 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1008 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1010 return ERR_CAST(rif);
1011 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1014 static struct mlxsw_sp_ipip_entry *
1015 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1016 enum mlxsw_sp_ipip_type ipipt,
1017 struct net_device *ol_dev)
1019 const struct mlxsw_sp_ipip_ops *ipip_ops;
1020 struct mlxsw_sp_ipip_entry *ipip_entry;
1021 struct mlxsw_sp_ipip_entry *ret = NULL;
1023 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1024 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1026 return ERR_PTR(-ENOMEM);
1028 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1030 if (IS_ERR(ipip_entry->ol_lb)) {
1031 ret = ERR_CAST(ipip_entry->ol_lb);
1032 goto err_ol_ipip_lb_create;
1035 ipip_entry->ipipt = ipipt;
1036 ipip_entry->ol_dev = ol_dev;
1038 switch (ipip_ops->ul_proto) {
1039 case MLXSW_SP_L3_PROTO_IPV4:
1040 ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1042 case MLXSW_SP_L3_PROTO_IPV6:
1049 err_ol_ipip_lb_create:
1055 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1057 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1062 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1063 const enum mlxsw_sp_l3proto ul_proto,
1064 union mlxsw_sp_l3addr saddr,
1066 struct mlxsw_sp_ipip_entry *ipip_entry)
1068 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1069 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1070 union mlxsw_sp_l3addr tun_saddr;
1072 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1075 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1076 return tun_ul_tb_id == ul_tb_id &&
1077 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1081 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1082 struct mlxsw_sp_fib_entry *fib_entry,
1083 struct mlxsw_sp_ipip_entry *ipip_entry)
1088 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1093 ipip_entry->decap_fib_entry = fib_entry;
1094 fib_entry->decap.ipip_entry = ipip_entry;
1095 fib_entry->decap.tunnel_index = tunnel_index;
1099 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1100 struct mlxsw_sp_fib_entry *fib_entry)
1102 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1103 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1104 fib_entry->decap.ipip_entry = NULL;
1105 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1106 1, fib_entry->decap.tunnel_index);
1109 static struct mlxsw_sp_fib_node *
1110 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1111 size_t addr_len, unsigned char prefix_len);
1112 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1113 struct mlxsw_sp_fib_entry *fib_entry);
1116 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1117 struct mlxsw_sp_ipip_entry *ipip_entry)
1119 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1121 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1122 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1124 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1128 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1129 struct mlxsw_sp_ipip_entry *ipip_entry,
1130 struct mlxsw_sp_fib_entry *decap_fib_entry)
1132 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1135 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1137 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1138 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1141 static struct mlxsw_sp_fib_entry *
1142 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1143 enum mlxsw_sp_l3proto proto,
1144 const union mlxsw_sp_l3addr *addr,
1145 enum mlxsw_sp_fib_entry_type type)
1147 struct mlxsw_sp_fib_node *fib_node;
1148 unsigned char addr_prefix_len;
1149 struct mlxsw_sp_fib *fib;
1150 struct mlxsw_sp_vr *vr;
1155 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1158 fib = mlxsw_sp_vr_fib(vr, proto);
1161 case MLXSW_SP_L3_PROTO_IPV4:
1162 addr4 = be32_to_cpu(addr->addr4);
1165 addr_prefix_len = 32;
1167 case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
1173 fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1175 if (!fib_node || fib_node->fib_entry->type != type)
1178 return fib_node->fib_entry;
1181 /* Given an IPIP entry, find the corresponding decap route. */
1182 static struct mlxsw_sp_fib_entry *
1183 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1184 struct mlxsw_sp_ipip_entry *ipip_entry)
1186 struct mlxsw_sp_fib_node *fib_node;
1187 const struct mlxsw_sp_ipip_ops *ipip_ops;
1188 unsigned char saddr_prefix_len;
1189 union mlxsw_sp_l3addr saddr;
1190 struct mlxsw_sp_fib *ul_fib;
1191 struct mlxsw_sp_vr *ul_vr;
1197 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1199 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1200 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1204 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1205 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1206 ipip_entry->ol_dev);
1208 switch (ipip_ops->ul_proto) {
1209 case MLXSW_SP_L3_PROTO_IPV4:
1210 saddr4 = be32_to_cpu(saddr.addr4);
1213 saddr_prefix_len = 32;
1220 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1223 fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1226 return fib_node->fib_entry;
1229 static struct mlxsw_sp_ipip_entry *
1230 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1231 enum mlxsw_sp_ipip_type ipipt,
1232 struct net_device *ol_dev)
1234 struct mlxsw_sp_ipip_entry *ipip_entry;
1236 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1237 if (IS_ERR(ipip_entry))
1240 list_add_tail(&ipip_entry->ipip_list_node,
1241 &mlxsw_sp->router->ipip_list);
1247 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1248 struct mlxsw_sp_ipip_entry *ipip_entry)
1250 list_del(&ipip_entry->ipip_list_node);
1251 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1255 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1256 const struct net_device *ul_dev,
1257 enum mlxsw_sp_l3proto ul_proto,
1258 union mlxsw_sp_l3addr ul_dip,
1259 struct mlxsw_sp_ipip_entry *ipip_entry)
1261 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1262 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1264 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1267 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1268 ul_tb_id, ipip_entry);
1271 /* Given decap parameters, find the corresponding IPIP entry. */
1272 static struct mlxsw_sp_ipip_entry *
1273 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1274 const struct net_device *ul_dev,
1275 enum mlxsw_sp_l3proto ul_proto,
1276 union mlxsw_sp_l3addr ul_dip)
1278 struct mlxsw_sp_ipip_entry *ipip_entry;
1280 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1282 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1290 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1291 const struct net_device *dev,
1292 enum mlxsw_sp_ipip_type *p_type)
1294 struct mlxsw_sp_router *router = mlxsw_sp->router;
1295 const struct mlxsw_sp_ipip_ops *ipip_ops;
1296 enum mlxsw_sp_ipip_type ipipt;
1298 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1299 ipip_ops = router->ipip_ops_arr[ipipt];
1300 if (dev->type == ipip_ops->dev_type) {
1309 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1310 const struct net_device *dev)
1312 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1315 static struct mlxsw_sp_ipip_entry *
1316 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1317 const struct net_device *ol_dev)
1319 struct mlxsw_sp_ipip_entry *ipip_entry;
1321 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1323 if (ipip_entry->ol_dev == ol_dev)
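/* Find the next IPIP entry - after 'start', or from the head of the list when
 * 'start' is NULL - whose tunnel uses 'ul_dev' as its underlay device.
 */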
1329 static struct mlxsw_sp_ipip_entry *
1330 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1331 const struct net_device *ul_dev,
1332 struct mlxsw_sp_ipip_entry *start)
1334 struct mlxsw_sp_ipip_entry *ipip_entry;
1336 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1338 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1340 struct net_device *ol_dev = ipip_entry->ol_dev;
1341 struct net_device *ipip_ul_dev;
1344 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1347 if (ipip_ul_dev == ul_dev)
1354 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1355 const struct net_device *dev)
1359 mutex_lock(&mlxsw_sp->router->lock);
1360 is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1361 mutex_unlock(&mlxsw_sp->router->lock);
1366 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1367 const struct net_device *ol_dev,
1368 enum mlxsw_sp_ipip_type ipipt)
1370 const struct mlxsw_sp_ipip_ops *ops
1371 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1373 /* For deciding whether decap should be offloaded, we don't care about
1374 * overlay protocol, so ask whether either one is supported.
1376 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1377 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1380 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1381 struct net_device *ol_dev)
1383 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1384 struct mlxsw_sp_ipip_entry *ipip_entry;
1385 enum mlxsw_sp_l3proto ul_proto;
1386 union mlxsw_sp_l3addr saddr;
1389 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1390 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1391 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1392 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1393 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1394 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1397 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1399 if (IS_ERR(ipip_entry))
1400 return PTR_ERR(ipip_entry);
1407 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1408 struct net_device *ol_dev)
1410 struct mlxsw_sp_ipip_entry *ipip_entry;
1412 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1414 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1418 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1419 struct mlxsw_sp_ipip_entry *ipip_entry)
1421 struct mlxsw_sp_fib_entry *decap_fib_entry;
1423 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1424 if (decap_fib_entry)
1425 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1430 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1431 u16 ul_rif_id, bool enable)
1433 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1434 struct mlxsw_sp_rif *rif = &lb_rif->common;
1435 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1436 char ritr_pl[MLXSW_REG_RITR_LEN];
1439 switch (lb_cf.ul_protocol) {
1440 case MLXSW_SP_L3_PROTO_IPV4:
1441 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1442 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1443 rif->rif_index, rif->vr_id, rif->dev->mtu);
1444 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1445 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1446 ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1449 case MLXSW_SP_L3_PROTO_IPV6:
1450 return -EAFNOSUPPORT;
1453 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
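/* React to an overlay device MTU change by re-writing the loopback RIF
 * configuration and, on success, recording the new MTU on the RIF.
 */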
1456 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1457 struct net_device *ol_dev)
1459 struct mlxsw_sp_ipip_entry *ipip_entry;
1460 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1463 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1465 lb_rif = ipip_entry->ol_lb;
1466 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1467 lb_rif->ul_rif_id, true);
1470 lb_rif->common.mtu = ol_dev->mtu;
1477 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1478 struct net_device *ol_dev)
1480 struct mlxsw_sp_ipip_entry *ipip_entry;
1482 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1484 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1488 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1489 struct mlxsw_sp_ipip_entry *ipip_entry)
1491 if (ipip_entry->decap_fib_entry)
1492 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1495 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1496 struct net_device *ol_dev)
1498 struct mlxsw_sp_ipip_entry *ipip_entry;
1500 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1502 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1505 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1506 struct mlxsw_sp_rif *old_rif,
1507 struct mlxsw_sp_rif *new_rif);
1509 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1510 struct mlxsw_sp_ipip_entry *ipip_entry,
1512 struct netlink_ext_ack *extack)
1514 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1515 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1517 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1521 if (IS_ERR(new_lb_rif))
1522 return PTR_ERR(new_lb_rif);
1523 ipip_entry->ol_lb = new_lb_rif;
1526 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1527 &new_lb_rif->common);
1529 mlxsw_sp_rif_destroy(&old_lb_rif->common);
1534 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1535 struct mlxsw_sp_rif *rif);
1538 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1539 * @mlxsw_sp: mlxsw_sp.
1540 * @ipip_entry: IPIP entry.
1541 * @recreate_loopback: Recreates the associated loopback RIF.
1542 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1543 * relevant when recreate_loopback is true.
1544 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1545 * is only relevant when recreate_loopback is false.
1548 * Return: Non-zero value on failure.
1550 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1551 struct mlxsw_sp_ipip_entry *ipip_entry,
1552 bool recreate_loopback,
1554 bool update_nexthops,
1555 struct netlink_ext_ack *extack)
1559 /* RIFs can't be edited, so to update loopback, we need to destroy and
1560 * recreate it. That creates a window of opportunity where RALUE and
1561 * RATR registers end up referencing a RIF that's already gone. RATRs
1562 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1563 * of RALUE, demote the decap route back.
1565 if (ipip_entry->decap_fib_entry)
1566 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1568 if (recreate_loopback) {
1569 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1570 keep_encap, extack);
1573 } else if (update_nexthops) {
1574 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1575 &ipip_entry->ol_lb->common);
1578 if (ipip_entry->ol_dev->flags & IFF_UP)
1579 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1584 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1585 struct net_device *ol_dev,
1586 struct netlink_ext_ack *extack)
1588 struct mlxsw_sp_ipip_entry *ipip_entry =
1589 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1594 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1595 true, false, false, extack);
1599 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1600 struct mlxsw_sp_ipip_entry *ipip_entry,
1601 struct net_device *ul_dev,
1603 struct netlink_ext_ack *extack)
1605 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1606 enum mlxsw_sp_l3proto ul_proto;
1607 union mlxsw_sp_l3addr saddr;
1609 /* Moving underlay to a different VRF might cause local address
1610 * conflict, and the conflicting tunnels need to be demoted.
1612 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1613 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1614 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1617 *demote_this = true;
1621 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1622 true, true, false, extack);
1626 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1627 struct mlxsw_sp_ipip_entry *ipip_entry,
1628 struct net_device *ul_dev)
1630 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1631 false, false, true, NULL);
1635 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1636 struct mlxsw_sp_ipip_entry *ipip_entry,
1637 struct net_device *ul_dev)
1639 /* A down underlay device causes encapsulated packets to not be
1640 * forwarded, but decap still works. So refresh next hops without
1641 * touching anything else.
1643 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1644 false, false, true, NULL);
1648 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1649 struct net_device *ol_dev,
1650 struct netlink_ext_ack *extack)
1652 const struct mlxsw_sp_ipip_ops *ipip_ops;
1653 struct mlxsw_sp_ipip_entry *ipip_entry;
1656 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1658 /* A change might make a tunnel eligible for offloading, but
1659 * that is currently not implemented. What falls to slow path stays there. */
1664 /* A change might make a tunnel not eligible for offloading. */
1665 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1666 ipip_entry->ipipt)) {
1667 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1671 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1672 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1676 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1677 struct mlxsw_sp_ipip_entry *ipip_entry)
1679 struct net_device *ol_dev = ipip_entry->ol_dev;
1681 if (ol_dev->flags & IFF_UP)
1682 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1683 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1686 /* The configuration where several tunnels have the same local address in the
1687 * same underlay table needs special treatment in the HW. That is currently not
1688 * implemented in the driver. This function finds and demotes the first tunnel
1689 * with a given source address, except the one passed in the argument 'except'. */
1693 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1694 enum mlxsw_sp_l3proto ul_proto,
1695 union mlxsw_sp_l3addr saddr,
1697 const struct mlxsw_sp_ipip_entry *except)
1699 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1701 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1703 if (ipip_entry != except &&
1704 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1705 ul_tb_id, ipip_entry)) {
1706 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1714 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1715 struct net_device *ul_dev)
1717 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1719 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1721 struct net_device *ol_dev = ipip_entry->ol_dev;
1722 struct net_device *ipip_ul_dev;
1725 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1727 if (ipip_ul_dev == ul_dev)
1728 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
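/* Entry point for netdevice events on IPIP overlay devices. Dispatches
 * register/unregister, up/down, CHANGEUPPER (moves to/from a VRF), CHANGE and
 * CHANGEMTU handling while holding the router lock.
 */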
1732 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1733 struct net_device *ol_dev,
1734 unsigned long event,
1735 struct netdev_notifier_info *info)
1737 struct netdev_notifier_changeupper_info *chup;
1738 struct netlink_ext_ack *extack;
1741 mutex_lock(&mlxsw_sp->router->lock);
1743 case NETDEV_REGISTER:
1744 err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1746 case NETDEV_UNREGISTER:
1747 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1750 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1753 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1755 case NETDEV_CHANGEUPPER:
1756 chup = container_of(info, typeof(*chup), info);
1757 extack = info->extack;
1758 if (netif_is_l3_master(chup->upper_dev))
1759 err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1764 extack = info->extack;
1765 err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1768 case NETDEV_CHANGEMTU:
1769 err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1772 mutex_unlock(&mlxsw_sp->router->lock);
1777 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1778 struct mlxsw_sp_ipip_entry *ipip_entry,
1779 struct net_device *ul_dev,
1781 unsigned long event,
1782 struct netdev_notifier_info *info)
1784 struct netdev_notifier_changeupper_info *chup;
1785 struct netlink_ext_ack *extack;
1788 case NETDEV_CHANGEUPPER:
1789 chup = container_of(info, typeof(*chup), info);
1790 extack = info->extack;
1791 if (netif_is_l3_master(chup->upper_dev))
1792 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1800 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1803 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1811 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1812 struct net_device *ul_dev,
1813 unsigned long event,
1814 struct netdev_notifier_info *info)
1816 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1819 mutex_lock(&mlxsw_sp->router->lock);
1820 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1823 struct mlxsw_sp_ipip_entry *prev;
1824 bool demote_this = false;
1826 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1827 ul_dev, &demote_this,
1830 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1836 if (list_is_first(&ipip_entry->ipip_list_node,
1837 &mlxsw_sp->router->ipip_list))
1840 /* This can't be cached from previous iteration,
1841 * because that entry could be gone now.
1843 prev = list_prev_entry(ipip_entry,
1845 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1849 mutex_unlock(&mlxsw_sp->router->lock);
1854 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1855 enum mlxsw_sp_l3proto ul_proto,
1856 const union mlxsw_sp_l3addr *ul_sip,
1859 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1860 struct mlxsw_sp_router *router = mlxsw_sp->router;
1861 struct mlxsw_sp_fib_entry *fib_entry;
1864 mutex_lock(&mlxsw_sp->router->lock);
1866 if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1871 router->nve_decap_config.ul_tb_id = ul_tb_id;
1872 router->nve_decap_config.tunnel_index = tunnel_index;
1873 router->nve_decap_config.ul_proto = ul_proto;
1874 router->nve_decap_config.ul_sip = *ul_sip;
1875 router->nve_decap_config.valid = true;
1877 /* It is valid to create a tunnel with a local IP and only later
1878 * assign this IP address to a local interface
1880 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1886 fib_entry->decap.tunnel_index = tunnel_index;
1887 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1889 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1891 goto err_fib_entry_update;
1895 err_fib_entry_update:
1896 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1897 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1899 mutex_unlock(&mlxsw_sp->router->lock);
1903 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1904 enum mlxsw_sp_l3proto ul_proto,
1905 const union mlxsw_sp_l3addr *ul_sip)
1907 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1908 struct mlxsw_sp_router *router = mlxsw_sp->router;
1909 struct mlxsw_sp_fib_entry *fib_entry;
1911 mutex_lock(&mlxsw_sp->router->lock);
1913 if (WARN_ON_ONCE(!router->nve_decap_config.valid))
1916 router->nve_decap_config.valid = false;
1918 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1924 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1925 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1927 mutex_unlock(&mlxsw_sp->router->lock);
1930 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
1932 enum mlxsw_sp_l3proto ul_proto,
1933 const union mlxsw_sp_l3addr *ul_sip)
1935 struct mlxsw_sp_router *router = mlxsw_sp->router;
1937 return router->nve_decap_config.valid &&
1938 router->nve_decap_config.ul_tb_id == ul_tb_id &&
1939 router->nve_decap_config.ul_proto == ul_proto &&
1940 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
1944 struct mlxsw_sp_neigh_key {
1945 struct neighbour *n;
1948 struct mlxsw_sp_neigh_entry {
1949 struct list_head rif_list_node;
1950 struct rhash_head ht_node;
1951 struct mlxsw_sp_neigh_key key;
1954 unsigned char ha[ETH_ALEN];
1955 struct list_head nexthop_list; /* list of nexthops using
1958 struct list_head nexthop_neighs_list_node;
1959 unsigned int counter_index;
1963 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1964 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1965 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1966 .key_len = sizeof(struct mlxsw_sp_neigh_key),
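/* Walk the neighbours tracked on a RIF: pass NULL to get the first entry, or
 * a previous entry to get the one that follows it. Returns NULL once the list
 * is exhausted.
 */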
1969 struct mlxsw_sp_neigh_entry *
1970 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1971 struct mlxsw_sp_neigh_entry *neigh_entry)
1974 if (list_empty(&rif->neigh_list))
1977 return list_first_entry(&rif->neigh_list,
1978 typeof(*neigh_entry),
1981 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
1983 return list_next_entry(neigh_entry, rif_list_node);
1986 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1988 return neigh_entry->key.n->tbl->family;
1992 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1994 return neigh_entry->ha;
1997 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1999 struct neighbour *n;
2001 n = neigh_entry->key.n;
2002 return ntohl(*((__be32 *) n->primary_key));
2006 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2008 struct neighbour *n;
2010 n = neigh_entry->key.n;
2011 return (struct in6_addr *) &n->primary_key;
2014 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2015 struct mlxsw_sp_neigh_entry *neigh_entry,
2018 if (!neigh_entry->counter_valid)
2021 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2025 static struct mlxsw_sp_neigh_entry *
2026 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2029 struct mlxsw_sp_neigh_entry *neigh_entry;
2031 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2035 neigh_entry->key.n = n;
2036 neigh_entry->rif = rif;
2037 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2042 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2048 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2049 struct mlxsw_sp_neigh_entry *neigh_entry)
2051 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2052 &neigh_entry->ht_node,
2053 mlxsw_sp_neigh_ht_params);
2057 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2058 struct mlxsw_sp_neigh_entry *neigh_entry)
2060 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2061 &neigh_entry->ht_node,
2062 mlxsw_sp_neigh_ht_params);
2066 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2067 struct mlxsw_sp_neigh_entry *neigh_entry)
2069 struct devlink *devlink;
2070 const char *table_name;
2072 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2074 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2077 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2084 devlink = priv_to_devlink(mlxsw_sp->core);
2085 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2089 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2090 struct mlxsw_sp_neigh_entry *neigh_entry)
2092 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2095 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2098 neigh_entry->counter_valid = true;
2102 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2103 struct mlxsw_sp_neigh_entry *neigh_entry)
2105 if (!neigh_entry->counter_valid)
2107 mlxsw_sp_flow_counter_free(mlxsw_sp,
2108 neigh_entry->counter_index);
2109 neigh_entry->counter_valid = false;
2112 static struct mlxsw_sp_neigh_entry *
2113 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2115 struct mlxsw_sp_neigh_entry *neigh_entry;
2116 struct mlxsw_sp_rif *rif;
2119 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2121 return ERR_PTR(-EINVAL);
2123 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2125 return ERR_PTR(-ENOMEM);
2127 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2129 goto err_neigh_entry_insert;
2131 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2132 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2136 err_neigh_entry_insert:
2137 mlxsw_sp_neigh_entry_free(neigh_entry);
2138 return ERR_PTR(err);
2142 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2143 struct mlxsw_sp_neigh_entry *neigh_entry)
2145 list_del(&neigh_entry->rif_list_node);
2146 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2147 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2148 mlxsw_sp_neigh_entry_free(neigh_entry);
2151 static struct mlxsw_sp_neigh_entry *
2152 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2154 struct mlxsw_sp_neigh_key key;
2157 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2158 &key, mlxsw_sp_neigh_ht_params);
2162 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2164 unsigned long interval;
2166 #if IS_ENABLED(CONFIG_IPV6)
2167 interval = min_t(unsigned long,
2168 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2169 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2171 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2173 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2176 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2180 struct net_device *dev;
2181 struct neighbour *n;
2186 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2188 if (!mlxsw_sp->router->rifs[rif]) {
2189 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2194 dev = mlxsw_sp->router->rifs[rif]->dev;
2195 n = neigh_lookup(&arp_tbl, &dipn, dev);
2199 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2200 neigh_event_send(n, NULL);
2204 #if IS_ENABLED(CONFIG_IPV6)
2205 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2209 struct net_device *dev;
2210 struct neighbour *n;
2211 struct in6_addr dip;
2214 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2217 if (!mlxsw_sp->router->rifs[rif]) {
2218 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2222 dev = mlxsw_sp->router->rifs[rif]->dev;
2223 n = neigh_lookup(&nd_tbl, &dip, dev);
2227 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2228 neigh_event_send(n, NULL);
2232 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2239 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2246 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2248 /* Hardware starts counting at 0, so add 1. */
2251 /* Each record consists of several neighbour entries. */
2252 for (i = 0; i < num_entries; i++) {
2255 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2256 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2262 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2266 /* One record contains one entry. */
2267 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2271 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2272 char *rauhtd_pl, int rec_index)
2274 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2275 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2276 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2279 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2280 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
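/* The dump is considered full, meaning another RAUHTD query is needed, when
 * the maximum number of records was returned and the last record is either an
 * IPv6 record or a fully populated IPv4 record.
 */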
2286 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2288 u8 num_rec, last_rec_index, num_entries;
2290 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2291 last_rec_index = num_rec - 1;
2293 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2295 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2296 MLXSW_REG_RAUHTD_TYPE_IPV6)
2299 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2301 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2307 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2309 enum mlxsw_reg_rauhtd_type type)
2314 /* Ensure the RIF we read from the device does not change mid-dump. */
2315 mutex_lock(&mlxsw_sp->router->lock);
2317 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2318 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2321 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2324 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2325 for (i = 0; i < num_rec; i++)
2326 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2328 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2329 mutex_unlock(&mlxsw_sp->router->lock);
2334 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2336 enum mlxsw_reg_rauhtd_type type;
2340 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2344 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2345 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2349 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2350 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2356 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2358 struct mlxsw_sp_neigh_entry *neigh_entry;
2360 mutex_lock(&mlxsw_sp->router->lock);
2361 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2362 nexthop_neighs_list_node)
2363 /* If this neigh has nexthops, make the kernel think this neigh
2364 * is active regardless of the traffic.
2366 neigh_event_send(neigh_entry->key.n, NULL);
2367 mutex_unlock(&mlxsw_sp->router->lock);
2371 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2373 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2375 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2376 msecs_to_jiffies(interval));
2379 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2381 struct mlxsw_sp_router *router;
2384 router = container_of(work, struct mlxsw_sp_router,
2385 neighs_update.dw.work);
2386 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2388 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2390 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2392 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2395 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2397 struct mlxsw_sp_neigh_entry *neigh_entry;
2398 struct mlxsw_sp_router *router;
2400 router = container_of(work, struct mlxsw_sp_router,
2401 nexthop_probe_dw.work);
2402 /* Iterate over the nexthop neighbours and send an ARP probe to those
2403 * that are unresolved. This solves the chicken-and-egg problem where
2404 * a nexthop is not offloaded until its neighbour is resolved, but the
2405 * neighbour is never resolved because traffic is flowing in HW
2406 * via a different nexthop.
2408 mutex_lock(&router->lock);
2409 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2410 nexthop_neighs_list_node)
2411 if (!neigh_entry->connected)
2412 neigh_event_send(neigh_entry->key.n, NULL);
2413 mutex_unlock(&router->lock);
2415 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2416 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2420 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2421 struct mlxsw_sp_neigh_entry *neigh_entry,
2422 bool removing, bool dead);
2424 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2426 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2427 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2431 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2432 struct mlxsw_sp_neigh_entry *neigh_entry,
2433 enum mlxsw_reg_rauht_op op)
2435 struct neighbour *n = neigh_entry->key.n;
2436 u32 dip = ntohl(*((__be32 *) n->primary_key));
2437 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2439 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2441 if (neigh_entry->counter_valid)
2442 mlxsw_reg_rauht_pack_counter(rauht_pl,
2443 neigh_entry->counter_index);
2444 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2448 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2449 struct mlxsw_sp_neigh_entry *neigh_entry,
2450 enum mlxsw_reg_rauht_op op)
2452 struct neighbour *n = neigh_entry->key.n;
2453 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2454 const char *dip = n->primary_key;
2456 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2458 if (neigh_entry->counter_valid)
2459 mlxsw_reg_rauht_pack_counter(rauht_pl,
2460 neigh_entry->counter_index);
2461 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2464 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2466 struct neighbour *n = neigh_entry->key.n;
2468 /* Packets with a link-local destination address are trapped
2469 * after LPM lookup and never reach the neighbour table, so
2470 * there is no need to program such neighbours to the device.
2472 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2473 IPV6_ADDR_LINKLOCAL)
2479 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2480 struct mlxsw_sp_neigh_entry *neigh_entry,
2483 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2486 if (!adding && !neigh_entry->connected)
2488 neigh_entry->connected = adding;
2489 if (neigh_entry->key.n->tbl->family == AF_INET) {
2490 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2494 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2495 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2497 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2507 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2509 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
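/* NTF_OFFLOADED is reflected back to the kernel neighbour entry so that
 * user space can tell whether the neighbour is programmed in the device:
 * it is set only when the RAUHT write above succeeded for a connected
 * entry and cleared otherwise.
 */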
2513 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2514 struct mlxsw_sp_neigh_entry *neigh_entry,
2518 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2520 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2521 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2524 struct mlxsw_sp_netevent_work {
2525 struct work_struct work;
2526 struct mlxsw_sp *mlxsw_sp;
2527 struct neighbour *n;
2530 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2532 struct mlxsw_sp_netevent_work *net_work =
2533 container_of(work, struct mlxsw_sp_netevent_work, work);
2534 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2535 struct mlxsw_sp_neigh_entry *neigh_entry;
2536 struct neighbour *n = net_work->n;
2537 unsigned char ha[ETH_ALEN];
2538 bool entry_connected;
2541 /* If these parameters are changed after we release the lock,
2542 * then we are guaranteed to receive another event letting us know about it.
2545 read_lock_bh(&n->lock);
2546 memcpy(ha, n->ha, ETH_ALEN);
2547 nud_state = n->nud_state;
2549 read_unlock_bh(&n->lock);
2551 mutex_lock(&mlxsw_sp->router->lock);
2552 mlxsw_sp_span_respin(mlxsw_sp);
2554 entry_connected = nud_state & NUD_VALID && !dead;
2555 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2556 if (!entry_connected && !neigh_entry)
2559 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2560 if (IS_ERR(neigh_entry))
2564 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2565 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2566 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2569 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2570 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2573 mutex_unlock(&mlxsw_sp->router->lock);
2578 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2580 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2582 struct mlxsw_sp_netevent_work *net_work =
2583 container_of(work, struct mlxsw_sp_netevent_work, work);
2584 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2586 mlxsw_sp_mp_hash_init(mlxsw_sp);
2590 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2592 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2594 struct mlxsw_sp_netevent_work *net_work =
2595 container_of(work, struct mlxsw_sp_netevent_work, work);
2596 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2598 __mlxsw_sp_router_init(mlxsw_sp);
2602 static int mlxsw_sp_router_schedule_work(struct net *net,
2603 struct notifier_block *nb,
2604 void (*cb)(struct work_struct *))
2606 struct mlxsw_sp_netevent_work *net_work;
2607 struct mlxsw_sp_router *router;
2609 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2610 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2613 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2617 INIT_WORK(&net_work->work, cb);
2618 net_work->mlxsw_sp = router->mlxsw_sp;
2619 mlxsw_core_schedule_work(&net_work->work);
2623 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2624 unsigned long event, void *ptr)
2626 struct mlxsw_sp_netevent_work *net_work;
2627 struct mlxsw_sp_port *mlxsw_sp_port;
2628 struct mlxsw_sp *mlxsw_sp;
2629 unsigned long interval;
2630 struct neigh_parms *p;
2631 struct neighbour *n;
2634 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2637 /* We don't care about changes in the default table. */
2638 if (!p->dev || (p->tbl->family != AF_INET &&
2639 p->tbl->family != AF_INET6))
2642 /* We are in atomic context and can't take RTNL mutex,
2643 * so use RCU variant to walk the device chain.
2645 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2649 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2650 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2651 mlxsw_sp->router->neighs_update.interval = interval;
2653 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2655 case NETEVENT_NEIGH_UPDATE:
2658 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2661 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2665 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2667 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2671 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2672 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2675 /* Take a reference to ensure the neighbour won't be
2676 * destructed until we drop the reference in the delayed work.
2680 mlxsw_core_schedule_work(&net_work->work);
2681 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2683 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2684 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2685 return mlxsw_sp_router_schedule_work(ptr, nb,
2686 mlxsw_sp_router_mp_hash_event_work);
2688 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2689 return mlxsw_sp_router_schedule_work(ptr, nb,
2690 mlxsw_sp_router_update_priority_work);
2696 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2700 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2701 &mlxsw_sp_neigh_ht_params);
2705 /* Initialize the polling interval according to the default table. */
2708 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2710 /* Create the delayed works for the activity_update */
2711 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2712 mlxsw_sp_router_neighs_update_work);
2713 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2714 mlxsw_sp_router_probe_unresolved_nexthops);
2715 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2716 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2720 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2722 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2723 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2724 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2727 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2728 struct mlxsw_sp_rif *rif)
2730 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2732 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2734 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2735 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2739 enum mlxsw_sp_nexthop_type {
2740 MLXSW_SP_NEXTHOP_TYPE_ETH,
2741 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2744 struct mlxsw_sp_nexthop_key {
2745 struct fib_nh *fib_nh;
2748 struct mlxsw_sp_nexthop {
2749 struct list_head neigh_list_node; /* member of neigh entry list */
2750 struct list_head rif_list_node;
2751 struct list_head router_list_node;
2752 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group this nexthop belongs to */
2755 struct rhash_head ht_node;
2756 struct mlxsw_sp_nexthop_key key;
2757 unsigned char gw_addr[sizeof(struct in6_addr)];
2761 int num_adj_entries;
2762 struct mlxsw_sp_rif *rif;
2763 u8 should_offload:1, /* set indicates this neigh is connected and
2764 * should be put to KVD linear area of this group.
2766 offloaded:1, /* set in case the neigh is actually put into
2767 * KVD linear area of this group.
2769 update:1; /* set indicates that MAC of this neigh should be updated in HW */
2772 enum mlxsw_sp_nexthop_type type;
2774 struct mlxsw_sp_neigh_entry *neigh_entry;
2775 struct mlxsw_sp_ipip_entry *ipip_entry;
2777 unsigned int counter_index;
2781 struct mlxsw_sp_nexthop_group {
2783 struct rhash_head ht_node;
2784 struct list_head fib_list; /* list of fib entries that use this group */
2785 struct neigh_table *neigh_tbl;
2786 u8 adj_index_valid:1,
2787 gateway:1; /* routes using the group use a gateway */
2791 int sum_norm_weight;
2792 struct mlxsw_sp_nexthop nexthops[0];
2793 #define nh_rif nexthops[0].rif
2796 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2797 struct mlxsw_sp_nexthop *nh)
2799 struct devlink *devlink;
2801 devlink = priv_to_devlink(mlxsw_sp->core);
2802 if (!devlink_dpipe_table_counter_enabled(devlink,
2803 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2806 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2809 nh->counter_valid = true;
2812 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2813 struct mlxsw_sp_nexthop *nh)
2815 if (!nh->counter_valid)
2817 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2818 nh->counter_valid = false;
2821 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2822 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2824 if (!nh->counter_valid)
2827 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2831 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2832 struct mlxsw_sp_nexthop *nh)
2835 if (list_empty(&router->nexthop_list))
2838 return list_first_entry(&router->nexthop_list,
2839 typeof(*nh), router_list_node);
2841 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2843 return list_next_entry(nh, router_list_node);
2846 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2848 return nh->offloaded;
2851 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2855 return nh->neigh_entry->ha;
2858 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2859 u32 *p_adj_size, u32 *p_adj_hash_index)
2861 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2862 u32 adj_hash_index = 0;
2865 if (!nh->offloaded || !nh_grp->adj_index_valid)
2868 *p_adj_index = nh_grp->adj_index;
2869 *p_adj_size = nh_grp->ecmp_size;
2871 for (i = 0; i < nh_grp->count; i++) {
2872 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2876 if (nh_iter->offloaded)
2877 adj_hash_index += nh_iter->num_adj_entries;
2880 *p_adj_hash_index = adj_hash_index;
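/* Illustrative example (hypothetical numbers): if the group's adjacency
 * block starts at adj_index 1000 and the offloaded nexthops preceding
 * this one occupy 85 and 171 adjacency entries, the caller gets
 * *p_adj_index = 1000, *p_adj_size = ecmp_size and
 * *p_adj_hash_index = 85 + 171 = 256.
 */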
2884 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2889 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2891 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2894 for (i = 0; i < nh_grp->count; i++) {
2895 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2897 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2903 static struct fib_info *
2904 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2906 return nh_grp->priv;
2909 struct mlxsw_sp_nexthop_group_cmp_arg {
2910 enum mlxsw_sp_l3proto proto;
2912 struct fib_info *fi;
2913 struct mlxsw_sp_fib6_entry *fib6_entry;
2918 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2919 const struct in6_addr *gw, int ifindex,
2924 for (i = 0; i < nh_grp->count; i++) {
2925 const struct mlxsw_sp_nexthop *nh;
2927 nh = &nh_grp->nexthops[i];
2928 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
2929 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2937 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2938 const struct mlxsw_sp_fib6_entry *fib6_entry)
2940 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2942 if (nh_grp->count != fib6_entry->nrt6)
2945 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2946 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
2947 struct in6_addr *gw;
2948 int ifindex, weight;
2950 ifindex = fib6_nh->fib_nh_dev->ifindex;
2951 weight = fib6_nh->fib_nh_weight;
2952 gw = &fib6_nh->fib_nh_gw6;
2953 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2962 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2964 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2965 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2967 switch (cmp_arg->proto) {
2968 case MLXSW_SP_L3_PROTO_IPV4:
2969 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2970 case MLXSW_SP_L3_PROTO_IPV6:
2971 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2972 cmp_arg->fib6_entry);
2980 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2982 return nh_grp->neigh_tbl->family;
2985 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2987 const struct mlxsw_sp_nexthop_group *nh_grp = data;
2988 const struct mlxsw_sp_nexthop *nh;
2989 struct fib_info *fi;
2993 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2995 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2996 return jhash(&fi, sizeof(fi), seed);
2998 val = nh_grp->count;
2999 for (i = 0; i < nh_grp->count; i++) {
3000 nh = &nh_grp->nexthops[i];
3001 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3002 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3004 return jhash(&val, sizeof(val), seed);
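/* For IPv4 the struct fib_info pointer itself is both the hash input and
 * the comparison key (see mlxsw_sp_nexthop_group_cmp()), so two routes
 * share a nexthop group only when they reference the same fib_info. IPv6
 * groups are instead matched by their set of {ifindex, weight, gateway}
 * tuples.
 */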
3012 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3014 unsigned int val = fib6_entry->nrt6;
3015 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3017 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3018 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3019 struct net_device *dev = fib6_nh->fib_nh_dev;
3020 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3022 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3023 val ^= jhash(gw, sizeof(*gw), seed);
3026 return jhash(&val, sizeof(val), seed);
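/* XOR-combining the per-nexthop hashes (ifindex and gateway) makes the
 * result independent of the order in which the nexthops are listed, which
 * matches the order-insensitive comparison in
 * mlxsw_sp_nexthop6_group_cmp().
 */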
3030 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3032 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3034 switch (cmp_arg->proto) {
3035 case MLXSW_SP_L3_PROTO_IPV4:
3036 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3037 case MLXSW_SP_L3_PROTO_IPV6:
3038 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3045 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3046 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3047 .hashfn = mlxsw_sp_nexthop_group_hash,
3048 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3049 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3052 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3053 struct mlxsw_sp_nexthop_group *nh_grp)
3055 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3059 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3061 mlxsw_sp_nexthop_group_ht_params);
3064 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3065 struct mlxsw_sp_nexthop_group *nh_grp)
3067 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3071 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3073 mlxsw_sp_nexthop_group_ht_params);
3076 static struct mlxsw_sp_nexthop_group *
3077 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3078 struct fib_info *fi)
3080 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3082 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3084 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3086 mlxsw_sp_nexthop_group_ht_params);
3089 static struct mlxsw_sp_nexthop_group *
3090 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3091 struct mlxsw_sp_fib6_entry *fib6_entry)
3093 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3095 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3096 cmp_arg.fib6_entry = fib6_entry;
3097 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3099 mlxsw_sp_nexthop_group_ht_params);
3102 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3103 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3104 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3105 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3108 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3109 struct mlxsw_sp_nexthop *nh)
3111 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3112 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3115 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3116 struct mlxsw_sp_nexthop *nh)
3118 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3119 mlxsw_sp_nexthop_ht_params);
3122 static struct mlxsw_sp_nexthop *
3123 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3124 struct mlxsw_sp_nexthop_key key)
3126 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3127 mlxsw_sp_nexthop_ht_params);
3130 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3131 const struct mlxsw_sp_fib *fib,
3132 u32 adj_index, u16 ecmp_size,
3136 char raleu_pl[MLXSW_REG_RALEU_LEN];
3138 mlxsw_reg_raleu_pack(raleu_pl,
3139 (enum mlxsw_reg_ralxx_protocol) fib->proto,
3140 fib->vr->id, adj_index, ecmp_size, new_adj_index,
3142 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3145 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3146 struct mlxsw_sp_nexthop_group *nh_grp,
3147 u32 old_adj_index, u16 old_ecmp_size)
3149 struct mlxsw_sp_fib_entry *fib_entry;
3150 struct mlxsw_sp_fib *fib = NULL;
3153 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3154 if (fib == fib_entry->fib_node->fib)
3156 fib = fib_entry->fib_node->fib;
3157 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3168 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3169 struct mlxsw_sp_nexthop *nh)
3171 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3172 char ratr_pl[MLXSW_REG_RATR_LEN];
3174 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3175 true, MLXSW_REG_RATR_TYPE_ETHERNET,
3176 adj_index, neigh_entry->rif);
3177 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3178 if (nh->counter_valid)
3179 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3181 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3183 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3186 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3187 struct mlxsw_sp_nexthop *nh)
3191 for (i = 0; i < nh->num_adj_entries; i++) {
3194 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3202 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3204 struct mlxsw_sp_nexthop *nh)
3206 const struct mlxsw_sp_ipip_ops *ipip_ops;
3208 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3209 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3212 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3214 struct mlxsw_sp_nexthop *nh)
3218 for (i = 0; i < nh->num_adj_entries; i++) {
3221 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3231 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3232 struct mlxsw_sp_nexthop_group *nh_grp,
3235 u32 adj_index = nh_grp->adj_index; /* base */
3236 struct mlxsw_sp_nexthop *nh;
3239 for (i = 0; i < nh_grp->count; i++) {
3240 nh = &nh_grp->nexthops[i];
3242 if (!nh->should_offload) {
3247 if (nh->update || reallocate) {
3251 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3252 err = mlxsw_sp_nexthop_update
3253 (mlxsw_sp, adj_index, nh);
3255 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3256 err = mlxsw_sp_nexthop_ipip_update
3257 (mlxsw_sp, adj_index, nh);
3265 adj_index += nh->num_adj_entries;
3271 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3272 struct mlxsw_sp_nexthop_group *nh_grp)
3274 struct mlxsw_sp_fib_entry *fib_entry;
3277 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3278 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3285 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3287 /* Valid sizes for an adjacency group are:
3288 * 1-64, 512, 1024, 2048 and 4096.
3290 if (*p_adj_grp_size <= 64)
3292 else if (*p_adj_grp_size <= 512)
3293 *p_adj_grp_size = 512;
3294 else if (*p_adj_grp_size <= 1024)
3295 *p_adj_grp_size = 1024;
3296 else if (*p_adj_grp_size <= 2048)
3297 *p_adj_grp_size = 2048;
3299 *p_adj_grp_size = 4096;
3302 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3303 unsigned int alloc_size)
3305 if (alloc_size >= 4096)
3306 *p_adj_grp_size = 4096;
3307 else if (alloc_size >= 2048)
3308 *p_adj_grp_size = 2048;
3309 else if (alloc_size >= 1024)
3310 *p_adj_grp_size = 1024;
3311 else if (alloc_size >= 512)
3312 *p_adj_grp_size = 512;
3315 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3316 u16 *p_adj_grp_size)
3318 unsigned int alloc_size;
3321 /* Round up the requested group size to the next size supported
3322 * by the device and make sure the request can be satisfied.
3324 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3325 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3326 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3327 *p_adj_grp_size, &alloc_size);
3330 /* It is possible the allocation results in more allocated
3331 * entries than requested. Try to use as many of them as possible.
3334 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
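/* Illustrative example (hypothetical numbers): a requested group size of
 * 100 is first rounded up to 512, the next size the device supports. The
 * KVDL query then reports how many adjacency entries such an allocation
 * would actually span, and the size is rounded down to the largest
 * supported value that fits, so the allocation is used as fully as
 * possible.
 */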
3340 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3342 int i, g = 0, sum_norm_weight = 0;
3343 struct mlxsw_sp_nexthop *nh;
3345 for (i = 0; i < nh_grp->count; i++) {
3346 nh = &nh_grp->nexthops[i];
3348 if (!nh->should_offload)
3351 g = gcd(nh->nh_weight, g);
3356 for (i = 0; i < nh_grp->count; i++) {
3357 nh = &nh_grp->nexthops[i];
3359 if (!nh->should_offload)
3361 nh->norm_nh_weight = nh->nh_weight / g;
3362 sum_norm_weight += nh->norm_nh_weight;
3365 nh_grp->sum_norm_weight = sum_norm_weight;
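/* Worked example (hypothetical weights): nexthops with weights {2, 4, 6}
 * have a greatest common divisor of 2 and normalize to {1, 2, 3}, giving
 * sum_norm_weight = 6. Nexthops that should not be offloaded are skipped
 * and contribute to neither the GCD nor the sum.
 */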
3369 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3371 int total = nh_grp->sum_norm_weight;
3372 u16 ecmp_size = nh_grp->ecmp_size;
3373 int i, weight = 0, lower_bound = 0;
3375 for (i = 0; i < nh_grp->count; i++) {
3376 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3379 if (!nh->should_offload)
3381 weight += nh->norm_nh_weight;
3382 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3383 nh->num_adj_entries = upper_bound - lower_bound;
3384 lower_bound = upper_bound;
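/* Worked example (hypothetical values): with ecmp_size = 512 and
 * normalized weights {1, 2, 3} (total 6), the cumulative upper bounds are
 * DIV_ROUND_CLOSEST(512 * 1, 6) = 85, DIV_ROUND_CLOSEST(512 * 3, 6) = 256
 * and DIV_ROUND_CLOSEST(512 * 6, 6) = 512, so the three nexthops receive
 * 85, 171 and 256 adjacency entries respectively.
 */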
3388 static struct mlxsw_sp_nexthop *
3389 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3390 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3393 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3394 struct mlxsw_sp_nexthop_group *nh_grp)
3398 for (i = 0; i < nh_grp->count; i++) {
3399 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3402 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3404 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3409 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3410 struct mlxsw_sp_fib6_entry *fib6_entry)
3412 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3414 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3415 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3416 struct mlxsw_sp_nexthop *nh;
3418 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3419 if (nh && nh->offloaded)
3420 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3422 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3427 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3428 struct mlxsw_sp_nexthop_group *nh_grp)
3430 struct mlxsw_sp_fib6_entry *fib6_entry;
3432 /* Unfortunately, in IPv6 the route and the nexthop are described by
3433 * the same struct, so we need to iterate over all the routes using the
3434 * nexthop group and set / clear the offload indication for them.
3436 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3437 common.nexthop_group_node)
3438 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3442 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3443 struct mlxsw_sp_nexthop_group *nh_grp)
3445 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
3447 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3450 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3456 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3457 struct mlxsw_sp_nexthop_group *nh_grp)
3459 u16 ecmp_size, old_ecmp_size;
3460 struct mlxsw_sp_nexthop *nh;
3461 bool offload_change = false;
3463 bool old_adj_index_valid;
3468 if (!nh_grp->gateway) {
3469 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3473 for (i = 0; i < nh_grp->count; i++) {
3474 nh = &nh_grp->nexthops[i];
3476 if (nh->should_offload != nh->offloaded) {
3477 offload_change = true;
3478 if (nh->should_offload)
3482 if (!offload_change) {
3483 /* Nothing was added or removed, so no need to reallocate. Just
3484 * update MAC on existing adjacency indexes.
3486 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3488 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3493 mlxsw_sp_nexthop_group_normalize(nh_grp);
3494 if (!nh_grp->sum_norm_weight)
3495 /* No neigh of this group is connected so we just set
3496 * the trap and let everything flow through kernel.
3500 ecmp_size = nh_grp->sum_norm_weight;
3501 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3503 /* No valid allocation size available. */
3506 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3507 ecmp_size, &adj_index);
3509 /* We ran out of KVD linear space, just set the
3510 * trap and let everything flow through kernel.
3512 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3515 old_adj_index_valid = nh_grp->adj_index_valid;
3516 old_adj_index = nh_grp->adj_index;
3517 old_ecmp_size = nh_grp->ecmp_size;
3518 nh_grp->adj_index_valid = 1;
3519 nh_grp->adj_index = adj_index;
3520 nh_grp->ecmp_size = ecmp_size;
3521 mlxsw_sp_nexthop_group_rebalance(nh_grp);
3522 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3524 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3528 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3530 if (!old_adj_index_valid) {
3531 /* The trap was set for fib entries, so we have to call
3532 * fib entry update to unset it and use adjacency index.
3534 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3536 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3542 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3543 old_adj_index, old_ecmp_size);
3544 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3545 old_ecmp_size, old_adj_index);
3547 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3554 old_adj_index_valid = nh_grp->adj_index_valid;
3555 nh_grp->adj_index_valid = 0;
3556 for (i = 0; i < nh_grp->count; i++) {
3557 nh = &nh_grp->nexthops[i];
3560 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3562 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3563 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3564 if (old_adj_index_valid)
3565 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3566 nh_grp->ecmp_size, nh_grp->adj_index);
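/* On any failure above the group falls back to trapping: the adjacency
 * index is invalidated, the FIB entries using the group are re-written to
 * trap packets to the kernel and any previously valid KVDL block is
 * freed, so forwarding continues in software instead of silently failing.
 */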
3569 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3573 nh->should_offload = 1;
3575 nh->should_offload = 0;
3580 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3581 struct mlxsw_sp_neigh_entry *neigh_entry)
3583 struct neighbour *n, *old_n = neigh_entry->key.n;
3584 struct mlxsw_sp_nexthop *nh;
3585 bool entry_connected;
3589 nh = list_first_entry(&neigh_entry->nexthop_list,
3590 struct mlxsw_sp_nexthop, neigh_list_node);
3592 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3594 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3598 neigh_event_send(n, NULL);
3601 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3602 neigh_entry->key.n = n;
3603 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3605 goto err_neigh_entry_insert;
3607 read_lock_bh(&n->lock);
3608 nud_state = n->nud_state;
3610 read_unlock_bh(&n->lock);
3611 entry_connected = nud_state & NUD_VALID && !dead;
3613 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3615 neigh_release(old_n);
3617 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3618 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3625 err_neigh_entry_insert:
3626 neigh_entry->key.n = old_n;
3627 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3633 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3634 struct mlxsw_sp_neigh_entry *neigh_entry,
3635 bool removing, bool dead)
3637 struct mlxsw_sp_nexthop *nh;
3639 if (list_empty(&neigh_entry->nexthop_list))
3645 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3648 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3652 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3654 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3655 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3659 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3660 struct mlxsw_sp_rif *rif)
3666 list_add(&nh->rif_list_node, &rif->nexthop_list);
3669 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3674 list_del(&nh->rif_list_node);
3678 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3679 struct mlxsw_sp_nexthop *nh)
3681 struct mlxsw_sp_neigh_entry *neigh_entry;
3682 struct neighbour *n;
3686 if (!nh->nh_grp->gateway || nh->neigh_entry)
3689 /* Take a reference on the neigh here, ensuring it is not
3690 * destroyed before the nexthop entry is finished with it.
3691 * The reference is taken either in neigh_lookup() or
3692 * in neigh_create() in case n is not found.
3694 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3696 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3700 neigh_event_send(n, NULL);
3702 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3704 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3705 if (IS_ERR(neigh_entry)) {
3707 goto err_neigh_entry_create;
3711 /* If that is the first nexthop connected to that neigh, add to
3712 * nexthop_neighs_list
3714 if (list_empty(&neigh_entry->nexthop_list))
3715 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3716 &mlxsw_sp->router->nexthop_neighs_list);
3718 nh->neigh_entry = neigh_entry;
3719 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3720 read_lock_bh(&n->lock);
3721 nud_state = n->nud_state;
3723 read_unlock_bh(&n->lock);
3724 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3728 err_neigh_entry_create:
3733 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3734 struct mlxsw_sp_nexthop *nh)
3736 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3737 struct neighbour *n;
3741 n = neigh_entry->key.n;
3743 __mlxsw_sp_nexthop_neigh_update(nh, true);
3744 list_del(&nh->neigh_list_node);
3745 nh->neigh_entry = NULL;
3747 /* If that is the last nexthop connected to that neigh, remove from
3748 * nexthop_neighs_list
3750 if (list_empty(&neigh_entry->nexthop_list))
3751 list_del(&neigh_entry->nexthop_neighs_list_node);
3753 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3754 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3759 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3761 struct net_device *ul_dev;
3765 ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3766 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
3772 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3773 struct mlxsw_sp_nexthop *nh,
3774 struct mlxsw_sp_ipip_entry *ipip_entry)
3778 if (!nh->nh_grp->gateway || nh->ipip_entry)
3781 nh->ipip_entry = ipip_entry;
3782 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3783 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3784 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3787 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3788 struct mlxsw_sp_nexthop *nh)
3790 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3795 __mlxsw_sp_nexthop_neigh_update(nh, true);
3796 nh->ipip_entry = NULL;
3799 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3800 const struct fib_nh *fib_nh,
3801 enum mlxsw_sp_ipip_type *p_ipipt)
3803 struct net_device *dev = fib_nh->fib_nh_dev;
3806 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3807 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3810 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3811 struct mlxsw_sp_nexthop *nh)
3814 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3815 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3816 mlxsw_sp_nexthop_rif_fini(nh);
3818 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3819 mlxsw_sp_nexthop_rif_fini(nh);
3820 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3825 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3826 struct mlxsw_sp_nexthop *nh,
3827 struct fib_nh *fib_nh)
3829 const struct mlxsw_sp_ipip_ops *ipip_ops;
3830 struct net_device *dev = fib_nh->fib_nh_dev;
3831 struct mlxsw_sp_ipip_entry *ipip_entry;
3832 struct mlxsw_sp_rif *rif;
3835 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3837 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3838 if (ipip_ops->can_offload(mlxsw_sp, dev,
3839 MLXSW_SP_L3_PROTO_IPV4)) {
3840 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3841 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3846 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3847 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3851 mlxsw_sp_nexthop_rif_init(nh, rif);
3852 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3854 goto err_neigh_init;
3859 mlxsw_sp_nexthop_rif_fini(nh);
3863 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3864 struct mlxsw_sp_nexthop *nh)
3866 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3869 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3870 struct mlxsw_sp_nexthop_group *nh_grp,
3871 struct mlxsw_sp_nexthop *nh,
3872 struct fib_nh *fib_nh)
3874 struct net_device *dev = fib_nh->fib_nh_dev;
3875 struct in_device *in_dev;
3878 nh->nh_grp = nh_grp;
3879 nh->key.fib_nh = fib_nh;
3880 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3881 nh->nh_weight = fib_nh->fib_nh_weight;
3885 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3886 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3890 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3891 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3897 in_dev = __in_dev_get_rcu(dev);
3898 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3899 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
3905 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3907 goto err_nexthop_neigh_init;
3911 err_nexthop_neigh_init:
3912 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3916 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3917 struct mlxsw_sp_nexthop *nh)
3919 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3920 list_del(&nh->router_list_node);
3921 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3922 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3925 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3926 unsigned long event, struct fib_nh *fib_nh)
3928 struct mlxsw_sp_nexthop_key key;
3929 struct mlxsw_sp_nexthop *nh;
3931 if (mlxsw_sp->router->aborted)
3934 key.fib_nh = fib_nh;
3935 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3940 case FIB_EVENT_NH_ADD:
3941 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3943 case FIB_EVENT_NH_DEL:
3944 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3948 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3951 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3952 struct mlxsw_sp_rif *rif)
3954 struct mlxsw_sp_nexthop *nh;
3957 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3959 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3962 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3963 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3970 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3971 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3975 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3976 struct mlxsw_sp_rif *old_rif,
3977 struct mlxsw_sp_rif *new_rif)
3979 struct mlxsw_sp_nexthop *nh;
3981 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3982 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3984 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3987 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3988 struct mlxsw_sp_rif *rif)
3990 struct mlxsw_sp_nexthop *nh, *tmp;
3992 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3993 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3994 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3998 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3999 struct fib_info *fi)
4001 const struct fib_nh *nh = fib_info_nh(fi, 0);
4003 return nh->fib_nh_scope == RT_SCOPE_LINK ||
4004 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
4007 static struct mlxsw_sp_nexthop_group *
4008 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4010 unsigned int nhs = fib_info_num_path(fi);
4011 struct mlxsw_sp_nexthop_group *nh_grp;
4012 struct mlxsw_sp_nexthop *nh;
4013 struct fib_nh *fib_nh;
4017 nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
4019 return ERR_PTR(-ENOMEM);
4021 INIT_LIST_HEAD(&nh_grp->fib_list);
4022 nh_grp->neigh_tbl = &arp_tbl;
4024 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
4025 nh_grp->count = nhs;
4027 for (i = 0; i < nh_grp->count; i++) {
4028 nh = &nh_grp->nexthops[i];
4029 fib_nh = fib_info_nh(fi, i);
4030 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4032 goto err_nexthop4_init;
4034 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4036 goto err_nexthop_group_insert;
4037 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4040 err_nexthop_group_insert:
4042 for (i--; i >= 0; i--) {
4043 nh = &nh_grp->nexthops[i];
4044 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4048 return ERR_PTR(err);
4052 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4053 struct mlxsw_sp_nexthop_group *nh_grp)
4055 struct mlxsw_sp_nexthop *nh;
4058 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4059 for (i = 0; i < nh_grp->count; i++) {
4060 nh = &nh_grp->nexthops[i];
4061 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4063 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4064 WARN_ON_ONCE(nh_grp->adj_index_valid);
4065 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
4069 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4070 struct mlxsw_sp_fib_entry *fib_entry,
4071 struct fib_info *fi)
4073 struct mlxsw_sp_nexthop_group *nh_grp;
4075 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4077 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4079 return PTR_ERR(nh_grp);
4081 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4082 fib_entry->nh_group = nh_grp;
4086 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4087 struct mlxsw_sp_fib_entry *fib_entry)
4089 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4091 list_del(&fib_entry->nexthop_group_node);
4092 if (!list_empty(&nh_grp->fib_list))
4094 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4098 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4100 struct mlxsw_sp_fib4_entry *fib4_entry;
4102 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4104 return !fib4_entry->tos;
4108 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4110 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4112 switch (fib_entry->fib_node->fib->proto) {
4113 case MLXSW_SP_L3_PROTO_IPV4:
4114 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4117 case MLXSW_SP_L3_PROTO_IPV6:
4121 switch (fib_entry->type) {
4122 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4123 return !!nh_group->adj_index_valid;
4124 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4125 return !!nh_group->nh_rif;
4126 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4127 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4128 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4135 static struct mlxsw_sp_nexthop *
4136 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4137 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4141 for (i = 0; i < nh_grp->count; i++) {
4142 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4143 struct fib6_info *rt = mlxsw_sp_rt6->rt;
4145 if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4146 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4147 &rt->fib6_nh->fib_nh_gw6))
4156 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4157 struct mlxsw_sp_fib_entry *fib_entry)
4159 struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4160 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4161 int dst_len = fib_entry->fib_node->key.prefix_len;
4162 struct mlxsw_sp_fib4_entry *fib4_entry;
4163 struct fib_rt_info fri;
4164 bool should_offload;
4166 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4167 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4170 fri.tb_id = fib4_entry->tb_id;
4171 fri.dst = cpu_to_be32(*p_dst);
4172 fri.dst_len = dst_len;
4173 fri.tos = fib4_entry->tos;
4174 fri.type = fib4_entry->type;
4175 fri.offload = should_offload;
4176 fri.trap = !should_offload;
4177 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4181 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4182 struct mlxsw_sp_fib_entry *fib_entry)
4184 struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4185 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4186 int dst_len = fib_entry->fib_node->key.prefix_len;
4187 struct mlxsw_sp_fib4_entry *fib4_entry;
4188 struct fib_rt_info fri;
4190 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4193 fri.tb_id = fib4_entry->tb_id;
4194 fri.dst = cpu_to_be32(*p_dst);
4195 fri.dst_len = dst_len;
4196 fri.tos = fib4_entry->tos;
4197 fri.type = fib4_entry->type;
4198 fri.offload = false;
4200 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4204 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4205 struct mlxsw_sp_fib_entry *fib_entry)
4207 struct mlxsw_sp_fib6_entry *fib6_entry;
4208 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4209 bool should_offload;
4211 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4213 /* In IPv6 a multipath route is represented using multiple routes, so
4214 * we need to set the flags on all of them.
4216 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4218 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4219 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
4224 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4225 struct mlxsw_sp_fib_entry *fib_entry)
4227 struct mlxsw_sp_fib6_entry *fib6_entry;
4228 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4230 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4232 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4233 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
4237 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4238 struct mlxsw_sp_fib_entry *fib_entry)
4240 switch (fib_entry->fib_node->fib->proto) {
4241 case MLXSW_SP_L3_PROTO_IPV4:
4242 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
4244 case MLXSW_SP_L3_PROTO_IPV6:
4245 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
4251 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4252 struct mlxsw_sp_fib_entry *fib_entry)
4254 switch (fib_entry->fib_node->fib->proto) {
4255 case MLXSW_SP_L3_PROTO_IPV4:
4256 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4258 case MLXSW_SP_L3_PROTO_IPV6:
4259 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4265 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
4266 struct mlxsw_sp_fib_entry *fib_entry,
4267 enum mlxsw_reg_ralue_op op)
4270 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4271 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
4273 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4274 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4282 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4283 const struct mlxsw_sp_fib_entry *fib_entry,
4284 enum mlxsw_reg_ralue_op op)
4286 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4287 enum mlxsw_reg_ralxx_protocol proto;
4290 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4292 switch (fib->proto) {
4293 case MLXSW_SP_L3_PROTO_IPV4:
4294 p_dip = (u32 *) fib_entry->fib_node->key.addr;
4295 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4296 fib_entry->fib_node->key.prefix_len,
4299 case MLXSW_SP_L3_PROTO_IPV6:
4300 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4301 fib_entry->fib_node->key.prefix_len,
4302 fib_entry->fib_node->key.addr);
4307 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4309 enum mlxsw_reg_ratr_trap_action trap_action;
4310 char ratr_pl[MLXSW_REG_RATR_LEN];
4313 if (mlxsw_sp->router->adj_discard_index_valid)
4316 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4317 &mlxsw_sp->router->adj_discard_index);
4321 trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4322 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4323 MLXSW_REG_RATR_TYPE_ETHERNET,
4324 mlxsw_sp->router->adj_discard_index, rif_index);
4325 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4326 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4328 goto err_ratr_write;
4330 mlxsw_sp->router->adj_discard_index_valid = true;
4335 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4336 mlxsw_sp->router->adj_discard_index);
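/* A single "discard" adjacency entry is allocated lazily and then reused:
 * mlxsw_sp_fib_entry_op_remote() below points routes whose nexthop group
 * currently has no valid adjacency index (but does have a RIF) at this
 * entry, so matching packets are discarded by the device until the group
 * gets a valid adjacency index.
 */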
4340 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4341 struct mlxsw_sp_fib_entry *fib_entry,
4342 enum mlxsw_reg_ralue_op op)
4344 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4345 char ralue_pl[MLXSW_REG_RALUE_LEN];
4346 enum mlxsw_reg_ralue_trap_action trap_action;
4348 u32 adjacency_index = 0;
4352 /* In case the nexthop group adjacency index is valid, use it
4353 * with the provided ECMP size. Otherwise, set up a trap and pass
4354 * traffic to the kernel.
4356 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4357 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4358 adjacency_index = fib_entry->nh_group->adj_index;
4359 ecmp_size = fib_entry->nh_group->ecmp_size;
4360 } else if (!nh_group->adj_index_valid && nh_group->count &&
4362 err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4363 nh_group->nh_rif->rif_index);
4366 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4367 adjacency_index = mlxsw_sp->router->adj_discard_index;
4370 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4371 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4374 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4375 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4376 adjacency_index, ecmp_size);
4377 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4380 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4381 struct mlxsw_sp_fib_entry *fib_entry,
4382 enum mlxsw_reg_ralue_op op)
4384 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4385 enum mlxsw_reg_ralue_trap_action trap_action;
4386 char ralue_pl[MLXSW_REG_RALUE_LEN];
4390 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4391 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4392 rif_index = rif->rif_index;
4394 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4395 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4398 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4399 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4401 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4404 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4405 struct mlxsw_sp_fib_entry *fib_entry,
4406 enum mlxsw_reg_ralue_op op)
4408 char ralue_pl[MLXSW_REG_RALUE_LEN];
4410 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4411 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4412 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4415 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4416 struct mlxsw_sp_fib_entry *fib_entry,
4417 enum mlxsw_reg_ralue_op op)
4419 enum mlxsw_reg_ralue_trap_action trap_action;
4420 char ralue_pl[MLXSW_REG_RALUE_LEN];
4422 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4423 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4424 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
4425 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4429 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4430 struct mlxsw_sp_fib_entry *fib_entry,
4431 enum mlxsw_reg_ralue_op op)
4433 enum mlxsw_reg_ralue_trap_action trap_action;
4434 char ralue_pl[MLXSW_REG_RALUE_LEN];
4437 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4438 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4440 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4441 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
4442 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4446 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4447 struct mlxsw_sp_fib_entry *fib_entry,
4448 enum mlxsw_reg_ralue_op op)
4450 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4451 const struct mlxsw_sp_ipip_ops *ipip_ops;
4453 if (WARN_ON(!ipip_entry))
4456 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4457 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4458 fib_entry->decap.tunnel_index);
4461 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4462 struct mlxsw_sp_fib_entry *fib_entry,
4463 enum mlxsw_reg_ralue_op op)
4465 char ralue_pl[MLXSW_REG_RALUE_LEN];
4467 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4468 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4469 fib_entry->decap.tunnel_index);
4470 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4473 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4474 struct mlxsw_sp_fib_entry *fib_entry,
4475 enum mlxsw_reg_ralue_op op)
4477 switch (fib_entry->type) {
4478 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4479 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4480 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4481 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4482 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4483 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4484 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4485 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
4486 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4487 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
4489 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4490 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4492 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4493 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4498 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4499 struct mlxsw_sp_fib_entry *fib_entry,
4500 enum mlxsw_reg_ralue_op op)
4502 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4507 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
4512 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4513 struct mlxsw_sp_fib_entry *fib_entry)
4515 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4516 MLXSW_REG_RALUE_OP_WRITE_WRITE);
4519 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4520 struct mlxsw_sp_fib_entry *fib_entry)
4522 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4523 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4527 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4528 const struct fib_entry_notifier_info *fen_info,
4529 struct mlxsw_sp_fib_entry *fib_entry)
4531 struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4532 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4533 struct mlxsw_sp_router *router = mlxsw_sp->router;
4534 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4535 struct mlxsw_sp_ipip_entry *ipip_entry;
4536 struct fib_info *fi = fen_info->fi;
4538 switch (fen_info->type) {
4540 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4541 MLXSW_SP_L3_PROTO_IPV4, dip);
4542 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4543 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4544 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4548 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
4549 MLXSW_SP_L3_PROTO_IPV4,
4553 tunnel_index = router->nve_decap_config.tunnel_index;
4554 fib_entry->decap.tunnel_index = tunnel_index;
4555 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4556 return 0;
4557 }
4558 /* fall through */
4559 case RTN_BROADCAST:
4560 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4561 return 0;
4562 case RTN_BLACKHOLE:
4563 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4564 return 0;
4565 case RTN_UNREACHABLE: /* fall through */
4566 case RTN_PROHIBIT:
4567 /* Packets hitting these routes need to be trapped, but
4568 * can do so with a lower priority than packets directed
4569 * at the host, so use action type local instead of trap.
4571 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4572 return 0;
4573 case RTN_UNICAST:
4574 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4575 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4576 else
4577 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4578 return 0;
4579 default:
4580 return -EINVAL;
4584 static void
4585 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
4586 struct mlxsw_sp_fib_entry *fib_entry)
4588 switch (fib_entry->type) {
4589 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4590 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
4597 static struct mlxsw_sp_fib4_entry *
4598 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4599 struct mlxsw_sp_fib_node *fib_node,
4600 const struct fib_entry_notifier_info *fen_info)
4602 struct mlxsw_sp_fib4_entry *fib4_entry;
4603 struct mlxsw_sp_fib_entry *fib_entry;
4606 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4607 if (!fib4_entry)
4608 return ERR_PTR(-ENOMEM);
4609 fib_entry = &fib4_entry->common;
4611 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4613 goto err_fib4_entry_type_set;
4615 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4617 goto err_nexthop4_group_get;
4619 fib4_entry->prio = fen_info->fi->fib_priority;
4620 fib4_entry->tb_id = fen_info->tb_id;
4621 fib4_entry->type = fen_info->type;
4622 fib4_entry->tos = fen_info->tos;
4624 fib_entry->fib_node = fib_node;
4626 return fib4_entry;
4628 err_nexthop4_group_get:
4629 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
4630 err_fib4_entry_type_set:
4632 return ERR_PTR(err);
4635 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4636 struct mlxsw_sp_fib4_entry *fib4_entry)
4638 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4639 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
4643 static struct mlxsw_sp_fib4_entry *
4644 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4645 const struct fib_entry_notifier_info *fen_info)
4647 struct mlxsw_sp_fib4_entry *fib4_entry;
4648 struct mlxsw_sp_fib_node *fib_node;
4649 struct mlxsw_sp_fib *fib;
4650 struct mlxsw_sp_vr *vr;
4652 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4655 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4657 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4658 sizeof(fen_info->dst),
4663 fib4_entry = container_of(fib_node->fib_entry,
4664 struct mlxsw_sp_fib4_entry, common);
4665 if (fib4_entry->tb_id == fen_info->tb_id &&
4666 fib4_entry->tos == fen_info->tos &&
4667 fib4_entry->type == fen_info->type &&
4668 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4669 fen_info->fi)
4670 return fib4_entry;
4672 return NULL;
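/* FIB nodes are kept in a per-FIB rhashtable keyed by destination prefix
 * and prefix length. Each node holds at most one offloaded entry.
 */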
4675 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4676 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4677 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4678 .key_len = sizeof(struct mlxsw_sp_fib_key),
4679 .automatic_shrinking = true,
4682 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4683 struct mlxsw_sp_fib_node *fib_node)
4685 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4686 mlxsw_sp_fib_ht_params);
4689 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4690 struct mlxsw_sp_fib_node *fib_node)
4692 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4693 mlxsw_sp_fib_ht_params);
4696 static struct mlxsw_sp_fib_node *
4697 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4698 size_t addr_len, unsigned char prefix_len)
4700 struct mlxsw_sp_fib_key key;
4702 memset(&key, 0, sizeof(key));
4703 memcpy(key.addr, addr, addr_len);
4704 key.prefix_len = prefix_len;
4705 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4708 static struct mlxsw_sp_fib_node *
4709 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4710 size_t addr_len, unsigned char prefix_len)
4712 struct mlxsw_sp_fib_node *fib_node;
4714 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4715 if (!fib_node)
4716 return NULL;
4718 list_add(&fib_node->list, &fib->node_list);
4719 memcpy(fib_node->key.addr, addr, addr_len);
4720 fib_node->key.prefix_len = prefix_len;
4725 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4727 list_del(&fib_node->list);
4728 kfree(fib_node);
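/* Link a FIB node to an LPM tree that covers its prefix length. If the
 * currently bound tree already accounts for this prefix length, only the
 * reference count is bumped. Otherwise a tree describing the new prefix
 * usage is fetched and all virtual routers using this FIB are migrated
 * to it.
 */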
4731 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4732 struct mlxsw_sp_fib_node *fib_node)
4734 struct mlxsw_sp_prefix_usage req_prefix_usage;
4735 struct mlxsw_sp_fib *fib = fib_node->fib;
4736 struct mlxsw_sp_lpm_tree *lpm_tree;
4739 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4740 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4743 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4744 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4745 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4747 if (IS_ERR(lpm_tree))
4748 return PTR_ERR(lpm_tree);
4750 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4752 goto err_lpm_tree_replace;
4755 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4758 err_lpm_tree_replace:
4759 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4763 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4764 struct mlxsw_sp_fib_node *fib_node)
4766 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4767 struct mlxsw_sp_prefix_usage req_prefix_usage;
4768 struct mlxsw_sp_fib *fib = fib_node->fib;
4771 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4773 /* Try to construct a new LPM tree from the current prefix usage
4774 * minus the unused one. If we fail, continue using the old one.
4776 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4777 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4778 fib_node->key.prefix_len);
4779 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4781 if (IS_ERR(lpm_tree))
4784 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4786 goto err_lpm_tree_replace;
4790 err_lpm_tree_replace:
4791 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4794 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4795 struct mlxsw_sp_fib_node *fib_node,
4796 struct mlxsw_sp_fib *fib)
4800 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4803 fib_node->fib = fib;
4805 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4807 goto err_fib_lpm_tree_link;
4811 err_fib_lpm_tree_link:
4812 fib_node->fib = NULL;
4813 mlxsw_sp_fib_node_remove(fib, fib_node);
4817 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4818 struct mlxsw_sp_fib_node *fib_node)
4820 struct mlxsw_sp_fib *fib = fib_node->fib;
4822 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4823 fib_node->fib = NULL;
4824 mlxsw_sp_fib_node_remove(fib, fib_node);
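/* Look up a FIB node for the given {table, prefix} and create it if it
 * does not exist. The node holds a reference on the virtual router for
 * as long as it is in use; mlxsw_sp_fib_node_put() drops it.
 */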
4827 static struct mlxsw_sp_fib_node *
4828 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4829 size_t addr_len, unsigned char prefix_len,
4830 enum mlxsw_sp_l3proto proto)
4832 struct mlxsw_sp_fib_node *fib_node;
4833 struct mlxsw_sp_fib *fib;
4834 struct mlxsw_sp_vr *vr;
4837 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4839 return ERR_CAST(vr);
4840 fib = mlxsw_sp_vr_fib(vr, proto);
4842 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4846 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4849 goto err_fib_node_create;
4852 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4854 goto err_fib_node_init;
4859 mlxsw_sp_fib_node_destroy(fib_node);
4860 err_fib_node_create:
4861 mlxsw_sp_vr_put(mlxsw_sp, vr);
4862 return ERR_PTR(err);
4865 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4866 struct mlxsw_sp_fib_node *fib_node)
4868 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4870 if (fib_node->fib_entry)
4872 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4873 mlxsw_sp_fib_node_destroy(fib_node);
4874 mlxsw_sp_vr_put(mlxsw_sp, vr);
4877 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4878 struct mlxsw_sp_fib_entry *fib_entry)
4880 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4883 fib_node->fib_entry = fib_entry;
4885 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4887 goto err_fib_entry_update;
4891 err_fib_entry_update:
4892 fib_node->fib_entry = NULL;
4897 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4898 struct mlxsw_sp_fib_entry *fib_entry)
4900 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4902 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4903 fib_node->fib_entry = NULL;
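/* A route from the main table must not replace an offloaded route from
 * the local table, since the local table is consulted before the main
 * table and the replacement would change forwarding behaviour.
 */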
4906 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
4908 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4909 struct mlxsw_sp_fib4_entry *fib4_replaced;
4911 if (!fib_node->fib_entry)
4914 fib4_replaced = container_of(fib_node->fib_entry,
4915 struct mlxsw_sp_fib4_entry, common);
4916 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
4917 fib4_replaced->tb_id == RT_TABLE_LOCAL)
4918 return false;
4920 return true;
4923 static int
4924 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
4925 const struct fib_entry_notifier_info *fen_info)
4927 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
4928 struct mlxsw_sp_fib_entry *replaced;
4929 struct mlxsw_sp_fib_node *fib_node;
4932 if (mlxsw_sp->router->aborted)
4935 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4936 &fen_info->dst, sizeof(fen_info->dst),
4938 MLXSW_SP_L3_PROTO_IPV4);
4939 if (IS_ERR(fib_node)) {
4940 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4941 return PTR_ERR(fib_node);
4944 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4945 if (IS_ERR(fib4_entry)) {
4946 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4947 err = PTR_ERR(fib4_entry);
4948 goto err_fib4_entry_create;
4951 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
4952 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4953 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4957 replaced = fib_node->fib_entry;
4958 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
4960 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4961 goto err_fib_node_entry_link;
4964 /* Nothing to replace */
4965 if (!replaced)
4966 return 0;
4968 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
4969 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
4971 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
4975 err_fib_node_entry_link:
4976 fib_node->fib_entry = replaced;
4977 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4978 err_fib4_entry_create:
4979 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4983 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4984 struct fib_entry_notifier_info *fen_info)
4986 struct mlxsw_sp_fib4_entry *fib4_entry;
4987 struct mlxsw_sp_fib_node *fib_node;
4989 if (mlxsw_sp->router->aborted)
4992 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4995 fib_node = fib4_entry->common.fib_node;
4997 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
4998 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4999 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5002 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
5004 /* Multicast routes aren't supported, so ignore them. Neighbour
5005 * Discovery packets are specifically trapped.
5007 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5010 /* Cloned routes are irrelevant in the forwarding path. */
5011 if (rt->fib6_flags & RTF_CACHE)
5017 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5019 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5021 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5022 if (!mlxsw_sp_rt6)
5023 return ERR_PTR(-ENOMEM);
5025 /* In case of route replace, replaced route is deleted with
5026 * no notification. Take reference to prevent accessing freed
5027 * memory.
5028 */
5029 mlxsw_sp_rt6->rt = rt;
5030 fib6_info_hold(rt);
5032 return mlxsw_sp_rt6;
5035 #if IS_ENABLED(CONFIG_IPV6)
5036 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5038 fib6_info_release(rt);
5040 #else
5041 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5044 #endif
5046 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5048 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
5050 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
5051 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5052 kfree(mlxsw_sp_rt6);
5055 static struct fib6_info *
5056 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5058 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5062 static struct mlxsw_sp_rt6 *
5063 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5064 const struct fib6_info *rt)
5066 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5068 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5069 if (mlxsw_sp_rt6->rt == rt)
5070 return mlxsw_sp_rt6;
5076 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5077 const struct fib6_info *rt,
5078 enum mlxsw_sp_ipip_type *ret)
5080 return rt->fib6_nh->fib_nh_dev &&
5081 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5084 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5085 struct mlxsw_sp_nexthop_group *nh_grp,
5086 struct mlxsw_sp_nexthop *nh,
5087 const struct fib6_info *rt)
5089 const struct mlxsw_sp_ipip_ops *ipip_ops;
5090 struct mlxsw_sp_ipip_entry *ipip_entry;
5091 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5092 struct mlxsw_sp_rif *rif;
5095 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5097 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5098 if (ipip_ops->can_offload(mlxsw_sp, dev,
5099 MLXSW_SP_L3_PROTO_IPV6)) {
5100 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5101 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5106 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5107 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5110 mlxsw_sp_nexthop_rif_init(nh, rif);
5112 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5114 goto err_nexthop_neigh_init;
5118 err_nexthop_neigh_init:
5119 mlxsw_sp_nexthop_rif_fini(nh);
5123 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5124 struct mlxsw_sp_nexthop *nh)
5126 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5129 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5130 struct mlxsw_sp_nexthop_group *nh_grp,
5131 struct mlxsw_sp_nexthop *nh,
5132 const struct fib6_info *rt)
5134 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5136 nh->nh_grp = nh_grp;
5137 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5138 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5139 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5141 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5145 nh->ifindex = dev->ifindex;
5147 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5150 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5151 struct mlxsw_sp_nexthop *nh)
5153 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5154 list_del(&nh->router_list_node);
5155 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5158 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5159 const struct fib6_info *rt)
5161 return rt->fib6_nh->fib_nh_gw_family ||
5162 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
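/* Build a nexthop group from the rt6 list of a FIB6 entry. Each
 * fib6_info contributes one nexthop; the group is then inserted into the
 * nexthop group hashtable so that identical groups created for other
 * routes can be shared, and its adjacency entries are programmed by the
 * refresh call.
 */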
5165 static struct mlxsw_sp_nexthop_group *
5166 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5167 struct mlxsw_sp_fib6_entry *fib6_entry)
5169 struct mlxsw_sp_nexthop_group *nh_grp;
5170 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5171 struct mlxsw_sp_nexthop *nh;
5175 nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5176 GFP_KERNEL);
5177 if (!nh_grp)
5178 return ERR_PTR(-ENOMEM);
5179 INIT_LIST_HEAD(&nh_grp->fib_list);
5180 #if IS_ENABLED(CONFIG_IPV6)
5181 nh_grp->neigh_tbl = &nd_tbl;
5182 #endif
5183 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5184 struct mlxsw_sp_rt6, list);
5185 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5186 nh_grp->count = fib6_entry->nrt6;
5187 for (i = 0; i < nh_grp->count; i++) {
5188 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5190 nh = &nh_grp->nexthops[i];
5191 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5193 goto err_nexthop6_init;
5194 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5197 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5199 goto err_nexthop_group_insert;
5201 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5204 err_nexthop_group_insert:
5206 for (i--; i >= 0; i--) {
5207 nh = &nh_grp->nexthops[i];
5208 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5211 return ERR_PTR(err);
5215 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5216 struct mlxsw_sp_nexthop_group *nh_grp)
5218 struct mlxsw_sp_nexthop *nh;
5219 int i = nh_grp->count;
5221 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5222 for (i--; i >= 0; i--) {
5223 nh = &nh_grp->nexthops[i];
5224 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5226 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5227 WARN_ON(nh_grp->adj_index_valid);
5231 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5232 struct mlxsw_sp_fib6_entry *fib6_entry)
5234 struct mlxsw_sp_nexthop_group *nh_grp;
5236 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5238 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5240 return PTR_ERR(nh_grp);
5243 list_add_tail(&fib6_entry->common.nexthop_group_node,
5245 fib6_entry->common.nh_group = nh_grp;
5247 /* The route and the nexthop are described by the same struct, so we
5248 * need to the update the nexthop offload indication for the new route.
5250 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
5255 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5256 struct mlxsw_sp_fib_entry *fib_entry)
5258 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5260 list_del(&fib_entry->nexthop_group_node);
5261 if (!list_empty(&nh_grp->fib_list))
5263 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5267 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5268 struct mlxsw_sp_fib6_entry *fib6_entry)
5270 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5273 fib6_entry->common.nh_group = NULL;
5274 list_del(&fib6_entry->common.nexthop_group_node);
5276 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5278 goto err_nexthop6_group_get;
5280 /* In case this entry is offloaded, then the adjacency index
5281 * currently associated with it in the device's table is that
5282 * of the old group. Start using the new one instead.
5284 err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
5286 goto err_fib_entry_update;
5288 if (list_empty(&old_nh_grp->fib_list))
5289 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5293 err_fib_entry_update:
5294 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5295 err_nexthop6_group_get:
5296 list_add_tail(&fib6_entry->common.nexthop_group_node,
5297 &old_nh_grp->fib_list);
5298 fib6_entry->common.nh_group = old_nh_grp;
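/* Append new rt6 entries to an existing FIB6 entry (multipath append).
 * The nexthop group is regenerated from the updated rt6 list and the
 * entry is rewritten in the device; on failure the added entries are
 * removed again.
 */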
5303 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5304 struct mlxsw_sp_fib6_entry *fib6_entry,
5305 struct fib6_info **rt_arr, unsigned int nrt6)
5307 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5310 for (i = 0; i < nrt6; i++) {
5311 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5312 if (IS_ERR(mlxsw_sp_rt6)) {
5313 err = PTR_ERR(mlxsw_sp_rt6);
5314 goto err_rt6_create;
5317 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5321 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5323 goto err_nexthop6_group_update;
5327 err_nexthop6_group_update:
5330 for (i--; i >= 0; i--) {
5332 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5333 struct mlxsw_sp_rt6, list);
5334 list_del(&mlxsw_sp_rt6->list);
5335 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5341 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5342 struct mlxsw_sp_fib6_entry *fib6_entry,
5343 struct fib6_info **rt_arr, unsigned int nrt6)
5345 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5348 for (i = 0; i < nrt6; i++) {
5349 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5351 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5355 list_del(&mlxsw_sp_rt6->list);
5356 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5359 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5362 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5363 struct mlxsw_sp_fib_entry *fib_entry,
5364 const struct fib6_info *rt)
5366 /* Packets hitting RTF_REJECT routes need to be discarded by the
5367 * stack. We can rely on their destination device not having a
5368 * RIF (it's the loopback device) and can thus use action type
5369 * local, which will cause them to be trapped with a lower
5370 * priority than packets that need to be locally received.
5372 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5373 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5374 else if (rt->fib6_type == RTN_BLACKHOLE)
5375 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5376 else if (rt->fib6_flags & RTF_REJECT)
5377 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5378 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5379 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5381 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5385 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5387 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5389 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5392 list_del(&mlxsw_sp_rt6->list);
5393 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5397 static struct mlxsw_sp_fib6_entry *
5398 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5399 struct mlxsw_sp_fib_node *fib_node,
5400 struct fib6_info **rt_arr, unsigned int nrt6)
5402 struct mlxsw_sp_fib6_entry *fib6_entry;
5403 struct mlxsw_sp_fib_entry *fib_entry;
5404 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5407 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5408 if (!fib6_entry)
5409 return ERR_PTR(-ENOMEM);
5410 fib_entry = &fib6_entry->common;
5412 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5414 for (i = 0; i < nrt6; i++) {
5415 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5416 if (IS_ERR(mlxsw_sp_rt6)) {
5417 err = PTR_ERR(mlxsw_sp_rt6);
5418 goto err_rt6_create;
5420 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5424 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5426 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5428 goto err_nexthop6_group_get;
5430 fib_entry->fib_node = fib_node;
5434 err_nexthop6_group_get:
5437 for (i--; i >= 0; i--) {
5439 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5440 struct mlxsw_sp_rt6, list);
5441 list_del(&mlxsw_sp_rt6->list);
5442 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5445 return ERR_PTR(err);
5448 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5449 struct mlxsw_sp_fib6_entry *fib6_entry)
5451 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5452 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5453 WARN_ON(fib6_entry->nrt6);
5457 static struct mlxsw_sp_fib6_entry *
5458 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5459 const struct fib6_info *rt)
5461 struct mlxsw_sp_fib6_entry *fib6_entry;
5462 struct mlxsw_sp_fib_node *fib_node;
5463 struct mlxsw_sp_fib *fib;
5464 struct fib6_info *cmp_rt;
5465 struct mlxsw_sp_vr *vr;
5467 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5470 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5472 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5473 sizeof(rt->fib6_dst.addr),
5478 fib6_entry = container_of(fib_node->fib_entry,
5479 struct mlxsw_sp_fib6_entry, common);
5480 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5481 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
5482 rt->fib6_metric == cmp_rt->fib6_metric &&
5483 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5489 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
5491 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5492 struct mlxsw_sp_fib6_entry *fib6_replaced;
5493 struct fib6_info *rt, *rt_replaced;
5495 if (!fib_node->fib_entry)
5498 fib6_replaced = container_of(fib_node->fib_entry,
5499 struct mlxsw_sp_fib6_entry,
5501 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5502 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
5503 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
5504 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
5505 return false;
5507 return true;
5510 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
5511 struct fib6_info **rt_arr,
5514 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
5515 struct mlxsw_sp_fib_entry *replaced;
5516 struct mlxsw_sp_fib_node *fib_node;
5517 struct fib6_info *rt = rt_arr[0];
5520 if (mlxsw_sp->router->aborted)
5523 if (rt->fib6_src.plen)
5526 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5529 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5531 sizeof(rt->fib6_dst.addr),
5533 MLXSW_SP_L3_PROTO_IPV6);
5534 if (IS_ERR(fib_node))
5535 return PTR_ERR(fib_node);
5537 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5539 if (IS_ERR(fib6_entry)) {
5540 err = PTR_ERR(fib6_entry);
5541 goto err_fib6_entry_create;
5544 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
5545 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5546 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5550 replaced = fib_node->fib_entry;
5551 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
5553 goto err_fib_node_entry_link;
5555 /* Nothing to replace */
5556 if (!replaced)
5557 return 0;
5559 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5560 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
5562 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
5566 err_fib_node_entry_link:
5567 fib_node->fib_entry = replaced;
5568 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5569 err_fib6_entry_create:
5570 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5574 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
5575 struct fib6_info **rt_arr,
5578 struct mlxsw_sp_fib6_entry *fib6_entry;
5579 struct mlxsw_sp_fib_node *fib_node;
5580 struct fib6_info *rt = rt_arr[0];
5583 if (mlxsw_sp->router->aborted)
5586 if (rt->fib6_src.plen)
5589 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5592 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5594 sizeof(rt->fib6_dst.addr),
5596 MLXSW_SP_L3_PROTO_IPV6);
5597 if (IS_ERR(fib_node))
5598 return PTR_ERR(fib_node);
5600 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
5601 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5605 fib6_entry = container_of(fib_node->fib_entry,
5606 struct mlxsw_sp_fib6_entry, common);
5607 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
5610 goto err_fib6_entry_nexthop_add;
5614 err_fib6_entry_nexthop_add:
5615 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5619 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5620 struct fib6_info **rt_arr,
5623 struct mlxsw_sp_fib6_entry *fib6_entry;
5624 struct mlxsw_sp_fib_node *fib_node;
5625 struct fib6_info *rt = rt_arr[0];
5627 if (mlxsw_sp->router->aborted)
5630 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5633 /* Multipath routes are first added to the FIB trie and only then
5634 * notified. If we vetoed the addition, we will get a delete
5635 * notification for a route we do not have. Therefore, do not warn if
5636 * route was not found.
5638 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5642 /* If not all the nexthops are deleted, then only reduce the nexthop
5645 if (nrt6 != fib6_entry->nrt6) {
5646 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
5651 fib_node = fib6_entry->common.fib_node;
5653 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
5654 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5655 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
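/* In abort mode a minimal LPM tree is bound to every virtual router and
 * a default route that traps packets to the CPU is installed, so that
 * routing falls back to the kernel while offload is disabled.
 */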
5658 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5659 enum mlxsw_reg_ralxx_protocol proto,
5662 char ralta_pl[MLXSW_REG_RALTA_LEN];
5663 char ralst_pl[MLXSW_REG_RALST_LEN];
5666 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5667 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5671 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5672 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5676 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5677 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5678 char raltb_pl[MLXSW_REG_RALTB_LEN];
5679 char ralue_pl[MLXSW_REG_RALUE_LEN];
5681 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5682 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5687 mlxsw_reg_ralue_pack(ralue_pl, proto,
5688 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5689 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5690 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5699 static struct mlxsw_sp_mr_table *
5700 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5702 if (family == RTNL_FAMILY_IPMR)
5703 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5705 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5708 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5709 struct mfc_entry_notifier_info *men_info,
5712 struct mlxsw_sp_mr_table *mrt;
5713 struct mlxsw_sp_vr *vr;
5715 if (mlxsw_sp->router->aborted)
5718 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5722 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5723 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5726 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5727 struct mfc_entry_notifier_info *men_info)
5729 struct mlxsw_sp_mr_table *mrt;
5730 struct mlxsw_sp_vr *vr;
5732 if (mlxsw_sp->router->aborted)
5735 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5739 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5740 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5741 mlxsw_sp_vr_put(mlxsw_sp, vr);
5745 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5746 struct vif_entry_notifier_info *ven_info)
5748 struct mlxsw_sp_mr_table *mrt;
5749 struct mlxsw_sp_rif *rif;
5750 struct mlxsw_sp_vr *vr;
5752 if (mlxsw_sp->router->aborted)
5755 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5759 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5760 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5761 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5762 ven_info->vif_index,
5763 ven_info->vif_flags, rif);
5767 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5768 struct vif_entry_notifier_info *ven_info)
5770 struct mlxsw_sp_mr_table *mrt;
5771 struct mlxsw_sp_vr *vr;
5773 if (mlxsw_sp->router->aborted)
5776 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5780 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5781 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5782 mlxsw_sp_vr_put(mlxsw_sp, vr);
5785 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5787 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5790 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5791 MLXSW_SP_LPM_TREE_MIN);
5795 /* The multicast router code does not need an abort trap as by default,
5796 * packets that don't match any routes are trapped to the CPU.
5799 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5800 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5801 MLXSW_SP_LPM_TREE_MIN + 1);
5804 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5805 struct mlxsw_sp_fib_node *fib_node)
5807 struct mlxsw_sp_fib4_entry *fib4_entry;
5809 fib4_entry = container_of(fib_node->fib_entry,
5810 struct mlxsw_sp_fib4_entry, common);
5811 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5812 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5813 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5816 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5817 struct mlxsw_sp_fib_node *fib_node)
5819 struct mlxsw_sp_fib6_entry *fib6_entry;
5821 fib6_entry = container_of(fib_node->fib_entry,
5822 struct mlxsw_sp_fib6_entry, common);
5823 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5824 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5825 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5828 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5829 struct mlxsw_sp_fib_node *fib_node)
5831 switch (fib_node->fib->proto) {
5832 case MLXSW_SP_L3_PROTO_IPV4:
5833 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5835 case MLXSW_SP_L3_PROTO_IPV6:
5836 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5841 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5842 struct mlxsw_sp_vr *vr,
5843 enum mlxsw_sp_l3proto proto)
5845 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5846 struct mlxsw_sp_fib_node *fib_node, *tmp;
5848 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5849 bool do_break = &tmp->list == &fib->node_list;
5851 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5857 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5861 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5862 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5864 if (!mlxsw_sp_vr_is_used(vr))
5867 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5868 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5869 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5871 /* If virtual router was only used for IPv4, then it's no
5874 if (!mlxsw_sp_vr_is_used(vr))
5876 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5879 /* After flushing all the routes, it is not possible anyone is still
5880 * using the adjacency index that is discarding packets, so free it in
5881 * case it was allocated.
5883 if (!mlxsw_sp->router->adj_discard_index_valid)
5885 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5886 mlxsw_sp->router->adj_discard_index);
5887 mlxsw_sp->router->adj_discard_index_valid = false;
5890 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
5894 if (mlxsw_sp->router->aborted)
5896 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
5897 mlxsw_sp_router_fib_flush(mlxsw_sp);
5898 mlxsw_sp->router->aborted = true;
5899 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5901 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5904 struct mlxsw_sp_fib6_event_work {
5905 struct fib6_info **rt_arr;
5909 struct mlxsw_sp_fib_event_work {
5910 struct work_struct work;
5912 struct mlxsw_sp_fib6_event_work fib6_work;
5913 struct fib_entry_notifier_info fen_info;
5914 struct fib_rule_notifier_info fr_info;
5915 struct fib_nh_notifier_info fnh_info;
5916 struct mfc_entry_notifier_info men_info;
5917 struct vif_entry_notifier_info ven_info;
5919 struct mlxsw_sp *mlxsw_sp;
5920 unsigned long event;
5921 };
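/* FIB notifications arrive in atomic context, so the relevant info is
 * copied into a work item and processed from process context under the
 * router lock. For IPv6, a route and its siblings are batched into one
 * array so the whole multipath route is handled atomically.
 */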
5924 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
5925 struct fib6_entry_notifier_info *fen6_info)
5927 struct fib6_info *rt = fen6_info->rt;
5928 struct fib6_info **rt_arr;
5929 struct fib6_info *iter;
5933 nrt6 = fen6_info->nsiblings + 1;
5935 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
5936 if (!rt_arr)
5937 return -ENOMEM;
5939 fib6_work->rt_arr = rt_arr;
5940 fib6_work->nrt6 = nrt6;
5945 if (!fen6_info->nsiblings)
5948 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
5949 if (i == fen6_info->nsiblings)
5952 rt_arr[i + 1] = iter;
5953 fib6_info_hold(iter);
5956 WARN_ON_ONCE(i != fen6_info->nsiblings);
5962 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
5966 for (i = 0; i < fib6_work->nrt6; i++)
5967 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
5968 kfree(fib6_work->rt_arr);
5971 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
5973 struct mlxsw_sp_fib_event_work *fib_work =
5974 container_of(work, struct mlxsw_sp_fib_event_work, work);
5975 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5978 mutex_lock(&mlxsw_sp->router->lock);
5979 mlxsw_sp_span_respin(mlxsw_sp);
5981 switch (fib_work->event) {
5982 case FIB_EVENT_ENTRY_REPLACE:
5983 err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
5984 &fib_work->fen_info);
5986 mlxsw_sp_router_fib_abort(mlxsw_sp);
5987 fib_info_put(fib_work->fen_info.fi);
5989 case FIB_EVENT_ENTRY_DEL:
5990 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5991 fib_info_put(fib_work->fen_info.fi);
5993 case FIB_EVENT_NH_ADD: /* fall through */
5994 case FIB_EVENT_NH_DEL:
5995 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5996 fib_work->fnh_info.fib_nh);
5997 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
6000 mutex_unlock(&mlxsw_sp->router->lock);
6004 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
6006 struct mlxsw_sp_fib_event_work *fib_work =
6007 container_of(work, struct mlxsw_sp_fib_event_work, work);
6008 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6011 mutex_lock(&mlxsw_sp->router->lock);
6012 mlxsw_sp_span_respin(mlxsw_sp);
6014 switch (fib_work->event) {
6015 case FIB_EVENT_ENTRY_REPLACE:
6016 err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
6017 fib_work->fib6_work.rt_arr,
6018 fib_work->fib6_work.nrt6);
6020 mlxsw_sp_router_fib_abort(mlxsw_sp);
6021 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6023 case FIB_EVENT_ENTRY_APPEND:
6024 err = mlxsw_sp_router_fib6_append(mlxsw_sp,
6025 fib_work->fib6_work.rt_arr,
6026 fib_work->fib6_work.nrt6);
6028 mlxsw_sp_router_fib_abort(mlxsw_sp);
6029 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6031 case FIB_EVENT_ENTRY_DEL:
6032 mlxsw_sp_router_fib6_del(mlxsw_sp,
6033 fib_work->fib6_work.rt_arr,
6034 fib_work->fib6_work.nrt6);
6035 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6038 mutex_unlock(&mlxsw_sp->router->lock);
6042 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
6044 struct mlxsw_sp_fib_event_work *fib_work =
6045 container_of(work, struct mlxsw_sp_fib_event_work, work);
6046 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6051 mutex_lock(&mlxsw_sp->router->lock);
6052 switch (fib_work->event) {
6053 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6054 case FIB_EVENT_ENTRY_ADD:
6055 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6057 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
6060 mlxsw_sp_router_fib_abort(mlxsw_sp);
6061 mr_cache_put(fib_work->men_info.mfc);
6063 case FIB_EVENT_ENTRY_DEL:
6064 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
6065 mr_cache_put(fib_work->men_info.mfc);
6067 case FIB_EVENT_VIF_ADD:
6068 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6069 &fib_work->ven_info);
6071 mlxsw_sp_router_fib_abort(mlxsw_sp);
6072 dev_put(fib_work->ven_info.dev);
6074 case FIB_EVENT_VIF_DEL:
6075 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
6076 &fib_work->ven_info);
6077 dev_put(fib_work->ven_info.dev);
6080 mutex_unlock(&mlxsw_sp->router->lock);
6085 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
6086 struct fib_notifier_info *info)
6088 struct fib_entry_notifier_info *fen_info;
6089 struct fib_nh_notifier_info *fnh_info;
6091 switch (fib_work->event) {
6092 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6093 case FIB_EVENT_ENTRY_DEL:
6094 fen_info = container_of(info, struct fib_entry_notifier_info,
6096 fib_work->fen_info = *fen_info;
6097 /* Take reference on fib_info to prevent it from being
6098 * freed while work is queued. Release it afterwards.
6100 fib_info_hold(fib_work->fen_info.fi);
6102 case FIB_EVENT_NH_ADD: /* fall through */
6103 case FIB_EVENT_NH_DEL:
6104 fnh_info = container_of(info, struct fib_nh_notifier_info,
6106 fib_work->fnh_info = *fnh_info;
6107 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
6112 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
6113 struct fib_notifier_info *info)
6115 struct fib6_entry_notifier_info *fen6_info;
6118 switch (fib_work->event) {
6119 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6120 case FIB_EVENT_ENTRY_APPEND: /* fall through */
6121 case FIB_EVENT_ENTRY_DEL:
6122 fen6_info = container_of(info, struct fib6_entry_notifier_info,
6124 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
6135 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
6136 struct fib_notifier_info *info)
6138 switch (fib_work->event) {
6139 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6140 case FIB_EVENT_ENTRY_ADD: /* fall through */
6141 case FIB_EVENT_ENTRY_DEL:
6142 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
6143 mr_cache_hold(fib_work->men_info.mfc);
6145 case FIB_EVENT_VIF_ADD: /* fall through */
6146 case FIB_EVENT_VIF_DEL:
6147 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
6148 dev_hold(fib_work->ven_info.dev);
6153 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6154 struct fib_notifier_info *info,
6155 struct mlxsw_sp *mlxsw_sp)
6157 struct netlink_ext_ack *extack = info->extack;
6158 struct fib_rule_notifier_info *fr_info;
6159 struct fib_rule *rule;
6162 /* nothing to do at the moment */
6163 if (event == FIB_EVENT_RULE_DEL)
6166 if (mlxsw_sp->router->aborted)
6169 fr_info = container_of(info, struct fib_rule_notifier_info, info);
6170 rule = fr_info->rule;
6172 /* Rule only affects locally generated traffic */
6173 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6176 switch (info->family) {
6178 if (!fib4_rule_default(rule) && !rule->l3mdev)
6182 if (!fib6_rule_default(rule) && !rule->l3mdev)
6185 case RTNL_FAMILY_IPMR:
6186 if (!ipmr_rule_default(rule) && !rule->l3mdev)
6189 case RTNL_FAMILY_IP6MR:
6190 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6196 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6201 /* Called with rcu_read_lock() */
6202 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6203 unsigned long event, void *ptr)
6205 struct mlxsw_sp_fib_event_work *fib_work;
6206 struct fib_notifier_info *info = ptr;
6207 struct mlxsw_sp_router *router;
6210 if ((info->family != AF_INET && info->family != AF_INET6 &&
6211 info->family != RTNL_FAMILY_IPMR &&
6212 info->family != RTNL_FAMILY_IP6MR))
6215 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6218 case FIB_EVENT_RULE_ADD: /* fall through */
6219 case FIB_EVENT_RULE_DEL:
6220 err = mlxsw_sp_router_fib_rule_event(event, info,
6222 return notifier_from_errno(err);
6223 case FIB_EVENT_ENTRY_ADD: /* fall through */
6224 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6225 case FIB_EVENT_ENTRY_APPEND:
6226 if (router->aborted) {
6227 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6228 return notifier_from_errno(-EINVAL);
6230 if (info->family == AF_INET) {
6231 struct fib_entry_notifier_info *fen_info = ptr;
6233 if (fen_info->fi->fib_nh_is_v6) {
6234 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6235 return notifier_from_errno(-EINVAL);
6237 if (fen_info->fi->nh) {
6238 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6239 return notifier_from_errno(-EINVAL);
6241 } else if (info->family == AF_INET6) {
6242 struct fib6_entry_notifier_info *fen6_info;
6244 fen6_info = container_of(info,
6245 struct fib6_entry_notifier_info,
6247 if (fen6_info->rt->nh) {
6248 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6249 return notifier_from_errno(-EINVAL);
6255 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6256 if (!fib_work)
6257 return NOTIFY_BAD;
6259 fib_work->mlxsw_sp = router->mlxsw_sp;
6260 fib_work->event = event;
6262 switch (info->family) {
6264 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6265 mlxsw_sp_router_fib4_event(fib_work, info);
6268 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6269 err = mlxsw_sp_router_fib6_event(fib_work, info);
6273 case RTNL_FAMILY_IP6MR:
6274 case RTNL_FAMILY_IPMR:
6275 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6276 mlxsw_sp_router_fibmr_event(fib_work, info);
6280 mlxsw_core_schedule_work(&fib_work->work);
6282 return NOTIFY_DONE;
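/* RIFs are tracked in a flat array indexed by RIF index; lookups by
 * netdevice scan this array linearly. Callers are expected to hold the
 * router lock.
 */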
6289 static struct mlxsw_sp_rif *
6290 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6291 const struct net_device *dev)
6295 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6296 if (mlxsw_sp->router->rifs[i] &&
6297 mlxsw_sp->router->rifs[i]->dev == dev)
6298 return mlxsw_sp->router->rifs[i];
6303 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
6304 const struct net_device *dev)
6306 struct mlxsw_sp_rif *rif;
6308 mutex_lock(&mlxsw_sp->router->lock);
6309 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6310 mutex_unlock(&mlxsw_sp->router->lock);
6315 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
6317 struct mlxsw_sp_rif *rif;
6320 mutex_lock(&mlxsw_sp->router->lock);
6321 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6325 /* We only return the VID for VLAN RIFs. Otherwise we return an
6326 * invalid value (0).
6328 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
6331 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6334 mutex_unlock(&mlxsw_sp->router->lock);
6338 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6340 char ritr_pl[MLXSW_REG_RITR_LEN];
6343 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6344 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6348 mlxsw_reg_ritr_enable_set(ritr_pl, false);
6349 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6352 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6353 struct mlxsw_sp_rif *rif)
6355 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6356 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6357 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
6361 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6362 unsigned long event)
6364 struct inet6_dev *inet6_dev;
6365 bool addr_list_empty = true;
6366 struct in_device *idev;
6373 idev = __in_dev_get_rcu(dev);
6374 if (idev && idev->ifa_list)
6375 addr_list_empty = false;
6377 inet6_dev = __in6_dev_get(dev);
6378 if (addr_list_empty && inet6_dev &&
6379 !list_empty(&inet6_dev->addr_list))
6380 addr_list_empty = false;
6383 /* macvlans do not have a RIF, but rather piggy back on the
6384 * RIF of their lower device.
6386 if (netif_is_macvlan(dev) && addr_list_empty)
6389 if (rif && addr_list_empty &&
6390 !netif_is_l3_slave(rif->dev))
6391 return true;
6392 /* It is possible we already removed the RIF ourselves
6393 * if it was assigned to a netdev that is now a bridge
6394 * or a LAG slave.
6395 */
6396 return false;
6402 static enum mlxsw_sp_rif_type
6403 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6404 const struct net_device *dev)
6406 enum mlxsw_sp_fid_type type;
6408 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6409 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6411 /* Otherwise RIF type is derived from the type of the underlying FID. */
6412 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6413 type = MLXSW_SP_FID_TYPE_8021Q;
6414 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6415 type = MLXSW_SP_FID_TYPE_8021Q;
6416 else if (netif_is_bridge_master(dev))
6417 type = MLXSW_SP_FID_TYPE_8021D;
6419 type = MLXSW_SP_FID_TYPE_RFID;
6421 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6424 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6428 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6429 if (!mlxsw_sp->router->rifs[i]) {
6438 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6440 struct net_device *l3_dev)
6442 struct mlxsw_sp_rif *rif;
6444 rif = kzalloc(rif_size, GFP_KERNEL);
6445 if (!rif)
6446 return NULL;
6448 INIT_LIST_HEAD(&rif->nexthop_list);
6449 INIT_LIST_HEAD(&rif->neigh_list);
6451 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6452 rif->mtu = l3_dev->mtu;
6456 rif->rif_index = rif_index;
6461 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6464 return mlxsw_sp->router->rifs[rif_index];
6467 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6469 return rif->rif_index;
6472 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6474 return lb_rif->common.rif_index;
6477 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6479 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6480 struct mlxsw_sp_vr *ul_vr;
6482 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6483 if (WARN_ON(IS_ERR(ul_vr)))
6489 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6491 return lb_rif->ul_rif_id;
6494 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6496 return rif->dev->ifindex;
6499 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6501 return rif->dev;
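/* Create a router interface (RIF) for a netdevice: resolve the RIF type
 * and its operations, bind the RIF to a virtual router and FID, program
 * it in the device and register it with the multicast routing tables.
 */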
6504 static struct mlxsw_sp_rif *
6505 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6506 const struct mlxsw_sp_rif_params *params,
6507 struct netlink_ext_ack *extack)
6509 u32 tb_id = l3mdev_fib_table(params->dev);
6510 const struct mlxsw_sp_rif_ops *ops;
6511 struct mlxsw_sp_fid *fid = NULL;
6512 enum mlxsw_sp_rif_type type;
6513 struct mlxsw_sp_rif *rif;
6514 struct mlxsw_sp_vr *vr;
6518 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6519 ops = mlxsw_sp->rif_ops_arr[type];
6521 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6523 return ERR_CAST(vr);
6526 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6528 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6529 goto err_rif_index_alloc;
6532 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6538 mlxsw_sp->router->rifs[rif_index] = rif;
6539 rif->mlxsw_sp = mlxsw_sp;
6543 fid = ops->fid_get(rif, extack);
6552 ops->setup(rif, params);
6554 err = ops->configure(rif);
6558 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6559 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6561 goto err_mr_rif_add;
6564 mlxsw_sp_rif_counters_alloc(rif);
6569 for (i--; i >= 0; i--)
6570 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6571 ops->deconfigure(rif);
6574 mlxsw_sp_fid_put(fid);
6576 mlxsw_sp->router->rifs[rif_index] = NULL;
6580 err_rif_index_alloc:
6582 mlxsw_sp_vr_put(mlxsw_sp, vr);
6583 return ERR_PTR(err);
6586 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6588 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6589 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6590 struct mlxsw_sp_fid *fid = rif->fid;
6591 struct mlxsw_sp_vr *vr;
6594 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6595 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6597 mlxsw_sp_rif_counters_free(rif);
6598 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6599 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6600 ops->deconfigure(rif);
6602 /* Loopback RIFs are not associated with a FID. */
6603 mlxsw_sp_fid_put(fid);
6604 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6608 mlxsw_sp_vr_put(mlxsw_sp, vr);
6611 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6612 struct net_device *dev)
6614 struct mlxsw_sp_rif *rif;
6616 mutex_lock(&mlxsw_sp->router->lock);
6617 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6620 mlxsw_sp_rif_destroy(rif);
6622 mutex_unlock(&mlxsw_sp->router->lock);
6626 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6627 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6629 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6631 params->vid = mlxsw_sp_port_vlan->vid;
6632 params->lag = mlxsw_sp_port->lagged;
6634 params->lag_id = mlxsw_sp_port->lag_id;
6636 params->system_port = mlxsw_sp_port->local_port;
6639 static struct mlxsw_sp_rif_subport *
6640 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6642 return container_of(rif, struct mlxsw_sp_rif_subport, common);
6645 static struct mlxsw_sp_rif *
6646 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6647 const struct mlxsw_sp_rif_params *params,
6648 struct netlink_ext_ack *extack)
6650 struct mlxsw_sp_rif_subport *rif_subport;
6651 struct mlxsw_sp_rif *rif;
6653 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
6655 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
6657 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6658 refcount_inc(&rif_subport->ref_count);
6662 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
6664 struct mlxsw_sp_rif_subport *rif_subport;
6666 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6667 if (!refcount_dec_and_test(&rif_subport->ref_count))
6670 mlxsw_sp_rif_destroy(rif);
6674 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6675 struct net_device *l3_dev,
6676 struct netlink_ext_ack *extack)
6678 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6679 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
6680 struct mlxsw_sp_rif_params params = {
6681 .dev = l3_dev,
6682 };
6683 u16 vid = mlxsw_sp_port_vlan->vid;
6684 struct mlxsw_sp_rif *rif;
6685 struct mlxsw_sp_fid *fid;
6688 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
6689 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
6691 return PTR_ERR(rif);
6693 /* FID was already created, just take a reference */
6694 fid = rif->ops->fid_get(rif, extack);
6695 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6697 goto err_fid_port_vid_map;
6699 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
6701 goto err_port_vid_learning_set;
6703 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
6704 BR_STATE_FORWARDING);
6706 goto err_port_vid_stp_set;
6708 mlxsw_sp_port_vlan->fid = fid;
6712 err_port_vid_stp_set:
6713 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6714 err_port_vid_learning_set:
6715 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6716 err_fid_port_vid_map:
6717 mlxsw_sp_fid_put(fid);
6718 mlxsw_sp_rif_subport_put(rif);
6723 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6725 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6726 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
6727 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
6728 u16 vid = mlxsw_sp_port_vlan->vid;
6730 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6733 mlxsw_sp_port_vlan->fid = NULL;
6734 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6735 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6736 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6737 mlxsw_sp_fid_put(fid);
6738 mlxsw_sp_rif_subport_put(rif);
6742 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6744 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
6746 mutex_lock(&mlxsw_sp->router->lock);
6747 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6748 mutex_unlock(&mlxsw_sp->router->lock);
6751 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6752 struct net_device *port_dev,
6753 unsigned long event, u16 vid,
6754 struct netlink_ext_ack *extack)
6756 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
6757 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
6759 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
6760 if (WARN_ON(!mlxsw_sp_port_vlan))
6765 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
6768 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6775 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
6776 unsigned long event,
6777 struct netlink_ext_ack *extack)
6779 if (netif_is_bridge_port(port_dev) ||
6780 netif_is_lag_port(port_dev) ||
6781 netif_is_ovs_port(port_dev))
6784 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
6785 MLXSW_SP_DEFAULT_VID, extack);
6788 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6789 struct net_device *lag_dev,
6790 unsigned long event, u16 vid,
6791 struct netlink_ext_ack *extack)
6793 struct net_device *port_dev;
6794 struct list_head *iter;
6797 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6798 if (mlxsw_sp_port_dev_check(port_dev)) {
6799 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6811 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
6812 unsigned long event,
6813 struct netlink_ext_ack *extack)
6815 if (netif_is_bridge_port(lag_dev))
6818 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
6819 MLXSW_SP_DEFAULT_VID, extack);
6822 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
6823 struct net_device *l3_dev,
6824 unsigned long event,
6825 struct netlink_ext_ack *extack)
6827 struct mlxsw_sp_rif_params params = {
6828 .dev = l3_dev,
6829 };
6830 struct mlxsw_sp_rif *rif;
6834 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6836 return PTR_ERR(rif);
6839 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6840 mlxsw_sp_rif_destroy(rif);
6847 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
6848 struct net_device *vlan_dev,
6849 unsigned long event,
6850 struct netlink_ext_ack *extack)
6852 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6853 u16 vid = vlan_dev_vlan_id(vlan_dev);
6855 if (netif_is_bridge_port(vlan_dev))
6858 if (mlxsw_sp_port_dev_check(real_dev))
6859 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
6860 event, vid, extack);
6861 else if (netif_is_lag_master(real_dev))
6862 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
6864 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
6865 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
6871 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
6873 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
6874 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6876 return ether_addr_equal_masked(mac, vrrp4, mask);
6879 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
6881 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
6882 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6884 return ether_addr_equal_masked(mac, vrrp6, mask);
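/* VRRP virtual routers use the IANA-assigned MAC prefixes
 * 00:00:5e:00:01:xx (IPv4) and 00:00:5e:00:02:xx (IPv6), where the last
 * byte is the VRID. The helpers above therefore mask out the last byte,
 * and mlxsw_sp_rif_vrrp_op() below takes the VRID from mac[5] when
 * updating RITR.
 */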
6887 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
6888 const u8 *mac, bool adding)
6890 char ritr_pl[MLXSW_REG_RITR_LEN];
6891 u8 vrrp_id = adding ? mac[5] : 0;
6894 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
6895 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
6898 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
6899 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6903 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
6904 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
6906 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
6908 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6911 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
6912 const struct net_device *macvlan_dev,
6913 struct netlink_ext_ack *extack)
6915 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6916 struct mlxsw_sp_rif *rif;
6919 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6921 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6925 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6926 mlxsw_sp_fid_index(rif->fid), true);
6930 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
6931 macvlan_dev->dev_addr, true);
6933 goto err_rif_vrrp_add;
6935 	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
6938 if (rif->ops->fdb_del)
6939 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
6944 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6945 mlxsw_sp_fid_index(rif->fid), false);
6949 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6950 const struct net_device *macvlan_dev)
6952 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6953 struct mlxsw_sp_rif *rif;
6955 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6956 /* If we do not have a RIF, then we already took care of
6957 	 * removing the macvlan's MAC during RIF deletion.
	 */
	if (!rif)
		return;
6961 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
6963 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6964 mlxsw_sp_fid_index(rif->fid), false);
6967 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6968 const struct net_device *macvlan_dev)
6970 mutex_lock(&mlxsw_sp->router->lock);
6971 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6972 mutex_unlock(&mlxsw_sp->router->lock);
6975 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
6976 struct net_device *macvlan_dev,
6977 unsigned long event,
6978 struct netlink_ext_ack *extack)
6982 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
6984 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6991 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
6992 struct net_device *dev,
6993 const unsigned char *dev_addr,
6994 struct netlink_ext_ack *extack)
6996 struct mlxsw_sp_rif *rif;
6999 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
	 * direct more MAC addresses to the router.
	 */
7002 if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7005 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7006 rif = mlxsw_sp->router->rifs[i];
7007 if (rif && rif->ops &&
7008 rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
7010 if (rif && rif->dev && rif->dev != dev &&
7011 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7012 mlxsw_sp->mac_mask)) {
7013 NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
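/* The device requires all router interface MAC addresses to share a
 * common prefix: the bits covered by mlxsw_sp->mac_mask must be
 * identical across RIFs, which is what the masked comparison above
 * enforces before a new address is accepted.
 */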
7021 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7022 struct net_device *dev,
7023 unsigned long event,
7024 struct netlink_ext_ack *extack)
7026 if (mlxsw_sp_port_dev_check(dev))
7027 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7028 else if (netif_is_lag_master(dev))
7029 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7030 else if (netif_is_bridge_master(dev))
7031 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7033 else if (is_vlan_dev(dev))
7034 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7036 else if (netif_is_macvlan(dev))
7037 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7043 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7044 unsigned long event, void *ptr)
7046 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7047 struct net_device *dev = ifa->ifa_dev->dev;
7048 struct mlxsw_sp_router *router;
7049 struct mlxsw_sp_rif *rif;
7052 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7053 if (event == NETDEV_UP)
7056 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7057 mutex_lock(&router->lock);
7058 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7059 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7062 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7064 mutex_unlock(&router->lock);
7065 return notifier_from_errno(err);
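/* NETDEV_UP is intentionally not handled here but in the validator
 * notifier below: the validator runs before the address is installed, so
 * a configuration failure can veto the change and be reported through
 * extack.
 */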
7068 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7069 unsigned long event, void *ptr)
7071 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7072 struct net_device *dev = ivi->ivi_dev->dev;
7073 struct mlxsw_sp *mlxsw_sp;
7074 struct mlxsw_sp_rif *rif;
7077 mlxsw_sp = mlxsw_sp_lower_get(dev);
7081 mutex_lock(&mlxsw_sp->router->lock);
7082 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7083 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7086 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7091 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7093 mutex_unlock(&mlxsw_sp->router->lock);
7094 return notifier_from_errno(err);
7097 struct mlxsw_sp_inet6addr_event_work {
7098 struct work_struct work;
7099 struct mlxsw_sp *mlxsw_sp;
7100 struct net_device *dev;
7101 unsigned long event;
7104 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7106 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7107 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7108 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7109 struct net_device *dev = inet6addr_work->dev;
7110 unsigned long event = inet6addr_work->event;
7111 struct mlxsw_sp_rif *rif;
7114 mutex_lock(&mlxsw_sp->router->lock);
7116 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7117 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7120 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7122 mutex_unlock(&mlxsw_sp->router->lock);
7125 kfree(inet6addr_work);
7128 /* Called with rcu_read_lock() */
7129 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7130 unsigned long event, void *ptr)
7132 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7133 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7134 struct net_device *dev = if6->idev->dev;
7135 struct mlxsw_sp_router *router;
7137 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7138 if (event == NETDEV_UP)
7141 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7142 if (!inet6addr_work)
7145 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7146 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7147 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7148 inet6addr_work->dev = dev;
7149 inet6addr_work->event = event;
7151 mlxsw_core_schedule_work(&inet6addr_work->work);
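/* IPv6 address notifications arrive in atomic context (under
 * rcu_read_lock()), so the handler above cannot take the router mutex
 * directly. It instead allocates a work item with GFP_ATOMIC and defers
 * the actual processing to process context; the work item frees itself
 * once it has run.
 */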
7156 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7157 unsigned long event, void *ptr)
7159 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7160 struct net_device *dev = i6vi->i6vi_dev->dev;
7161 struct mlxsw_sp *mlxsw_sp;
7162 struct mlxsw_sp_rif *rif;
7165 mlxsw_sp = mlxsw_sp_lower_get(dev);
7169 mutex_lock(&mlxsw_sp->router->lock);
7170 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7171 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7174 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7179 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7181 mutex_unlock(&mlxsw_sp->router->lock);
7182 return notifier_from_errno(err);
7185 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7186 const char *mac, int mtu)
7188 char ritr_pl[MLXSW_REG_RITR_LEN];
7191 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7192 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7196 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7197 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7198 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7199 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7203 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7204 struct mlxsw_sp_rif *rif)
7206 struct net_device *dev = rif->dev;
7210 fid_index = mlxsw_sp_fid_index(rif->fid);
7212 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7216 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7221 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7223 goto err_rif_fdb_op;
7225 if (rif->mtu != dev->mtu) {
7226 struct mlxsw_sp_vr *vr;
7229 /* The RIF is relevant only to its mr_table instance, as unlike
7230 * unicast routing, in multicast routing a RIF cannot be shared
7231 * between several multicast routing tables.
7233 vr = &mlxsw_sp->router->vrs[rif->vr_id];
7234 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7235 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7239 ether_addr_copy(rif->addr, dev->dev_addr);
7240 rif->mtu = dev->mtu;
7242 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7247 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7249 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
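/* Summary of the address/MTU update above: the FDB entry that steers the
 * old RIF MAC to the router is removed, the RIF is re-written via RITR
 * with the new MAC and MTU, an entry for the new MAC is installed, and
 * the MTU of the relevant multicast routing tables is updated. The error
 * labels undo these steps in reverse order.
 */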
7253 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7254 struct netdev_notifier_pre_changeaddr_info *info)
7256 struct netlink_ext_ack *extack;
7258 extack = netdev_notifier_info_to_extack(&info->info);
7259 return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7260 info->dev_addr, extack);
7263 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7264 unsigned long event, void *ptr)
7266 struct mlxsw_sp *mlxsw_sp;
7267 struct mlxsw_sp_rif *rif;
7270 mlxsw_sp = mlxsw_sp_lower_get(dev);
7274 mutex_lock(&mlxsw_sp->router->lock);
7275 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7280 case NETDEV_CHANGEMTU: /* fall through */
7281 case NETDEV_CHANGEADDR:
7282 err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7284 case NETDEV_PRE_CHANGEADDR:
7285 err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7290 mutex_unlock(&mlxsw_sp->router->lock);
7294 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7295 struct net_device *l3_dev,
7296 struct netlink_ext_ack *extack)
7298 struct mlxsw_sp_rif *rif;
7300 /* If netdev is already associated with a RIF, then we need to
7301 * destroy it and create a new one with the new virtual router ID.
7303 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7305 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
7308 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7311 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7312 struct net_device *l3_dev)
7314 struct mlxsw_sp_rif *rif;
7316 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7319 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7322 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7323 struct netdev_notifier_changeupper_info *info)
7325 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7328 /* We do not create a RIF for a macvlan, but only use it to
7329 * direct more MAC addresses to the router.
7331 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7334 mutex_lock(&mlxsw_sp->router->lock);
7336 case NETDEV_PRECHANGEUPPER:
7338 case NETDEV_CHANGEUPPER:
7339 if (info->linking) {
7340 struct netlink_ext_ack *extack;
7342 extack = netdev_notifier_info_to_extack(&info->info);
7343 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7345 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7349 mutex_unlock(&mlxsw_sp->router->lock);
7354 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
7356 struct mlxsw_sp_rif *rif = data;
7358 if (!netif_is_macvlan(dev))
7361 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7362 mlxsw_sp_fid_index(rif->fid), false);
7365 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7367 if (!netif_is_macvlan_port(rif->dev))
7370 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
7371 return netdev_walk_all_upper_dev_rcu(rif->dev,
7372 __mlxsw_sp_rif_macvlan_flush, rif);
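/* When a RIF is torn down, FDB entries installed on behalf of macvlan
 * uppers are flushed here by walking the RIF netdev's upper devices.
 * This is also why __mlxsw_sp_rif_macvlan_del() above tolerates not
 * finding a RIF for the macvlan's lower device.
 */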
7375 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7376 const struct mlxsw_sp_rif_params *params)
7378 struct mlxsw_sp_rif_subport *rif_subport;
7380 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7381 refcount_set(&rif_subport->ref_count, 1);
7382 rif_subport->vid = params->vid;
7383 rif_subport->lag = params->lag;
7385 rif_subport->lag_id = params->lag_id;
7387 rif_subport->system_port = params->system_port;
7390 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7392 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7393 struct mlxsw_sp_rif_subport *rif_subport;
7394 char ritr_pl[MLXSW_REG_RITR_LEN];
7396 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7397 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7398 rif->rif_index, rif->vr_id, rif->dev->mtu);
7399 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7400 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7401 rif_subport->lag ? rif_subport->lag_id :
7402 rif_subport->system_port,
7405 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7408 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7412 err = mlxsw_sp_rif_subport_op(rif, true);
7416 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7417 mlxsw_sp_fid_index(rif->fid), true);
7419 goto err_rif_fdb_op;
7421 mlxsw_sp_fid_rif_set(rif->fid, rif);
7425 mlxsw_sp_rif_subport_op(rif, false);
7429 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7431 struct mlxsw_sp_fid *fid = rif->fid;
7433 mlxsw_sp_fid_rif_set(fid, NULL);
7434 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7435 mlxsw_sp_fid_index(fid), false);
7436 mlxsw_sp_rif_macvlan_flush(rif);
7437 mlxsw_sp_rif_subport_op(rif, false);
7440 static struct mlxsw_sp_fid *
7441 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7442 struct netlink_ext_ack *extack)
7444 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7447 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7448 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
7449 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
7450 .setup = mlxsw_sp_rif_subport_setup,
7451 .configure = mlxsw_sp_rif_subport_configure,
7452 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
7453 .fid_get = mlxsw_sp_rif_subport_fid_get,
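/* Sub-port RIFs represent a {port or LAG, VID} binding and are backed by
 * an rFID keyed by the RIF index (see mlxsw_sp_rif_subport_fid_get()
 * above), unlike the FID and VLAN RIF types below, which are backed by
 * 802.1D and 802.1Q bridge FIDs respectively.
 */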
7456 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7457 enum mlxsw_reg_ritr_if_type type,
7458 u16 vid_fid, bool enable)
7460 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7461 char ritr_pl[MLXSW_REG_RITR_LEN];
7463 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7465 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7466 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7468 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7471 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7473 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
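/* The "router port" is a virtual port one beyond the last possible
 * front-panel port. It is used below as the flood table member that
 * makes broadcast and multicast traffic in a FID reach the router.
 */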
7476 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7478 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7479 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7482 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7487 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7488 mlxsw_sp_router_port(mlxsw_sp), true);
7490 goto err_fid_mc_flood_set;
7492 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7493 mlxsw_sp_router_port(mlxsw_sp), true);
7495 goto err_fid_bc_flood_set;
7497 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7498 mlxsw_sp_fid_index(rif->fid), true);
7500 goto err_rif_fdb_op;
7502 mlxsw_sp_fid_rif_set(rif->fid, rif);
7506 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7507 mlxsw_sp_router_port(mlxsw_sp), false);
7508 err_fid_bc_flood_set:
7509 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7510 mlxsw_sp_router_port(mlxsw_sp), false);
7511 err_fid_mc_flood_set:
7512 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7516 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7518 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7519 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7520 struct mlxsw_sp_fid *fid = rif->fid;
7522 mlxsw_sp_fid_rif_set(fid, NULL);
7523 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7524 mlxsw_sp_fid_index(fid), false);
7525 mlxsw_sp_rif_macvlan_flush(rif);
7526 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7527 mlxsw_sp_router_port(mlxsw_sp), false);
7528 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7529 mlxsw_sp_router_port(mlxsw_sp), false);
7530 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7533 static struct mlxsw_sp_fid *
7534 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7535 struct netlink_ext_ack *extack)
7537 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
7540 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7542 struct switchdev_notifier_fdb_info info;
7543 struct net_device *dev;
7545 dev = br_fdb_find_port(rif->dev, mac, 0);
7551 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7555 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7556 .type = MLXSW_SP_RIF_TYPE_FID,
7557 .rif_size = sizeof(struct mlxsw_sp_rif),
7558 .configure = mlxsw_sp_rif_fid_configure,
7559 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
7560 .fid_get = mlxsw_sp_rif_fid_fid_get,
7561 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
7564 static struct mlxsw_sp_fid *
7565 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7566 struct netlink_ext_ack *extack)
7568 struct net_device *br_dev;
7572 if (is_vlan_dev(rif->dev)) {
7573 vid = vlan_dev_vlan_id(rif->dev);
7574 br_dev = vlan_dev_real_dev(rif->dev);
7575 if (WARN_ON(!netif_is_bridge_master(br_dev)))
7576 return ERR_PTR(-EINVAL);
7578 err = br_vlan_get_pvid(rif->dev, &vid);
7579 if (err < 0 || !vid) {
7580 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7581 return ERR_PTR(-EINVAL);
7585 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
7588 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7590 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7591 struct switchdev_notifier_fdb_info info;
7592 struct net_device *br_dev;
7593 struct net_device *dev;
7595 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7596 dev = br_fdb_find_port(br_dev, mac, vid);
7602 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7606 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7607 .type = MLXSW_SP_RIF_TYPE_VLAN,
7608 .rif_size = sizeof(struct mlxsw_sp_rif),
7609 .configure = mlxsw_sp_rif_fid_configure,
7610 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
7611 .fid_get = mlxsw_sp_rif_vlan_fid_get,
7612 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
7615 static struct mlxsw_sp_rif_ipip_lb *
7616 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7618 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7622 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7623 const struct mlxsw_sp_rif_params *params)
7625 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7626 struct mlxsw_sp_rif_ipip_lb *rif_lb;
7628 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7630 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7631 rif_lb->lb_config = params_lb->lb_config;
7635 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7637 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7638 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7639 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7640 struct mlxsw_sp_vr *ul_vr;
7643 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
7645 return PTR_ERR(ul_vr);
7647 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
7649 goto err_loopback_op;
7651 lb_rif->ul_vr_id = ul_vr->id;
7652 lb_rif->ul_rif_id = 0;
7657 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7661 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7663 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7664 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7665 struct mlxsw_sp_vr *ul_vr;
7667 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7668 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
7671 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7674 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
7675 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
7676 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
7677 .setup = mlxsw_sp_rif_ipip_lb_setup,
7678 .configure = mlxsw_sp1_rif_ipip_lb_configure,
7679 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
7682 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
7683 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
7684 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
7685 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
7686 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
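/* On Spectrum-1 the IPIP loopback RIF above is bound directly to an
 * underlay virtual router (ul_vr_id), whereas the Spectrum-2 variant
 * further down instead points at a shared underlay RIF (ul_rif_id)
 * obtained through mlxsw_sp_ul_rif_get().
 */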
7690 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
7692 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7693 char ritr_pl[MLXSW_REG_RITR_LEN];
7695 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
7696 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
7697 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
7698 MLXSW_REG_RITR_LOOPBACK_GENERIC);
7700 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7703 static struct mlxsw_sp_rif *
7704 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
7705 struct netlink_ext_ack *extack)
7707 struct mlxsw_sp_rif *ul_rif;
7711 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
7713 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
7714 return ERR_PTR(err);
7717 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
7719 return ERR_PTR(-ENOMEM);
7721 mlxsw_sp->router->rifs[rif_index] = ul_rif;
7722 ul_rif->mlxsw_sp = mlxsw_sp;
7723 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
7730 mlxsw_sp->router->rifs[rif_index] = NULL;
7732 return ERR_PTR(err);
7735 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
7737 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7739 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
7740 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
7744 static struct mlxsw_sp_rif *
7745 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
7746 struct netlink_ext_ack *extack)
7748 struct mlxsw_sp_vr *vr;
7751 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
7753 return ERR_CAST(vr);
7755 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
7758 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
7759 if (IS_ERR(vr->ul_rif)) {
7760 err = PTR_ERR(vr->ul_rif);
7761 goto err_ul_rif_create;
7765 refcount_set(&vr->ul_rif_refcnt, 1);
7770 mlxsw_sp_vr_put(mlxsw_sp, vr);
7771 return ERR_PTR(err);
7774 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
7776 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7777 struct mlxsw_sp_vr *vr;
7779 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
7781 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
7785 mlxsw_sp_ul_rif_destroy(ul_rif);
7786 mlxsw_sp_vr_put(mlxsw_sp, vr);
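/* The underlay RIF is shared per virtual router: it is created on first
 * use, reference counted through ul_rif_refcnt, and destroyed together
 * with the VR reference once the last user calls mlxsw_sp_ul_rif_put().
 */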
7789 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
7792 struct mlxsw_sp_rif *ul_rif;
7795 mutex_lock(&mlxsw_sp->router->lock);
7796 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7797 if (IS_ERR(ul_rif)) {
7798 err = PTR_ERR(ul_rif);
7801 *ul_rif_index = ul_rif->rif_index;
7803 mutex_unlock(&mlxsw_sp->router->lock);
7807 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
7809 struct mlxsw_sp_rif *ul_rif;
7811 mutex_lock(&mlxsw_sp->router->lock);
7812 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
7813 if (WARN_ON(!ul_rif))
7816 mlxsw_sp_ul_rif_put(ul_rif);
7818 mutex_unlock(&mlxsw_sp->router->lock);
7822 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7824 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7825 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7826 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7827 struct mlxsw_sp_rif *ul_rif;
7830 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7832 return PTR_ERR(ul_rif);
7834 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
7836 goto err_loopback_op;
7838 lb_rif->ul_vr_id = 0;
7839 lb_rif->ul_rif_id = ul_rif->rif_index;
7844 mlxsw_sp_ul_rif_put(ul_rif);
7848 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7850 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7851 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7852 struct mlxsw_sp_rif *ul_rif;
7854 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
7855 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
7856 mlxsw_sp_ul_rif_put(ul_rif);
7859 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
7860 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
7861 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
7862 .setup = mlxsw_sp_rif_ipip_lb_setup,
7863 .configure = mlxsw_sp2_rif_ipip_lb_configure,
7864 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
7867 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
7868 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
7869 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
7870 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
7871 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
7874 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7876 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7878 mlxsw_sp->router->rifs = kcalloc(max_rifs,
7879 sizeof(struct mlxsw_sp_rif *),
7881 if (!mlxsw_sp->router->rifs)
7887 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
7891 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7892 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
7894 kfree(mlxsw_sp->router->rifs);
7898 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
7900 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
7902 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
7903 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
7906 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
7910 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
7911 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
7913 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
7916 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
7920 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
7923 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
7925 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
7928 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
7930 struct mlxsw_sp_router *router;
7932 /* Flush pending FIB notifications and then flush the device's
7933 * table before requesting another dump. The FIB notification
7934 * block is unregistered, so no need to take RTNL.
7936 mlxsw_core_flush_owq();
7937 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7938 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
7941 #ifdef CONFIG_IP_ROUTE_MULTIPATH
7942 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
7944 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
7947 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
7949 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
7952 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
7954 struct net *net = mlxsw_sp_net(mlxsw_sp);
7955 bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
7957 mlxsw_sp_mp_hash_header_set(recr2_pl,
7958 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
7959 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
7960 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
7961 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;

7964 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
7965 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
7966 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
7967 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
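/* When the net.ipv4.fib_multipath_hash_policy sysctl selects L3-only
 * hashing (only_l3 above), the IPv4 ECMP hash covers just the source and
 * destination addresses; otherwise the IP protocol and TCP/UDP ports are
 * mixed in as well.
 */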
7970 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
7972 bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
7974 mlxsw_sp_mp_hash_header_set(recr2_pl,
7975 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
7976 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
7977 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
7978 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
7979 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
7981 		mlxsw_sp_mp_hash_field_set(recr2_pl,
7982 					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
7984 		mlxsw_sp_mp_hash_header_set(recr2_pl,
7985 					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
7986 		mlxsw_sp_mp_hash_field_set(recr2_pl,
7987 					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
7988 		mlxsw_sp_mp_hash_field_set(recr2_pl,
7989 					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
7993 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7995 char recr2_pl[MLXSW_REG_RECR2_LEN];
7998 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
7999 mlxsw_reg_recr2_pack(recr2_pl, seed);
8000 mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8001 mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8003 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
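/* The ECMP hash seed is derived from the switch base MAC via jhash, so
 * it is stable across reloads of the same system while presumably
 * differing between switches, which helps avoid hash polarization across
 * a multi-stage fabric.
 */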
#else
8006 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif
8012 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8014 char rdpm_pl[MLXSW_REG_RDPM_LEN];
8017 MLXSW_REG_ZERO(rdpm, rdpm_pl);
8019 /* HW is determining switch priority based on DSCP-bits, but the
8020 * kernel is still doing that based on the ToS. Since there's a
8021 * mismatch in bits we need to make sure to translate the right
8022 * value ToS would observe, skipping the 2 least-significant ECN bits.
8024 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8025 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8027 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
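/* rt_tos2priority() expects a full ToS byte, so the 6-bit DSCP table
 * index is shifted left by two to sit above the (zeroed) ECN bits before
 * the lookup; e.g. DSCP 46 (EF) is looked up as ToS 0xb8.
 */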
8030 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8032 struct net *net = mlxsw_sp_net(mlxsw_sp);
8033 bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8034 char rgcr_pl[MLXSW_REG_RGCR_LEN];
8038 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
8040 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8042 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
8043 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
8044 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
8045 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8051 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8053 char rgcr_pl[MLXSW_REG_RGCR_LEN];
8055 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8056 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8059 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8060 struct netlink_ext_ack *extack)
8062 struct mlxsw_sp_router *router;
8065 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
8068 mutex_init(&router->lock);
8069 mlxsw_sp->router = router;
8070 router->mlxsw_sp = mlxsw_sp;
8072 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8073 err = __mlxsw_sp_router_init(mlxsw_sp);
8075 goto err_router_init;
8077 err = mlxsw_sp_rifs_init(mlxsw_sp);
8081 err = mlxsw_sp_ipips_init(mlxsw_sp);
8083 goto err_ipips_init;
8085 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8086 &mlxsw_sp_nexthop_ht_params);
8088 goto err_nexthop_ht_init;
8090 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8091 &mlxsw_sp_nexthop_group_ht_params);
8093 goto err_nexthop_group_ht_init;
8095 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8096 err = mlxsw_sp_lpm_init(mlxsw_sp);
8100 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8104 err = mlxsw_sp_vrs_init(mlxsw_sp);
8108 err = mlxsw_sp_neigh_init(mlxsw_sp);
8110 goto err_neigh_init;
8112 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8114 goto err_mp_hash_init;
8116 err = mlxsw_sp_dscp_init(mlxsw_sp);
8120 router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8121 err = register_inetaddr_notifier(&router->inetaddr_nb);
8123 goto err_register_inetaddr_notifier;
8125 router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8126 err = register_inet6addr_notifier(&router->inet6addr_nb);
8128 goto err_register_inet6addr_notifier;
8130 mlxsw_sp->router->netevent_nb.notifier_call =
8131 mlxsw_sp_router_netevent_event;
8132 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8134 goto err_register_netevent_notifier;
8136 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8137 err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8138 &mlxsw_sp->router->fib_nb,
8139 mlxsw_sp_router_fib_dump_flush, extack);
8141 goto err_register_fib_notifier;
8145 err_register_fib_notifier:
8146 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8147 err_register_netevent_notifier:
8148 unregister_inet6addr_notifier(&router->inet6addr_nb);
8149 err_register_inet6addr_notifier:
8150 unregister_inetaddr_notifier(&router->inetaddr_nb);
8151 err_register_inetaddr_notifier:
8152 mlxsw_core_flush_owq();
8155 mlxsw_sp_neigh_fini(mlxsw_sp);
8157 mlxsw_sp_vrs_fini(mlxsw_sp);
8159 mlxsw_sp_mr_fini(mlxsw_sp);
8161 mlxsw_sp_lpm_fini(mlxsw_sp);
8163 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8164 err_nexthop_group_ht_init:
8165 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8166 err_nexthop_ht_init:
8167 mlxsw_sp_ipips_fini(mlxsw_sp);
8169 mlxsw_sp_rifs_fini(mlxsw_sp);
8171 __mlxsw_sp_router_fini(mlxsw_sp);
8173 mutex_destroy(&mlxsw_sp->router->lock);
8174 kfree(mlxsw_sp->router);
8178 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8180 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8181 &mlxsw_sp->router->fib_nb);
8182 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8183 unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8184 unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8185 mlxsw_core_flush_owq();
8186 mlxsw_sp_neigh_fini(mlxsw_sp);
8187 mlxsw_sp_vrs_fini(mlxsw_sp);
8188 mlxsw_sp_mr_fini(mlxsw_sp);
8189 mlxsw_sp_lpm_fini(mlxsw_sp);
8190 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8191 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8192 mlxsw_sp_ipips_fini(mlxsw_sp);
8193 mlxsw_sp_rifs_fini(mlxsw_sp);
8194 __mlxsw_sp_router_fini(mlxsw_sp);
8195 mutex_destroy(&mlxsw_sp->router->lock);
8196 kfree(mlxsw_sp->router);