1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <linux/genalloc.h>
22 #include <net/netevent.h>
23 #include <net/neighbour.h>
25 #include <net/inet_dscp.h>
26 #include <net/ip_fib.h>
27 #include <net/ip6_fib.h>
28 #include <net/nexthop.h>
29 #include <net/fib_rules.h>
30 #include <net/ip_tunnels.h>
31 #include <net/l3mdev.h>
32 #include <net/addrconf.h>
33 #include <net/ndisc.h>
35 #include <net/fib_notifier.h>
36 #include <net/switchdev.h>
41 #include "spectrum_cnt.h"
42 #include "spectrum_dpipe.h"
43 #include "spectrum_ipip.h"
44 #include "spectrum_mr.h"
45 #include "spectrum_mr_tcam.h"
46 #include "spectrum_router.h"
47 #include "spectrum_span.h"
51 struct mlxsw_sp_lpm_tree;
52 struct mlxsw_sp_rif_ops;
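/* A CRIF ("candidate RIF") tracks a netdevice that a RIF may later be
 * configured on top of, together with the nexthops that resolve through
 * that netdevice. It exists whether or not a RIF is currently instantiated
 * for the netdevice.
 */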
54 struct mlxsw_sp_crif_key {
	struct net_device *dev;
};
58 struct mlxsw_sp_crif {
59 struct mlxsw_sp_crif_key key;
60 struct rhash_head ht_node;
62 struct list_head nexthop_list;
	struct mlxsw_sp_rif *rif;
};
66 static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
67 .key_offset = offsetof(struct mlxsw_sp_crif, key),
68 .key_len = sizeof_field(struct mlxsw_sp_crif, key),
	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};

struct mlxsw_sp_rif {
73 struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
74 struct list_head neigh_list;
75 struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
82 const struct mlxsw_sp_rif_ops *ops;
83 struct mlxsw_sp *mlxsw_sp;
85 unsigned int counter_ingress;
86 bool counter_ingress_valid;
87 unsigned int counter_egress;
	bool counter_egress_valid;
};
91 static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
95 return rif->crif->key.dev;
98 struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
	bool double_entry;
};
109 struct mlxsw_sp_rif_subport {
110 struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};
120 struct mlxsw_sp_rif_ipip_lb {
121 struct mlxsw_sp_rif common;
122 struct mlxsw_sp_rif_ipip_lb_config lb_config;
123 u16 ul_vr_id; /* Spectrum-1. */
	u16 ul_rif_id; /* Spectrum-2+. */
};
127 struct mlxsw_sp_rif_params_ipip_lb {
128 struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};
132 struct mlxsw_sp_rif_ops {
133 enum mlxsw_sp_rif_type type;
136 void (*setup)(struct mlxsw_sp_rif *rif,
137 const struct mlxsw_sp_rif_params *params);
138 int (*configure)(struct mlxsw_sp_rif *rif,
139 struct netlink_ext_ack *extack);
140 void (*deconfigure)(struct mlxsw_sp_rif *rif);
141 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
142 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
146 struct mlxsw_sp_rif_mac_profile {
147 unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};
152 struct mlxsw_sp_router_ops {
153 int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};
157 static struct mlxsw_sp_rif *
158 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
159 const struct net_device *dev);
160 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
161 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
162 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
163 struct mlxsw_sp_lpm_tree *lpm_tree);
164 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
165 const struct mlxsw_sp_fib *fib,
167 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
168 const struct mlxsw_sp_fib *fib);
170 static unsigned int *
171 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
172 enum mlxsw_sp_rif_counter_dir dir)
175 case MLXSW_SP_RIF_COUNTER_EGRESS:
176 return &rif->counter_egress;
177 case MLXSW_SP_RIF_COUNTER_INGRESS:
178 return &rif->counter_ingress;
184 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
185 enum mlxsw_sp_rif_counter_dir dir)
188 case MLXSW_SP_RIF_COUNTER_EGRESS:
189 return rif->counter_egress_valid;
190 case MLXSW_SP_RIF_COUNTER_INGRESS:
191 return rif->counter_ingress_valid;
197 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
198 enum mlxsw_sp_rif_counter_dir dir,
202 case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
205 case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}
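/* Binding or unbinding a counter is a read-modify-write of the RITR
 * register: the interface entry is queried first, so that the write below
 * only changes the counter fields and leaves the rest of the RIF
 * configuration intact.
 */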
211 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
212 unsigned int counter_index, bool enable,
213 enum mlxsw_sp_rif_counter_dir dir)
215 char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;
	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
221 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
222 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
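/* Read the current value of a RIF counter; querying RICNT with the NOP
 * opcode returns the good unicast packet count without clearing it. A
 * minimal sketch of a caller (hypothetical, for illustration only):
 *
 *	u64 packets;
 *
 *	if (!mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					    MLXSW_SP_RIF_COUNTER_EGRESS,
 *					    &packets))
 *		pr_debug("RIF egress packets: %llu\n", packets);
 */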
231 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
232 struct mlxsw_sp_rif *rif,
233 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
235 char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
247 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
248 MLXSW_REG_RICNT_OPCODE_NOP);
249 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);

	return 0;
}
256 struct mlxsw_sp_rif_counter_set_basic {
257 u64 good_unicast_packets;
258 u64 good_multicast_packets;
259 u64 good_broadcast_packets;
260 u64 good_unicast_bytes;
261 u64 good_multicast_bytes;
262 u64 good_broadcast_bytes;
270 mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
271 enum mlxsw_sp_rif_counter_dir dir,
272 struct mlxsw_sp_rif_counter_set_basic *set)
274 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
275 char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;
282 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
286 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
287 MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
295 #define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
296 (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
298 MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
299 MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
300 MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
301 MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
302 MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
303 MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
304 MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
305 MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
306 MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
307 MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}
314 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
315 unsigned int counter_index)
317 char ricnt_pl[MLXSW_REG_RICNT_LEN];
319 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
320 MLXSW_REG_RICNT_OPCODE_CLEAR);
321 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
324 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
325 enum mlxsw_sp_rif_counter_dir dir)
327 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;
334 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

343 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
347 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
348 *p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
361 void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
362 enum mlxsw_sp_rif_counter_dir dir)
364 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
365 unsigned int *p_counter_index;
	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;
370 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
373 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
374 *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
377 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
380 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
382 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
383 struct devlink *devlink;
385 devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;

	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
392 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
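/* One usage bin for each possible prefix length, 0 through 128. IPv4 only
 * ever sets the first 33 bins of the same bitmap.
 */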
397 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
399 struct mlxsw_sp_prefix_usage {
400 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
403 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
404 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
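/* For example, a FIB holding a default route plus /24 and /32 routes has
 * bits 0, 24 and 32 set in its usage bitmap, and the iterator above visits
 * them in ascending order (for_each_set_bit() semantics).
 */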
407 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
408 struct mlxsw_sp_prefix_usage *prefix_usage2)
410 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
414 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
415 struct mlxsw_sp_prefix_usage *prefix_usage2)
417 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
421 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
422 unsigned char prefix_len)
424 set_bit(prefix_len, prefix_usage->b);
428 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
429 unsigned char prefix_len)
431 clear_bit(prefix_len, prefix_usage->b);
434 struct mlxsw_sp_fib_key {
435 unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};
439 enum mlxsw_sp_fib_entry_type {
440 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
441 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
442 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
443 MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
444 MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
446 /* This is a special case of local delivery, where a packet should be
447 * decapsulated on reception. Note that there is no corresponding ENCAP,
448 * because that's a type of next hop, not of FIB entry. (There can be
449 * several next hops in a REMOTE entry, and some of them may be
450 * encapsulating entries.)
452 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
456 struct mlxsw_sp_nexthop_group_info;
457 struct mlxsw_sp_nexthop_group;
458 struct mlxsw_sp_fib_entry;
460 struct mlxsw_sp_fib_node {
461 struct mlxsw_sp_fib_entry *fib_entry;
462 struct list_head list;
463 struct rhash_head ht_node;
464 struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};
468 struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};
473 struct mlxsw_sp_fib_entry {
474 struct mlxsw_sp_fib_node *fib_node;
475 enum mlxsw_sp_fib_entry_type type;
476 struct list_head nexthop_group_node;
477 struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};
481 struct mlxsw_sp_fib4_entry {
482 struct mlxsw_sp_fib_entry common;
489 struct mlxsw_sp_fib6_entry {
490 struct mlxsw_sp_fib_entry common;
491 struct list_head rt6_list;
495 struct mlxsw_sp_rt6 {
496 struct list_head list;
	struct fib6_info *rt;
};
500 struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
503 enum mlxsw_sp_l3proto proto;
504 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};
508 struct mlxsw_sp_fib {
509 struct rhashtable ht;
510 struct list_head node_list;
511 struct mlxsw_sp_vr *vr;
512 struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
517 u16 id; /* virtual router ID */
518 u32 tb_id; /* kernel fib table id */
519 unsigned int rif_count;
520 struct mlxsw_sp_fib *fib4;
521 struct mlxsw_sp_fib *fib6;
522 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
523 struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};
527 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
529 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
530 struct mlxsw_sp_vr *vr,
531 enum mlxsw_sp_l3proto proto)
533 struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;
537 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
538 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
547 fib->lpm_tree = lpm_tree;
548 mlxsw_sp_lpm_tree_hold(lpm_tree);
549 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
561 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
562 struct mlxsw_sp_fib *fib)
564 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
565 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
566 WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
571 static struct mlxsw_sp_lpm_tree *
572 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
	struct mlxsw_sp_lpm_tree *lpm_tree;
577 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
578 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}
585 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
586 struct mlxsw_sp_lpm_tree *lpm_tree)
588 char ralta_pl[MLXSW_REG_RALTA_LEN];
590 mlxsw_reg_ralta_pack(ralta_pl, true,
591 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
593 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
596 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
597 struct mlxsw_sp_lpm_tree *lpm_tree)
599 char ralta_pl[MLXSW_REG_RALTA_LEN];
601 mlxsw_reg_ralta_pack(ralta_pl, false,
602 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
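/* Program the tree structure via RALST: the longest used prefix length
 * serves as the root bin, and each used prefix length is linked to the next
 * shorter one as its left child, with no right children. The result is a
 * chain that is walked from the most to the least specific prefix.
 */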
608 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
609 struct mlxsw_sp_prefix_usage *prefix_usage,
610 struct mlxsw_sp_lpm_tree *lpm_tree)
612 char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;
620 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
621 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
625 MLXSW_REG_RALST_BIN_NO_CHILD);
626 last_prefix = prefix;
628 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
631 static struct mlxsw_sp_lpm_tree *
632 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
633 struct mlxsw_sp_prefix_usage *prefix_usage,
634 enum mlxsw_sp_l3proto proto)
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;
639 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
642 lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

647 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
	if (err)
		goto err_left_struct_set;
651 memcpy(&lpm_tree->prefix_usage, prefix_usage,
652 sizeof(lpm_tree->prefix_usage));
653 memset(&lpm_tree->prefix_ref_count, 0,
654 sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
663 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
664 struct mlxsw_sp_lpm_tree *lpm_tree)
666 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
669 static struct mlxsw_sp_lpm_tree *
670 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
671 struct mlxsw_sp_prefix_usage *prefix_usage,
672 enum mlxsw_sp_l3proto proto)
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;
677 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
678 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
679 if (lpm_tree->ref_count != 0 &&
680 lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
687 return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
690 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
692 lpm_tree->ref_count++;
695 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
696 struct mlxsw_sp_lpm_tree *lpm_tree)
698 if (--lpm_tree->ref_count == 0)
699 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
702 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
704 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
706 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;
714 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
715 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
716 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
717 sizeof(struct mlxsw_sp_lpm_tree),
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;
722 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
723 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
724 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
727 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
728 MLXSW_SP_L3_PROTO_IPV4);
729 if (IS_ERR(lpm_tree)) {
730 err = PTR_ERR(lpm_tree);
731 goto err_ipv4_tree_get;
733 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
735 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
736 MLXSW_SP_L3_PROTO_IPV6);
737 if (IS_ERR(lpm_tree)) {
738 err = PTR_ERR(lpm_tree);
739 goto err_ipv6_tree_get;
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
746 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
747 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}
753 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
755 struct mlxsw_sp_lpm_tree *lpm_tree;
757 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
758 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
760 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
761 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
763 kfree(mlxsw_sp->router->lpm.trees);
766 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
768 return !!vr->fib4 || !!vr->fib6 ||
769 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
770 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
773 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
775 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;
779 for (i = 0; i < max_vrs; i++) {
780 vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}
787 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
788 const struct mlxsw_sp_fib *fib, u8 tree_id)
790 char raltb_pl[MLXSW_REG_RALTB_LEN];
792 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
793 (enum mlxsw_reg_ralxx_protocol) fib->proto,
795 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
798 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
799 const struct mlxsw_sp_fib *fib)
801 char raltb_pl[MLXSW_REG_RALTB_LEN];
	/* Bind to tree 0, which is the default tree. */
804 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
805 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
806 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
809 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
811 /* For our purpose, squash main, default and local tables into one */
812 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
813 tb_id = RT_TABLE_MAIN;
817 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
820 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;
824 tb_id = mlxsw_sp_fix_tb_id(tb_id);
826 for (i = 0; i < max_vrs; i++) {
827 vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}
834 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
837 struct mlxsw_sp_vr *vr;
840 mutex_lock(&mlxsw_sp->router->lock);
841 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
848 mutex_unlock(&mlxsw_sp->router->lock);
852 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
853 enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}
864 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
866 struct netlink_ext_ack *extack)
868 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
869 struct mlxsw_sp_fib *fib4;
870 struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;
874 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
879 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
882 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
887 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
888 MLXSW_SP_L3_PROTO_IPV4);
889 if (IS_ERR(mr4_table)) {
890 err = PTR_ERR(mr4_table);
891 goto err_mr4_table_create;
893 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
894 MLXSW_SP_L3_PROTO_IPV6);
895 if (IS_ERR(mr6_table)) {
896 err = PTR_ERR(mr6_table);
897 goto err_mr6_table_create;
	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;

	return vr;

907 err_mr6_table_create:
908 mlxsw_sp_mr_table_destroy(mr4_table);
909 err_mr4_table_create:
910 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}
916 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
917 struct mlxsw_sp_vr *vr)
919 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
920 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
921 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
922 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}
929 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
930 struct netlink_ext_ack *extack)
932 struct mlxsw_sp_vr *vr;
934 tb_id = mlxsw_sp_fix_tb_id(tb_id);
935 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}
941 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
943 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
944 list_empty(&vr->fib6->node_list) &&
945 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
946 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
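/* Check whether a VR is bound to the LPM tree identified by tree_id for the
 * given protocol, and would therefore need to be migrated if that tree were
 * replaced.
 */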
951 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
952 enum mlxsw_sp_l3proto proto, u8 tree_id)
954 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}
963 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
964 struct mlxsw_sp_fib *fib,
965 struct mlxsw_sp_lpm_tree *new_tree)
967 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
970 fib->lpm_tree = new_tree;
971 mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}
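/* Replace the per-protocol default tree: migrate every VR still bound to
 * the old tree over to the new one, and roll the already-migrated VRs back
 * if any bind fails.
 */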
984 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
985 struct mlxsw_sp_fib *fib,
986 struct mlxsw_sp_lpm_tree *new_tree)
988 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
989 enum mlxsw_sp_l3proto proto = fib->proto;
990 struct mlxsw_sp_lpm_tree *old_tree;
991 u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;
995 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
996 old_id = old_tree->id;
998 for (i = 0; i < max_vrs; i++) {
999 vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
1002 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}
1009 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1010 sizeof(new_tree->prefix_ref_count));
1011 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1012 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1017 for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
1020 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}
1027 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;
1036 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1037 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;
1042 for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}
1050 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1052 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
1061 mlxsw_core_flush_owq();
1062 mlxsw_sp_router_fib_flush(mlxsw_sp);
1063 kfree(mlxsw_sp->router->vrs);
1066 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}
1083 mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
1085 crif->key.dev = dev;
1086 INIT_LIST_HEAD(&crif->nexthop_list);
1089 static struct mlxsw_sp_crif *
1090 mlxsw_sp_crif_alloc(struct net_device *dev)
1092 struct mlxsw_sp_crif *crif;
	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
	if (!crif)
		return NULL;

	mlxsw_sp_crif_init(crif, dev);
	return crif;
}
1102 static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
	if (WARN_ON(crif->rif))
		return;

	WARN_ON(!list_empty(&crif->nexthop_list));
	kfree(crif);
}
1111 static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
1112 struct mlxsw_sp_crif *crif)
1114 return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
1115 mlxsw_sp_crif_ht_params);
1118 static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
1119 struct mlxsw_sp_crif *crif)
1121 rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
1122 mlxsw_sp_crif_ht_params);
1125 static struct mlxsw_sp_crif *
1126 mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
1127 const struct net_device *dev)
1129 struct mlxsw_sp_crif_key key = {
1130 .dev = (struct net_device *)dev,
1133 return rhashtable_lookup_fast(&router->crif_ht, &key,
1134 mlxsw_sp_crif_ht_params);
1137 static struct mlxsw_sp_rif *
1138 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1139 const struct mlxsw_sp_rif_params *params,
1140 struct netlink_ext_ack *extack);
1142 static struct mlxsw_sp_rif_ipip_lb *
1143 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1144 enum mlxsw_sp_ipip_type ipipt,
1145 struct net_device *ol_dev,
1146 struct netlink_ext_ack *extack)
1148 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1149 const struct mlxsw_sp_ipip_ops *ipip_ops;
1150 struct mlxsw_sp_rif *rif;
1152 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1153 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1154 .common.dev = ol_dev,
1155 .common.lag = false,
1156 .common.double_entry = ipip_ops->double_rif_entry,
1157 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1160 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
1163 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1166 static struct mlxsw_sp_ipip_entry *
1167 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1168 enum mlxsw_sp_ipip_type ipipt,
1169 struct net_device *ol_dev)
1171 const struct mlxsw_sp_ipip_ops *ipip_ops;
1172 struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;
1176 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1177 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);
1181 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1183 if (IS_ERR(ipip_entry->ol_lb)) {
1184 ret = ERR_CAST(ipip_entry->ol_lb);
1185 goto err_ol_ipip_lb_create;
1188 ipip_entry->ipipt = ipipt;
1189 ipip_entry->ol_dev = ol_dev;
1190 ipip_entry->parms = ipip_ops->parms_init(ol_dev);
1192 err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

1200 err_rem_ip_addr_set:
1201 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}
1207 static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1208 struct mlxsw_sp_ipip_entry *ipip_entry)
1210 const struct mlxsw_sp_ipip_ops *ipip_ops =
1211 mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1213 ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}
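/* Check whether ipip_entry is a tunnel sourced from saddr in underlay table
 * ul_tb_id, i.e. whether another tunnel with these parameters would clash
 * with it.
 */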
1219 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1220 const enum mlxsw_sp_l3proto ul_proto,
1221 union mlxsw_sp_l3addr saddr,
1223 struct mlxsw_sp_ipip_entry *ipip_entry)
1225 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1226 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1227 union mlxsw_sp_l3addr tun_saddr;
	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;
1232 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1233 return tun_ul_tb_id == ul_tb_id &&
1234 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1237 static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1238 enum mlxsw_sp_ipip_type ipipt)
1240 const struct mlxsw_sp_ipip_ops *ipip_ops;
1242 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	/* Not all tunnels require to increase the default parsing depth
	 * (96 bytes).
	 */
1247 if (ipip_ops->inc_parsing_depth)
1248 return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1253 static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1254 enum mlxsw_sp_ipip_type ipipt)
1256 const struct mlxsw_sp_ipip_ops *ipip_ops =
1257 mlxsw_sp->router->ipip_ops_arr[ipipt];
1259 if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}
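/* Turn fib_entry into the decap entry of ipip_entry: allocate an adjacency
 * entry for the tunnel, bump the parsing depth if the tunnel type requires
 * it, and cross-link the FIB entry with the IPIP entry.
 */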
1264 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1265 struct mlxsw_sp_fib_entry *fib_entry,
1266 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

1276 err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;
1281 ipip_entry->decap_fib_entry = fib_entry;
1282 fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

1287 err_parsing_depth_inc:
1288 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			  fib_entry->decap.tunnel_index);
	return err;
}
1293 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1294 struct mlxsw_sp_fib_entry *fib_entry)
1296 enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
	/* Unlink this FIB entry from the IPIP entry whose decap entry it is. */
1299 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1300 fib_entry->decap.ipip_entry = NULL;
1301 mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1302 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1303 1, fib_entry->decap.tunnel_index);
1306 static struct mlxsw_sp_fib_node *
1307 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1308 size_t addr_len, unsigned char prefix_len);
1309 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1310 struct mlxsw_sp_fib_entry *fib_entry);
1313 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1314 struct mlxsw_sp_ipip_entry *ipip_entry)
1316 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1318 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1319 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1321 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1325 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1326 struct mlxsw_sp_ipip_entry *ipip_entry,
1327 struct mlxsw_sp_fib_entry *decap_fib_entry)
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
1332 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1334 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1335 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1338 static struct mlxsw_sp_fib_entry *
1339 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1340 enum mlxsw_sp_l3proto proto,
1341 const union mlxsw_sp_l3addr *addr,
1342 enum mlxsw_sp_fib_entry_type type)
1344 struct mlxsw_sp_fib_node *fib_node;
1345 unsigned char addr_prefix_len;
1346 struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
1355 fib = mlxsw_sp_vr_fib(vr, proto);
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}
1374 fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					     addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

1379 return fib_node->fib_entry;
1382 /* Given an IPIP entry, find the corresponding decap route. */
1383 static struct mlxsw_sp_fib_entry *
1384 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1385 struct mlxsw_sp_ipip_entry *ipip_entry)
	struct mlxsw_sp_fib_node *fib_node;
1388 const struct mlxsw_sp_ipip_ops *ipip_ops;
1389 unsigned char saddr_prefix_len;
1390 union mlxsw_sp_l3addr saddr;
1391 struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;
1398 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1400 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

1405 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1406 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1407 ipip_entry->ol_dev);
1409 switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}
	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

1432 return fib_node->fib_entry;
1435 static struct mlxsw_sp_ipip_entry *
1436 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1437 enum mlxsw_sp_ipip_type ipipt,
1438 struct net_device *ol_dev)
1440 struct mlxsw_sp_ipip_entry *ipip_entry;
1442 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;
1446 list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}
1453 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1454 struct mlxsw_sp_ipip_entry *ipip_entry)
1456 list_del(&ipip_entry->ipip_list_node);
1457 mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1461 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1462 const struct net_device *ul_dev,
1463 enum mlxsw_sp_l3proto ul_proto,
1464 union mlxsw_sp_l3addr ul_dip,
1465 struct mlxsw_sp_ipip_entry *ipip_entry)
1467 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1468 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;
1473 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1474 ul_tb_id, ipip_entry);
1477 /* Given decap parameters, find the corresponding IPIP entry. */
1478 static struct mlxsw_sp_ipip_entry *
1479 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1480 enum mlxsw_sp_l3proto ul_proto,
1481 union mlxsw_sp_l3addr ul_dip)
1483 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1484 struct net_device *ul_dev;
1488 ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1492 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1494 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1508 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1509 const struct net_device *dev,
1510 enum mlxsw_sp_ipip_type *p_type)
1512 struct mlxsw_sp_router *router = mlxsw_sp->router;
1513 const struct mlxsw_sp_ipip_ops *ipip_ops;
1514 enum mlxsw_sp_ipip_type ipipt;
1516 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1517 ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}
1527 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1528 const struct net_device *dev)
1530 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1533 static struct mlxsw_sp_ipip_entry *
1534 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1535 const struct net_device *ol_dev)
1537 struct mlxsw_sp_ipip_entry *ipip_entry;
1539 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;
	}

	return NULL;
}
1547 static struct mlxsw_sp_ipip_entry *
1548 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1549 const struct net_device *ul_dev,
1550 struct mlxsw_sp_ipip_entry *start)
1552 struct mlxsw_sp_ipip_entry *ipip_entry;
1554 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1556 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1558 struct net_device *ol_dev = ipip_entry->ol_dev;
1559 struct net_device *ipip_ul_dev;
1562 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}
1572 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1573 const struct net_device *dev)
1575 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1578 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1579 const struct net_device *ol_dev,
1580 enum mlxsw_sp_ipip_type ipipt)
1582 const struct mlxsw_sp_ipip_ops *ops
1583 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1585 return ops->can_offload(mlxsw_sp, ol_dev);
1588 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1589 struct net_device *ol_dev)
1591 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1592 struct mlxsw_sp_ipip_entry *ipip_entry;
1593 enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;
1597 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1598 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1599 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1600 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1601 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
1607 if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}
1615 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1616 struct net_device *ol_dev)
1618 struct mlxsw_sp_ipip_entry *ipip_entry;
1620 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
1626 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1627 struct mlxsw_sp_ipip_entry *ipip_entry)
1629 struct mlxsw_sp_fib_entry *decap_fib_entry;
1631 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1632 if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}
1638 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1639 u16 ul_rif_id, bool enable)
1641 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1642 struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
1643 enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1644 struct mlxsw_sp_rif *rif = &lb_rif->common;
1645 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1646 char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;
1650 ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1651 switch (lb_cf.ul_protocol) {
1652 case MLXSW_SP_L3_PROTO_IPV4:
1653 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1654 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1655 rif->rif_index, rif->vr_id, dev->mtu);
1656 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;

1662 case MLXSW_SP_L3_PROTO_IPV6:
1663 saddr6 = &lb_cf.saddr.addr6;
1664 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1665 rif->rif_index, rif->vr_id, dev->mtu);
1666 mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

1673 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1676 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1677 struct net_device *ol_dev)
1679 struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err;
1683 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1685 lb_rif = ipip_entry->ol_lb;
1686 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1687 lb_rif->ul_rif_id, true);
	if (err)
		return err;

	lb_rif->common.mtu = ol_dev->mtu;

	return 0;
}
1697 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1698 struct net_device *ol_dev)
1700 struct mlxsw_sp_ipip_entry *ipip_entry;
1702 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
1708 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1709 struct mlxsw_sp_ipip_entry *ipip_entry)
1711 if (ipip_entry->decap_fib_entry)
1712 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1715 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1716 struct net_device *ol_dev)
1718 struct mlxsw_sp_ipip_entry *ipip_entry;
1720 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
1725 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1726 struct mlxsw_sp_rif *rif);
1728 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1729 struct mlxsw_sp_rif *old_rif,
1730 struct mlxsw_sp_rif *new_rif,
1733 struct mlxsw_sp_crif *crif = old_rif->crif;
1734 struct mlxsw_sp_crif mock_crif = {};
1737 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1739 /* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1740 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1742 mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1743 old_rif->crif = &mock_crif;
1744 mock_crif.rif = old_rif;
1745 mlxsw_sp_rif_destroy(old_rif);
1749 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1750 struct mlxsw_sp_ipip_entry *ipip_entry,
1752 struct netlink_ext_ack *extack)
1754 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1755 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1757 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1761 if (IS_ERR(new_lb_rif))
1762 return PTR_ERR(new_lb_rif);
1763 ipip_entry->ol_lb = new_lb_rif;
1765 mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1766 &new_lb_rif->common, keep_encap);
/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
1783 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1784 struct mlxsw_sp_ipip_entry *ipip_entry,
1785 bool recreate_loopback,
1787 bool update_nexthops,
1788 struct netlink_ext_ack *extack)
1792 /* RIFs can't be edited, so to update loopback, we need to destroy and
1793 * recreate it. That creates a window of opportunity where RALUE and
1794 * RATR registers end up referencing a RIF that's already gone. RATRs
1795 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1796 * of RALUE, demote the decap route back.
1798 if (ipip_entry->decap_fib_entry)
1799 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1801 if (recreate_loopback) {
1802 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
							keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
1807 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1808 &ipip_entry->ol_lb->common);
1811 if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
1817 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1818 struct net_device *ol_dev,
1819 struct netlink_ext_ack *extack)
1821 struct mlxsw_sp_ipip_entry *ipip_entry =
1822 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1828 true, false, false, extack);
1832 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1833 struct mlxsw_sp_ipip_entry *ipip_entry,
1834 struct net_device *ul_dev,
1836 struct netlink_ext_ack *extack)
1838 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1839 enum mlxsw_sp_l3proto ul_proto;
1840 union mlxsw_sp_l3addr saddr;
1842 /* Moving underlay to a different VRF might cause local address
1843 * conflict, and the conflicting tunnels need to be demoted.
1845 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1846 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

1854 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1855 true, true, false, extack);
1859 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1860 struct mlxsw_sp_ipip_entry *ipip_entry,
1861 struct net_device *ul_dev)
1863 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1864 false, false, true, NULL);
1868 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1869 struct mlxsw_sp_ipip_entry *ipip_entry,
1870 struct net_device *ul_dev)
1872 /* A down underlay device causes encapsulated packets to not be
1873 * forwarded, but decap still works. So refresh next hops without
1874 * touching anything else.
1876 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1877 false, false, true, NULL);
1881 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1882 struct net_device *ol_dev,
1883 struct netlink_ext_ack *extack)
1885 const struct mlxsw_sp_ipip_ops *ipip_ops;
1886 struct mlxsw_sp_ipip_entry *ipip_entry;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
1898 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1899 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

1904 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}
1909 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1910 struct mlxsw_sp_ipip_entry *ipip_entry)
1912 struct net_device *ol_dev = ipip_entry->ol_dev;
1914 if (ol_dev->flags & IFF_UP)
1915 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1916 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
1926 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1927 enum mlxsw_sp_l3proto ul_proto,
1928 union mlxsw_sp_l3addr saddr,
1930 const struct mlxsw_sp_ipip_entry *except)
1932 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1934 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1936 if (ipip_entry != except &&
1937 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1938 ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}
1947 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1948 struct net_device *ul_dev)
1950 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1952 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1954 struct net_device *ol_dev = ipip_entry->ol_dev;
1955 struct net_device *ipip_ul_dev;
1958 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1960 if (ipip_ul_dev == ul_dev)
1961 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1965 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1966 struct net_device *ol_dev,
1967 unsigned long event,
1968 struct netdev_notifier_info *info)
1970 struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
1987 case NETDEV_CHANGEUPPER:
1988 chup = container_of(info, typeof(*chup), info);
1989 extack = info->extack;
1990 if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
2000 case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}

	return err;
}
2008 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2009 struct mlxsw_sp_ipip_entry *ipip_entry,
2010 struct net_device *ul_dev,
2012 unsigned long event,
2013 struct netdev_notifier_info *info)
2015 struct netdev_notifier_changeupper_info *chup;
2016 struct netlink_ext_ack *extack;
	switch (event) {
	case NETDEV_CHANGEUPPER:
2020 chup = container_of(info, typeof(*chup), info);
2021 extack = info->extack;
2022 if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;
	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}

	return 0;
}
2042 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2043 struct net_device *ul_dev,
2044 unsigned long event,
2045 struct netdev_notifier_info *info)
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								 ul_dev,
								 ipip_entry))) {
2053 struct mlxsw_sp_ipip_entry *prev;
2054 bool demote_this = false;
2056 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}

		if (demote_this) {
2066 if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}

	return 0;
}
2083 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2084 enum mlxsw_sp_l3proto ul_proto,
2085 const union mlxsw_sp_l3addr *ul_sip,
2088 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2089 struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;
2093 mutex_lock(&mlxsw_sp->router->lock);
	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}
2100 router->nve_decap_config.ul_tb_id = ul_tb_id;
2101 router->nve_decap_config.tunnel_index = tunnel_index;
2102 router->nve_decap_config.ul_proto = ul_proto;
2103 router->nve_decap_config.ul_sip = *ul_sip;
2104 router->nve_decap_config.valid = true;
2106 /* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

2115 fib_entry->decap.tunnel_index = tunnel_index;
2116 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2118 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

2124 err_fib_entry_update:
2125 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2126 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}
2132 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2133 enum mlxsw_sp_l3proto ul_proto,
2134 const union mlxsw_sp_l3addr *ul_sip)
2136 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2137 struct mlxsw_sp_router *router = mlxsw_sp->router;
2138 struct mlxsw_sp_fib_entry *fib_entry;
2140 mutex_lock(&mlxsw_sp->router->lock);
	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;
2145 router->nve_decap_config.valid = false;
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

2153 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2154 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}
2159 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2161 enum mlxsw_sp_l3proto ul_proto,
2162 const union mlxsw_sp_l3addr *ul_sip)
2164 struct mlxsw_sp_router *router = mlxsw_sp->router;
2166 return router->nve_decap_config.valid &&
2167 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2168 router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}
2173 struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
2177 struct mlxsw_sp_neigh_entry {
2178 struct list_head rif_list_node;
2179 struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
2183 unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
2187 struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};
2192 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2193 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2194 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2195 .key_len = sizeof(struct mlxsw_sp_neigh_key),
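/* Iterate over the neighbour entries of a RIF: pass a NULL neigh_entry to
 * get the first entry, or a previous entry to get the one following it.
 * Used when dumping the dpipe host tables.
 */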
2198 struct mlxsw_sp_neigh_entry *
2199 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2200 struct mlxsw_sp_neigh_entry *neigh_entry)
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}
2215 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2217 return neigh_entry->key.n->tbl->family;
2221 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2223 return neigh_entry->ha;
2226 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2228 struct neighbour *n;
2230 n = neigh_entry->key.n;
2231 return ntohl(*((__be32 *) n->primary_key));
2235 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2237 struct neighbour *n;
2239 n = neigh_entry->key.n;
2240 return (struct in6_addr *) &n->primary_key;
2243 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2244 struct mlxsw_sp_neigh_entry *neigh_entry,
	if (!neigh_entry->counter_valid)
		return -EINVAL;
2250 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2254 static struct mlxsw_sp_neigh_entry *
2255 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2258 struct mlxsw_sp_neigh_entry *neigh_entry;
	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;
2264 neigh_entry->key.n = n;
2265 neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
2277 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2278 struct mlxsw_sp_neigh_entry *neigh_entry)
2280 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2281 &neigh_entry->ht_node,
2282 mlxsw_sp_neigh_ht_params);
2286 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2287 struct mlxsw_sp_neigh_entry *neigh_entry)
2289 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2290 &neigh_entry->ht_node,
2291 mlxsw_sp_neigh_ht_params);
2295 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2296 struct mlxsw_sp_neigh_entry *neigh_entry)
2298 struct devlink *devlink;
2299 const char *table_name;
2301 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2303 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2306 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2313 devlink = priv_to_devlink(mlxsw_sp->core);
2314 return devlink_dpipe_table_counter_enabled(devlink, table_name);
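/* Note: a neighbour counter is only allocated when the user enabled
 * counters on the matching dpipe host table (host4 / host6) via
 * devlink, so deployments that do not monitor host entries pay no
 * counter cost.
 */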
2318 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2319 struct mlxsw_sp_neigh_entry *neigh_entry)
2321 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2324 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2327 neigh_entry->counter_valid = true;
2331 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2332 struct mlxsw_sp_neigh_entry *neigh_entry)
2334 if (!neigh_entry->counter_valid)
2336 mlxsw_sp_flow_counter_free(mlxsw_sp,
2337 neigh_entry->counter_index);
2338 neigh_entry->counter_valid = false;
2341 static struct mlxsw_sp_neigh_entry *
2342 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2344 struct mlxsw_sp_neigh_entry *neigh_entry;
2345 struct mlxsw_sp_rif *rif;
2348 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2350 return ERR_PTR(-EINVAL);
2352 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2354 return ERR_PTR(-ENOMEM);
2356 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2358 goto err_neigh_entry_insert;
2360 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2361 atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2362 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2366 err_neigh_entry_insert:
2367 mlxsw_sp_neigh_entry_free(neigh_entry);
2368 return ERR_PTR(err);
2372 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2373 struct mlxsw_sp_neigh_entry *neigh_entry)
2375 list_del(&neigh_entry->rif_list_node);
2376 atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2377 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2378 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2379 mlxsw_sp_neigh_entry_free(neigh_entry);
2382 static struct mlxsw_sp_neigh_entry *
2383 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2385 struct mlxsw_sp_neigh_key key;
2388 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2389 &key, mlxsw_sp_neigh_ht_params);
2393 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2395 unsigned long interval;
2397 #if IS_ENABLED(CONFIG_IPV6)
2398 interval = min_t(unsigned long,
2399 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2400 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2402 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2404 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2407 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2411 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2412 struct net_device *dev;
2413 struct neighbour *n;
2418 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2420 if (WARN_ON_ONCE(rif >= max_rifs))
2422 if (!mlxsw_sp->router->rifs[rif]) {
2423 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2428 dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2429 n = neigh_lookup(&arp_tbl, &dipn, dev);
2433 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2434 neigh_event_send(n, NULL);
2438 #if IS_ENABLED(CONFIG_IPV6)
2439 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2443 struct net_device *dev;
2444 struct neighbour *n;
2445 struct in6_addr dip;
2448 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2451 if (!mlxsw_sp->router->rifs[rif]) {
2452 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2456 dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2457 n = neigh_lookup(&nd_tbl, &dip, dev);
2461 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2462 neigh_event_send(n, NULL);
2466 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2473 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2480 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2482 /* Hardware starts counting at 0, so add 1. */
2485 /* Each record consists of several neighbour entries. */
2486 for (i = 0; i < num_entries; i++) {
2489 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2490 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2496 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2500 /* One record contains one entry. */
2501 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2505 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2506 char *rauhtd_pl, int rec_index)
2508 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2509 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2510 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2513 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2514 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2520 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2522 u8 num_rec, last_rec_index, num_entries;
2524 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2525 last_rec_index = num_rec - 1;
2527 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2529 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2530 MLXSW_REG_RAUHTD_TYPE_IPV6)
2533 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2535 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2541 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2543 enum mlxsw_reg_rauhtd_type type)
2548 /* Ensure the RIF we read from the device does not change mid-dump. */
2549 mutex_lock(&mlxsw_sp->router->lock);
2551 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2552 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2555 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2558 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2559 for (i = 0; i < num_rec; i++)
2560 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2562 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2563 mutex_unlock(&mlxsw_sp->router->lock);
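/* The dump above is retried for as long as the previous response was
 * full: a response is considered full when it carries the maximum
 * number of records and its last record is exhausted - an IPv6
 * record always is (one entry per record), an IPv4 record only once
 * all of its entries are in use.
 */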
2568 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2570 enum mlxsw_reg_rauhtd_type type;
2574 if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2577 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2581 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2582 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2586 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2587 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2593 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2595 struct mlxsw_sp_neigh_entry *neigh_entry;
2597 mutex_lock(&mlxsw_sp->router->lock);
2598 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2599 nexthop_neighs_list_node)
2600 /* If this neigh has nexthops, make the kernel think this neigh
2601 * is active regardless of the traffic.
2603 neigh_event_send(neigh_entry->key.n, NULL);
2604 mutex_unlock(&mlxsw_sp->router->lock);
2608 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2610 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2612 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2613 msecs_to_jiffies(interval));
2616 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2618 struct mlxsw_sp_router *router;
2621 router = container_of(work, struct mlxsw_sp_router,
2622 neighs_update.dw.work);
2623 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2625 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2627 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2629 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2632 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2634 struct mlxsw_sp_neigh_entry *neigh_entry;
2635 struct mlxsw_sp_router *router;
2637 router = container_of(work, struct mlxsw_sp_router,
2638 nexthop_probe_dw.work);
2639 /* Iterate over nexthop neighbours, find those that are unresolved
2640 * and send ARP requests to them. This solves the chicken-and-egg
2641 * problem where a nexthop is not offloaded until its neighbour is
2642 * resolved, but the neighbour would never get resolved if traffic
2643 * is flowing in hardware via a different nexthop.
2645 mutex_lock(&router->lock);
2646 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2647 nexthop_neighs_list_node)
2648 if (!neigh_entry->connected)
2649 neigh_event_send(neigh_entry->key.n, NULL);
2650 mutex_unlock(&router->lock);
2652 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2653 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2657 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2658 struct mlxsw_sp_neigh_entry *neigh_entry,
2659 bool removing, bool dead);
2661 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2663 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2664 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2668 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2669 struct mlxsw_sp_neigh_entry *neigh_entry,
2670 enum mlxsw_reg_rauht_op op)
2672 struct neighbour *n = neigh_entry->key.n;
2673 u32 dip = ntohl(*((__be32 *) n->primary_key));
2674 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2676 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2678 if (neigh_entry->counter_valid)
2679 mlxsw_reg_rauht_pack_counter(rauht_pl,
2680 neigh_entry->counter_index);
2681 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2685 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2686 struct mlxsw_sp_neigh_entry *neigh_entry,
2687 enum mlxsw_reg_rauht_op op)
2689 struct neighbour *n = neigh_entry->key.n;
2690 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2691 const char *dip = n->primary_key;
2693 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2695 if (neigh_entry->counter_valid)
2696 mlxsw_reg_rauht_pack_counter(rauht_pl,
2697 neigh_entry->counter_index);
2698 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2701 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2703 struct neighbour *n = neigh_entry->key.n;
2705 /* Packets with a link-local destination address are trapped
2706 * after LPM lookup and never reach the neighbour table, so
2707 * there is no need to program such neighbours to the device.
2709 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2710 IPV6_ADDR_LINKLOCAL)
2716 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2717 struct mlxsw_sp_neigh_entry *neigh_entry,
2720 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2723 if (!adding && !neigh_entry->connected)
2725 neigh_entry->connected = adding;
2726 if (neigh_entry->key.n->tbl->family == AF_INET) {
2727 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2731 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2732 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2734 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2744 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2746 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
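/* Summary: the update above programs (or removes) the host entry via
 * RAUHT according to the neighbour's address family and then mirrors
 * the result back to the kernel through the NTF_OFFLOADED flag, so
 * user space can tell which neighbours are handled in hardware.
 */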
2750 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2751 struct mlxsw_sp_neigh_entry *neigh_entry,
2755 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2757 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2758 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2761 struct mlxsw_sp_netevent_work {
2762 struct work_struct work;
2763 struct mlxsw_sp *mlxsw_sp;
2764 struct neighbour *n;
2767 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2769 struct mlxsw_sp_netevent_work *net_work =
2770 container_of(work, struct mlxsw_sp_netevent_work, work);
2771 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2772 struct mlxsw_sp_neigh_entry *neigh_entry;
2773 struct neighbour *n = net_work->n;
2774 unsigned char ha[ETH_ALEN];
2775 bool entry_connected;
2778 /* If these parameters are changed after we release the lock,
2779 * then we are guaranteed to receive another event letting us know about it.
2782 read_lock_bh(&n->lock);
2783 memcpy(ha, n->ha, ETH_ALEN);
2784 nud_state = n->nud_state;
2786 read_unlock_bh(&n->lock);
2788 mutex_lock(&mlxsw_sp->router->lock);
2789 mlxsw_sp_span_respin(mlxsw_sp);
2791 entry_connected = nud_state & NUD_VALID && !dead;
2792 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2793 if (!entry_connected && !neigh_entry)
2796 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2797 if (IS_ERR(neigh_entry))
2801 if (neigh_entry->connected && entry_connected &&
2802 !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2805 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2806 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2807 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2810 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2811 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2814 mutex_unlock(&mlxsw_sp->router->lock);
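/* The work above deliberately snapshots the neighbour state (ha,
 * nud_state, dead) under the neigh read lock before taking the
 * router lock; any change racing with the snapshot triggers another
 * netevent, so no update is lost. Entries are created lazily on the
 * first connected event and destroyed once disconnected and no
 * longer used by any nexthop.
 */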
2819 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2821 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2823 struct mlxsw_sp_netevent_work *net_work =
2824 container_of(work, struct mlxsw_sp_netevent_work, work);
2825 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2827 mlxsw_sp_mp_hash_init(mlxsw_sp);
2831 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2833 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2835 struct mlxsw_sp_netevent_work *net_work =
2836 container_of(work, struct mlxsw_sp_netevent_work, work);
2837 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2839 __mlxsw_sp_router_init(mlxsw_sp);
2843 static int mlxsw_sp_router_schedule_work(struct net *net,
2844 struct mlxsw_sp_router *router,
2845 struct neighbour *n,
2846 void (*cb)(struct work_struct *))
2848 struct mlxsw_sp_netevent_work *net_work;
2850 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2853 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2857 INIT_WORK(&net_work->work, cb);
2858 net_work->mlxsw_sp = router->mlxsw_sp;
2860 mlxsw_core_schedule_work(&net_work->work);
2864 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2866 struct mlxsw_sp_port *mlxsw_sp_port;
2869 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2871 return !!mlxsw_sp_port;
2874 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2875 unsigned long event, void *ptr)
2877 struct mlxsw_sp_router *router;
2878 unsigned long interval;
2879 struct neigh_parms *p;
2880 struct neighbour *n;
2883 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2886 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2889 /* We don't care about changes in the default table. */
2890 if (!p->dev || (p->tbl->family != AF_INET &&
2891 p->tbl->family != AF_INET6))
2894 /* We are in atomic context and can't take RTNL mutex,
2895 * so use RCU variant to walk the device chain.
2897 if (!mlxsw_sp_dev_lower_is_port(p->dev))
2900 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2901 router->neighs_update.interval = interval;
2903 case NETEVENT_NEIGH_UPDATE:
2905 net = neigh_parms_net(n->parms);
2907 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2910 if (!mlxsw_sp_dev_lower_is_port(n->dev))
2913 /* Take a reference to ensure the neighbour won't be
2914 * destroyed until we drop the reference in the delayed work.
2918 return mlxsw_sp_router_schedule_work(net, router, n,
2919 mlxsw_sp_router_neigh_event_work);
2921 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2922 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2923 return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2924 mlxsw_sp_router_mp_hash_event_work);
2926 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2927 return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2928 mlxsw_sp_router_update_priority_work);
2934 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2938 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2939 &mlxsw_sp_neigh_ht_params);
2943 /* Initialize the polling interval according to the default table. */
2946 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2948 /* Create the delayed works for neighbour activity update and unresolved nexthop probing. */
2949 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2950 mlxsw_sp_router_neighs_update_work);
2951 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2952 mlxsw_sp_router_probe_unresolved_nexthops);
2953 atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2954 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2955 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2959 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2961 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2962 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2963 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2966 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2967 struct mlxsw_sp_rif *rif)
2969 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2971 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2973 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2974 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2978 enum mlxsw_sp_nexthop_type {
2979 MLXSW_SP_NEXTHOP_TYPE_ETH,
2980 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2983 enum mlxsw_sp_nexthop_action {
2984 /* Nexthop forwards packets to an egress RIF */
2985 MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2986 /* Nexthop discards packets */
2987 MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2988 /* Nexthop traps packets */
2989 MLXSW_SP_NEXTHOP_ACTION_TRAP,
2992 struct mlxsw_sp_nexthop_key {
2993 struct fib_nh *fib_nh;
2996 struct mlxsw_sp_nexthop {
2997 struct list_head neigh_list_node; /* member of neigh entry list */
2998 struct list_head crif_list_node;
2999 struct list_head router_list_node;
3000 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3001 * this nexthop belongs to
3003 struct rhash_head ht_node;
3004 struct neigh_table *neigh_tbl;
3005 struct mlxsw_sp_nexthop_key key;
3006 unsigned char gw_addr[sizeof(struct in6_addr)];
3010 int num_adj_entries;
3011 struct mlxsw_sp_crif *crif;
3012 u8 should_offload:1, /* set indicates this nexthop should be written
3013 * to the adjacency table.
3015 offloaded:1, /* set indicates this nexthop was written to the
3018 update:1; /* set indicates this nexthop should be updated in the
3019 * adjacency table (e.g., its MAC changed).
3021 enum mlxsw_sp_nexthop_action action;
3022 enum mlxsw_sp_nexthop_type type;
3024 struct mlxsw_sp_neigh_entry *neigh_entry;
3025 struct mlxsw_sp_ipip_entry *ipip_entry;
3027 unsigned int counter_index;
3031 static struct net_device *
3032 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3036 return nh->crif->key.dev;
3039 enum mlxsw_sp_nexthop_group_type {
3040 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3041 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3042 MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3045 struct mlxsw_sp_nexthop_group_info {
3046 struct mlxsw_sp_nexthop_group *nh_grp;
3050 int sum_norm_weight;
3051 u8 adj_index_valid:1,
3052 gateway:1, /* routes using the group use a gateway */
3054 struct list_head list; /* member in nh_res_grp_list */
3055 struct mlxsw_sp_nexthop nexthops[];
3058 static struct mlxsw_sp_rif *
3059 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3061 struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3068 struct mlxsw_sp_nexthop_group_vr_key {
3070 enum mlxsw_sp_l3proto proto;
3073 struct mlxsw_sp_nexthop_group_vr_entry {
3074 struct list_head list; /* member in vr_list */
3075 struct rhash_head ht_node; /* member in vr_ht */
3076 refcount_t ref_count;
3077 struct mlxsw_sp_nexthop_group_vr_key key;
3080 struct mlxsw_sp_nexthop_group {
3081 struct rhash_head ht_node;
3082 struct list_head fib_list; /* list of fib entries that use this group */
3085 struct fib_info *fi;
3091 struct mlxsw_sp_nexthop_group_info *nhgi;
3092 struct list_head vr_list;
3093 struct rhashtable vr_ht;
3094 enum mlxsw_sp_nexthop_group_type type;
3098 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3099 struct mlxsw_sp_nexthop *nh)
3101 struct devlink *devlink;
3103 devlink = priv_to_devlink(mlxsw_sp->core);
3104 if (!devlink_dpipe_table_counter_enabled(devlink,
3105 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3108 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3111 nh->counter_valid = true;
3114 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3115 struct mlxsw_sp_nexthop *nh)
3117 if (!nh->counter_valid)
3119 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3120 nh->counter_valid = false;
3123 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3124 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3126 if (!nh->counter_valid)
3129 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3133 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3134 struct mlxsw_sp_nexthop *nh)
3137 if (list_empty(&router->nexthop_list))
3140 return list_first_entry(&router->nexthop_list,
3141 typeof(*nh), router_list_node);
3143 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3145 return list_next_entry(nh, router_list_node);
3148 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3150 return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3153 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3155 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3156 !mlxsw_sp_nexthop_is_forward(nh))
3158 return nh->neigh_entry->ha;
3161 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3162 u32 *p_adj_size, u32 *p_adj_hash_index)
3164 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3165 u32 adj_hash_index = 0;
3168 if (!nh->offloaded || !nhgi->adj_index_valid)
3171 *p_adj_index = nhgi->adj_index;
3172 *p_adj_size = nhgi->ecmp_size;
3174 for (i = 0; i < nhgi->count; i++) {
3175 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3179 if (nh_iter->offloaded)
3180 adj_hash_index += nh_iter->num_adj_entries;
3183 *p_adj_hash_index = adj_hash_index;
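/* Illustration: the hash index computed above is simply the number
 * of adjacency entries occupied by the offloaded nexthops that
 * precede this one in the group. E.g., with preceding nexthops
 * holding 1 and 2 entries, the next nexthop starts at hash index 3.
 */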
3187 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3189 if (WARN_ON(!nh->crif))
3191 return nh->crif->rif;
3194 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3196 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3199 for (i = 0; i < nhgi->count; i++) {
3200 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3202 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3208 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3209 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3210 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3211 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3212 .automatic_shrinking = true,
3215 static struct mlxsw_sp_nexthop_group_vr_entry *
3216 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3217 const struct mlxsw_sp_fib *fib)
3219 struct mlxsw_sp_nexthop_group_vr_key key;
3221 memset(&key, 0, sizeof(key));
3222 key.vr_id = fib->vr->id;
3223 key.proto = fib->proto;
3224 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3225 mlxsw_sp_nexthop_group_vr_ht_params);
3229 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3230 const struct mlxsw_sp_fib *fib)
3232 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3235 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3239 vr_entry->key.vr_id = fib->vr->id;
3240 vr_entry->key.proto = fib->proto;
3241 refcount_set(&vr_entry->ref_count, 1);
3243 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3244 mlxsw_sp_nexthop_group_vr_ht_params);
3246 goto err_hashtable_insert;
3248 list_add(&vr_entry->list, &nh_grp->vr_list);
3252 err_hashtable_insert:
3258 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3259 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3261 list_del(&vr_entry->list);
3262 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3263 mlxsw_sp_nexthop_group_vr_ht_params);
3268 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3269 const struct mlxsw_sp_fib *fib)
3271 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3273 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3275 refcount_inc(&vr_entry->ref_count);
3279 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3283 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3284 const struct mlxsw_sp_fib *fib)
3286 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3288 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3289 if (WARN_ON_ONCE(!vr_entry))
3292 if (!refcount_dec_and_test(&vr_entry->ref_count))
3295 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3298 struct mlxsw_sp_nexthop_group_cmp_arg {
3299 enum mlxsw_sp_nexthop_group_type type;
3301 struct fib_info *fi;
3302 struct mlxsw_sp_fib6_entry *fib6_entry;
3308 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3309 const struct in6_addr *gw, int ifindex,
3314 for (i = 0; i < nh_grp->nhgi->count; i++) {
3315 const struct mlxsw_sp_nexthop *nh;
3317 nh = &nh_grp->nhgi->nexthops[i];
3318 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3319 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3327 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3328 const struct mlxsw_sp_fib6_entry *fib6_entry)
3330 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3332 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3335 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3336 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3337 struct in6_addr *gw;
3338 int ifindex, weight;
3340 ifindex = fib6_nh->fib_nh_dev->ifindex;
3341 weight = fib6_nh->fib_nh_weight;
3342 gw = &fib6_nh->fib_nh_gw6;
3343 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3352 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3354 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3355 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3357 if (nh_grp->type != cmp_arg->type)
3360 switch (cmp_arg->type) {
3361 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3362 return cmp_arg->fi != nh_grp->ipv4.fi;
3363 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3364 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3365 cmp_arg->fib6_entry);
3366 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3367 return cmp_arg->id != nh_grp->obj.id;
3374 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3376 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3377 const struct mlxsw_sp_nexthop *nh;
3378 struct fib_info *fi;
3382 switch (nh_grp->type) {
3383 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3384 fi = nh_grp->ipv4.fi;
3385 return jhash(&fi, sizeof(fi), seed);
3386 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3387 val = nh_grp->nhgi->count;
3388 for (i = 0; i < nh_grp->nhgi->count; i++) {
3389 nh = &nh_grp->nhgi->nexthops[i];
3390 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3391 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3393 return jhash(&val, sizeof(val), seed);
3394 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3395 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3403 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3405 unsigned int val = fib6_entry->nrt6;
3406 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3408 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3409 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3410 struct net_device *dev = fib6_nh->fib_nh_dev;
3411 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3413 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3414 val ^= jhash(gw, sizeof(*gw), seed);
3417 return jhash(&val, sizeof(val), seed);
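/* Because the per-nexthop hashes above are folded in with XOR, which
 * is commutative, the group hash does not depend on the order of the
 * routes in rt6_list - matching the order-insensitive comparison
 * done by mlxsw_sp_nexthop6_group_cmp().
 */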
3421 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3423 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3425 switch (cmp_arg->type) {
3426 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3427 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3428 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3429 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3430 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3431 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3438 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3439 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3440 .hashfn = mlxsw_sp_nexthop_group_hash,
3441 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3442 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3445 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3446 struct mlxsw_sp_nexthop_group *nh_grp)
3448 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3449 !nh_grp->nhgi->gateway)
3452 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3454 mlxsw_sp_nexthop_group_ht_params);
3457 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3458 struct mlxsw_sp_nexthop_group *nh_grp)
3460 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3461 !nh_grp->nhgi->gateway)
3464 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3466 mlxsw_sp_nexthop_group_ht_params);
3469 static struct mlxsw_sp_nexthop_group *
3470 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3471 struct fib_info *fi)
3473 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3475 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3477 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3479 mlxsw_sp_nexthop_group_ht_params);
3482 static struct mlxsw_sp_nexthop_group *
3483 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3484 struct mlxsw_sp_fib6_entry *fib6_entry)
3486 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3488 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3489 cmp_arg.fib6_entry = fib6_entry;
3490 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3492 mlxsw_sp_nexthop_group_ht_params);
3495 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3496 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3497 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3498 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3501 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3502 struct mlxsw_sp_nexthop *nh)
3504 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3505 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3508 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3509 struct mlxsw_sp_nexthop *nh)
3511 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3512 mlxsw_sp_nexthop_ht_params);
3515 static struct mlxsw_sp_nexthop *
3516 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3517 struct mlxsw_sp_nexthop_key key)
3519 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3520 mlxsw_sp_nexthop_ht_params);
3523 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3524 enum mlxsw_sp_l3proto proto,
3526 u32 adj_index, u16 ecmp_size,
3530 char raleu_pl[MLXSW_REG_RALEU_LEN];
3532 mlxsw_reg_raleu_pack(raleu_pl,
3533 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3534 adj_index, ecmp_size, new_adj_index,
3536 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3539 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3540 struct mlxsw_sp_nexthop_group *nh_grp,
3541 u32 old_adj_index, u16 old_ecmp_size)
3543 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3544 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3547 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3548 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3549 vr_entry->key.proto,
3550 vr_entry->key.vr_id,
3556 goto err_mass_update_vr;
3561 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3562 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3563 vr_entry->key.vr_id,
3566 old_adj_index, old_ecmp_size);
3570 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3572 struct mlxsw_sp_nexthop *nh,
3573 bool force, char *ratr_pl)
3575 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3576 struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3577 enum mlxsw_reg_ratr_op op;
3580 rif_index = rif ? rif->rif_index :
3581 mlxsw_sp->router->lb_crif->rif->rif_index;
3582 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3583 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3584 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3585 adj_index, rif_index);
3586 switch (nh->action) {
3587 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3588 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3590 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3591 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3592 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3594 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3595 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3596 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3597 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3603 if (nh->counter_valid)
3604 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3606 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3608 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
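/* Note: a RATR entry always carries an egress RIF, so when the
 * nexthop has none (e.g., it only discards or traps packets), the
 * router's loopback RIF is written above instead.
 */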
3611 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3612 struct mlxsw_sp_nexthop *nh, bool force,
3617 for (i = 0; i < nh->num_adj_entries; i++) {
3620 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3621 nh, force, ratr_pl);
3629 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3631 struct mlxsw_sp_nexthop *nh,
3632 bool force, char *ratr_pl)
3634 const struct mlxsw_sp_ipip_ops *ipip_ops;
3636 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3637 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3641 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3643 struct mlxsw_sp_nexthop *nh, bool force,
3648 for (i = 0; i < nh->num_adj_entries; i++) {
3651 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3652 nh, force, ratr_pl);
3660 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3661 struct mlxsw_sp_nexthop *nh, bool force,
3664 /* When action is discard or trap, the nexthop must be
3665 * programmed as an Ethernet nexthop.
3667 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3668 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3669 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3670 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3673 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3678 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3679 struct mlxsw_sp_nexthop_group_info *nhgi,
3682 char ratr_pl[MLXSW_REG_RATR_LEN];
3683 u32 adj_index = nhgi->adj_index; /* base */
3684 struct mlxsw_sp_nexthop *nh;
3687 for (i = 0; i < nhgi->count; i++) {
3688 nh = &nhgi->nexthops[i];
3690 if (!nh->should_offload) {
3695 if (nh->update || reallocate) {
3698 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3705 adj_index += nh->num_adj_entries;
3711 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3712 struct mlxsw_sp_nexthop_group *nh_grp)
3714 struct mlxsw_sp_fib_entry *fib_entry;
3717 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3718 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3725 struct mlxsw_sp_adj_grp_size_range {
3726 u16 start; /* Inclusive */
3727 u16 end; /* Inclusive */
3730 /* Ordered by range start value */
3731 static const struct mlxsw_sp_adj_grp_size_range
3732 mlxsw_sp1_adj_grp_size_ranges[] = {
3733 { .start = 1, .end = 64 },
3734 { .start = 512, .end = 512 },
3735 { .start = 1024, .end = 1024 },
3736 { .start = 2048, .end = 2048 },
3737 { .start = 4096, .end = 4096 },
3740 /* Ordered by range start value */
3741 static const struct mlxsw_sp_adj_grp_size_range
3742 mlxsw_sp2_adj_grp_size_ranges[] = {
3743 { .start = 1, .end = 128 },
3744 { .start = 256, .end = 256 },
3745 { .start = 512, .end = 512 },
3746 { .start = 1024, .end = 1024 },
3747 { .start = 2048, .end = 2048 },
3748 { .start = 4096, .end = 4096 },
3751 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3752 u16 *p_adj_grp_size)
3756 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3757 const struct mlxsw_sp_adj_grp_size_range *size_range;
3759 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3761 if (*p_adj_grp_size >= size_range->start &&
3762 *p_adj_grp_size <= size_range->end)
3765 if (*p_adj_grp_size <= size_range->end) {
3766 *p_adj_grp_size = size_range->end;
3772 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3773 u16 *p_adj_grp_size,
3774 unsigned int alloc_size)
3778 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3779 const struct mlxsw_sp_adj_grp_size_range *size_range;
3781 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3783 if (alloc_size >= size_range->end) {
3784 *p_adj_grp_size = size_range->end;
3790 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3791 u16 *p_adj_grp_size)
3793 unsigned int alloc_size;
3796 /* Round up the requested group size to the next size supported
3797 * by the device and make sure the request can be satisfied.
3799 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3800 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3801 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3802 *p_adj_grp_size, &alloc_size);
3805 /* It is possible the allocation results in more allocated
3806 * entries than requested. Try to use as many of them as possible. */
3809 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
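/* Worked example, assuming the Spectrum-2 ranges above: a requested
 * size of 130 falls between the 1-128 and 256 ranges and is first
 * rounded up to 256. If the KVD linear allocator can then provide,
 * say, 300 entries, the size is rounded back down to 256 - the
 * largest supported size not exceeding the allocation.
 */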
3815 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3817 int i, g = 0, sum_norm_weight = 0;
3818 struct mlxsw_sp_nexthop *nh;
3820 for (i = 0; i < nhgi->count; i++) {
3821 nh = &nhgi->nexthops[i];
3823 if (!nh->should_offload)
3826 g = gcd(nh->nh_weight, g);
3831 for (i = 0; i < nhgi->count; i++) {
3832 nh = &nhgi->nexthops[i];
3834 if (!nh->should_offload)
3836 nh->norm_nh_weight = nh->nh_weight / g;
3837 sum_norm_weight += nh->norm_nh_weight;
3840 nhgi->sum_norm_weight = sum_norm_weight;
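/* Worked example: offloadable nexthops with weights 3 and 6 have a
 * gcd of 3, so their normalized weights become 1 and 2 and
 * sum_norm_weight is 3 - the smallest group size that still
 * preserves the 1:2 ratio.
 */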
3844 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3846 int i, weight = 0, lower_bound = 0;
3847 int total = nhgi->sum_norm_weight;
3848 u16 ecmp_size = nhgi->ecmp_size;
3850 for (i = 0; i < nhgi->count; i++) {
3851 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3854 if (!nh->should_offload)
3856 weight += nh->norm_nh_weight;
3857 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3858 nh->num_adj_entries = upper_bound - lower_bound;
3859 lower_bound = upper_bound;
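/* Continuing the example above: with sum_norm_weight = 3 and
 * ecmp_size = 3, the running weight is 1 after the first nexthop and
 * 3 after the second, giving upper bounds of 1 and 3 and therefore 1
 * and 2 adjacency entries respectively - the 1:2 ratio is realized
 * exactly.
 */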
3863 static struct mlxsw_sp_nexthop *
3864 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3865 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3868 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3869 struct mlxsw_sp_nexthop_group *nh_grp)
3873 for (i = 0; i < nh_grp->nhgi->count; i++) {
3874 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3877 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3879 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3884 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3885 struct mlxsw_sp_fib6_entry *fib6_entry)
3887 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3889 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3890 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3891 struct mlxsw_sp_nexthop *nh;
3893 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3894 if (nh && nh->offloaded)
3895 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3897 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3902 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3903 struct mlxsw_sp_nexthop_group *nh_grp)
3905 struct mlxsw_sp_fib6_entry *fib6_entry;
3907 /* Unfortunately, in IPv6 the route and the nexthop are described by
3908 * the same struct, so we need to iterate over all the routes using the
3909 * nexthop group and set / clear the offload indication for them.
3911 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3912 common.nexthop_group_node)
3913 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3917 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3918 const struct mlxsw_sp_nexthop *nh,
3921 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3922 bool offload = false, trap = false;
3924 if (nh->offloaded) {
3925 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3930 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3931 bucket_index, offload, trap);
3935 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3936 struct mlxsw_sp_nexthop_group *nh_grp)
3940 /* Do not update the flags if the nexthop group is being destroyed, since:
3942 * 1. The nexthop object is being deleted, in which case the flags are not relevant.
3944 * 2. The nexthop group was replaced by a newer group, in which case
3945 * the flags of the nexthop object were already updated based on the new group.
3948 if (nh_grp->can_destroy)
3951 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3952 nh_grp->nhgi->adj_index_valid, false);
3954 /* Update flags of individual nexthop buckets in case of a resilient group. */
3957 if (!nh_grp->nhgi->is_resilient)
3960 for (i = 0; i < nh_grp->nhgi->count; i++) {
3961 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3963 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3968 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3969 struct mlxsw_sp_nexthop_group *nh_grp)
3971 switch (nh_grp->type) {
3972 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3973 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3975 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3976 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3978 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3979 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3985 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3986 struct mlxsw_sp_nexthop_group *nh_grp)
3988 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3989 u16 ecmp_size, old_ecmp_size;
3990 struct mlxsw_sp_nexthop *nh;
3991 bool offload_change = false;
3993 bool old_adj_index_valid;
3998 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4000 for (i = 0; i < nhgi->count; i++) {
4001 nh = &nhgi->nexthops[i];
4003 if (nh->should_offload != nh->offloaded) {
4004 offload_change = true;
4005 if (nh->should_offload)
4009 if (!offload_change) {
4010 /* Nothing was added or removed, so no need to reallocate. Just
4011 * update MAC on existing adjacency indexes.
4013 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4015 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4018 /* Flags of individual nexthop buckets might need to be updated. */
4021 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4024 mlxsw_sp_nexthop_group_normalize(nhgi);
4025 if (!nhgi->sum_norm_weight) {
4026 /* No neigh of this group is connected so we just set
4027 * the trap and let everything flow through the kernel.
4033 ecmp_size = nhgi->sum_norm_weight;
4034 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4036 /* No valid allocation size available. */
4039 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4040 ecmp_size, &adj_index);
4042 /* We ran out of KVD linear space, just set the
4043 * trap and let everything flow through the kernel.
4045 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4048 old_adj_index_valid = nhgi->adj_index_valid;
4049 old_adj_index = nhgi->adj_index;
4050 old_ecmp_size = nhgi->ecmp_size;
4051 nhgi->adj_index_valid = 1;
4052 nhgi->adj_index = adj_index;
4053 nhgi->ecmp_size = ecmp_size;
4054 mlxsw_sp_nexthop_group_rebalance(nhgi);
4055 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4057 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4061 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4063 if (!old_adj_index_valid) {
4064 /* The trap was set for fib entries, so we have to call
4065 * fib entry update to unset it and use the adjacency index.
4067 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4069 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4075 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4076 old_adj_index, old_ecmp_size);
4077 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4078 old_ecmp_size, old_adj_index);
4080 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4087 old_adj_index_valid = nhgi->adj_index_valid;
4088 nhgi->adj_index_valid = 0;
4089 for (i = 0; i < nhgi->count; i++) {
4090 nh = &nhgi->nexthops[i];
4093 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4095 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4096 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4097 if (old_adj_index_valid)
4098 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4099 nhgi->ecmp_size, nhgi->adj_index);
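/* In short, a refresh re-derives the group's adjacency layout from
 * scratch: normalize the weights, round the size to one the device
 * supports, allocate a fresh KVD linear range, distribute the
 * entries, and then either point the FIB entries at the new range
 * (first allocation) or mass-update them from the old range before
 * freeing it. On failure, traffic falls back to being trapped to the
 * kernel.
 */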
4103 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4107 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4108 nh->should_offload = 1;
4109 } else if (nh->nhgi->is_resilient) {
4110 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4111 nh->should_offload = 1;
4113 nh->should_offload = 0;
4119 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4120 struct mlxsw_sp_neigh_entry *neigh_entry)
4122 struct neighbour *n, *old_n = neigh_entry->key.n;
4123 struct mlxsw_sp_nexthop *nh;
4124 struct net_device *dev;
4125 bool entry_connected;
4129 nh = list_first_entry(&neigh_entry->nexthop_list,
4130 struct mlxsw_sp_nexthop, neigh_list_node);
4131 dev = mlxsw_sp_nexthop_dev(nh);
4133 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4135 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4138 neigh_event_send(n, NULL);
4141 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4142 neigh_entry->key.n = n;
4143 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4145 goto err_neigh_entry_insert;
4147 read_lock_bh(&n->lock);
4148 nud_state = n->nud_state;
4150 read_unlock_bh(&n->lock);
4151 entry_connected = nud_state & NUD_VALID && !dead;
4153 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4155 neigh_release(old_n);
4157 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4158 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4165 err_neigh_entry_insert:
4166 neigh_entry->key.n = old_n;
4167 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4173 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4174 struct mlxsw_sp_neigh_entry *neigh_entry,
4175 bool removing, bool dead)
4177 struct mlxsw_sp_nexthop *nh;
4179 if (list_empty(&neigh_entry->nexthop_list))
4185 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4188 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4192 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4194 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4195 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4199 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4200 struct mlxsw_sp_crif *crif)
4206 list_add(&nh->crif_list_node, &crif->nexthop_list);
4209 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4214 list_del(&nh->crif_list_node);
4218 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4219 struct mlxsw_sp_nexthop *nh)
4221 struct mlxsw_sp_neigh_entry *neigh_entry;
4222 struct net_device *dev;
4223 struct neighbour *n;
4227 if (WARN_ON(!nh->crif->rif))
4230 if (!nh->nhgi->gateway || nh->neigh_entry)
4232 dev = mlxsw_sp_nexthop_dev(nh);
4234 /* Take a reference on the neighbour here to ensure that it is
4235 * not destroyed before the nexthop entry is finished with it.
4236 * The reference is taken either in neigh_lookup() or
4237 * in neigh_create() in case n is not found.
4239 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4241 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4244 neigh_event_send(n, NULL);
4246 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4248 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4249 if (IS_ERR(neigh_entry)) {
4251 goto err_neigh_entry_create;
4255 /* If that is the first nexthop connected to that neigh, add to
4256 * nexthop_neighs_list
4258 if (list_empty(&neigh_entry->nexthop_list))
4259 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4260 &mlxsw_sp->router->nexthop_neighs_list);
4262 nh->neigh_entry = neigh_entry;
4263 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4264 read_lock_bh(&n->lock);
4265 nud_state = n->nud_state;
4267 read_unlock_bh(&n->lock);
4268 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4272 err_neigh_entry_create:
4277 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4278 struct mlxsw_sp_nexthop *nh)
4280 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4281 struct neighbour *n;
4285 n = neigh_entry->key.n;
4287 __mlxsw_sp_nexthop_neigh_update(nh, true);
4288 list_del(&nh->neigh_list_node);
4289 nh->neigh_entry = NULL;
4291 /* If that is the last nexthop connected to that neigh, remove from
4292 * nexthop_neighs_list
4294 if (list_empty(&neigh_entry->nexthop_list))
4295 list_del(&neigh_entry->nexthop_neighs_list_node);
4297 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4298 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4303 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4305 struct net_device *ul_dev;
4309 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4310 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4316 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4317 struct mlxsw_sp_nexthop *nh,
4318 struct mlxsw_sp_ipip_entry *ipip_entry)
4320 struct mlxsw_sp_crif *crif;
4323 if (!nh->nhgi->gateway || nh->ipip_entry)
4326 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4330 nh->ipip_entry = ipip_entry;
4331 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4332 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4333 mlxsw_sp_nexthop_crif_init(nh, crif);
4336 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4337 struct mlxsw_sp_nexthop *nh)
4339 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4344 __mlxsw_sp_nexthop_neigh_update(nh, true);
4345 nh->ipip_entry = NULL;
4348 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4349 const struct fib_nh *fib_nh,
4350 enum mlxsw_sp_ipip_type *p_ipipt)
4352 struct net_device *dev = fib_nh->fib_nh_dev;
4355 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4356 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4359 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4360 struct mlxsw_sp_nexthop *nh,
4361 const struct net_device *dev)
4363 const struct mlxsw_sp_ipip_ops *ipip_ops;
4364 struct mlxsw_sp_ipip_entry *ipip_entry;
4365 struct mlxsw_sp_crif *crif;
4368 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4370 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4371 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4372 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4373 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4378 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4379 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4383 mlxsw_sp_nexthop_crif_init(nh, crif);
4388 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4390 goto err_neigh_init;
4395 mlxsw_sp_nexthop_crif_fini(nh);
4399 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4400 struct mlxsw_sp_nexthop *nh)
4403 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4404 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4406 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4407 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4412 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4413 struct mlxsw_sp_nexthop *nh)
4415 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4416 mlxsw_sp_nexthop_crif_fini(nh);
4419 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4420 struct mlxsw_sp_nexthop_group *nh_grp,
4421 struct mlxsw_sp_nexthop *nh,
4422 struct fib_nh *fib_nh)
4424 struct net_device *dev = fib_nh->fib_nh_dev;
4425 struct in_device *in_dev;
4428 nh->nhgi = nh_grp->nhgi;
4429 nh->key.fib_nh = fib_nh;
4430 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4431 nh->nh_weight = fib_nh->fib_nh_weight;
4435 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4436 nh->neigh_tbl = &arp_tbl;
4437 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4441 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4442 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4446 nh->ifindex = dev->ifindex;
4449 in_dev = __in_dev_get_rcu(dev);
4450 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4451 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4457 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4459 goto err_nexthop_neigh_init;
4463 err_nexthop_neigh_init:
4464 list_del(&nh->router_list_node);
4465 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4466 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4470 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4471 struct mlxsw_sp_nexthop *nh)
4473 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4474 list_del(&nh->router_list_node);
4475 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4476 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4479 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4480 unsigned long event, struct fib_nh *fib_nh)
4482 struct mlxsw_sp_nexthop_key key;
4483 struct mlxsw_sp_nexthop *nh;
4485 key.fib_nh = fib_nh;
4486 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4491 case FIB_EVENT_NH_ADD:
4492 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4494 case FIB_EVENT_NH_DEL:
4495 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4499 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4502 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4503 struct mlxsw_sp_rif *rif)
4505 struct net_device *dev = mlxsw_sp_rif_dev(rif);
4506 struct mlxsw_sp_nexthop *nh;
4509 list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4511 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4514 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4515 removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4522 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4523 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4527 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4528 struct mlxsw_sp_rif *rif)
4530 struct mlxsw_sp_nexthop *nh, *tmp;
4532 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4534 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4535 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4539 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4541 enum mlxsw_reg_ratr_trap_action trap_action;
4542 char ratr_pl[MLXSW_REG_RATR_LEN];
4545 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4546 &mlxsw_sp->router->adj_trap_index);
4550 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4551 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4552 MLXSW_REG_RATR_TYPE_ETHERNET,
4553 mlxsw_sp->router->adj_trap_index,
4554 mlxsw_sp->router->lb_crif->rif->rif_index);
4555 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4556 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4557 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4559 goto err_ratr_write;
4564 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4565 mlxsw_sp->router->adj_trap_index);
4569 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4571 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4572 mlxsw_sp->router->adj_trap_index);
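/* A single trap adjacency entry is shared by all nexthop groups: the
 * first group to be created allocates and programs it, num_groups
 * reference-counts it, and the last group to go away frees it again.
 */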
4575 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4579 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4582 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4586 refcount_set(&mlxsw_sp->router->num_groups, 1);
4591 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4593 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4596 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
static int
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_nexthop_group *nh_grp,
			     unsigned long *activity)
{
	char *ratrad_pl;
	int i, err;

	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
	if (!ratrad_pl)
		return -ENOMEM;

	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
			      nh_grp->nhgi->count);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
	if (err)
		goto out;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
			continue;
		bitmap_set(activity, i, 1);
	}

out:
	kfree(ratrad_pl);
	return err;
}

#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */

static void
mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned long *activity;

	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
	if (!activity)
		return;

	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
					nh_grp->nhgi->count, activity);

	bitmap_free(activity);
}

static void
mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
			       msecs_to_jiffies(interval));
}
static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_router *router;
	bool reschedule = false;

	router = container_of(work, struct mlxsw_sp_router,
			      nh_grp_activity_dw.work);

	mutex_lock(&router->lock);

	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
		reschedule = true;
	}

	mutex_unlock(&router->lock);

	if (!reschedule)
		return;
	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
}
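
/* The handlers below implement offload of nexthop objects. As an
 * illustrative example (names and addresses are hypothetical), the
 * following iproute2 commands exercise this path:
 *
 *   # ip nexthop add id 1 via 192.0.2.2 dev swp1
 *   # ip nexthop add id 2 via 192.0.2.18 dev swp2
 *   # ip nexthop add id 10 group 1/2
 *   # ip nexthop replace id 10 group 1/2 type resilient buckets 32
 */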
static int
mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
				     const struct nh_notifier_single_info *nh,
				     struct netlink_ext_ack *extack)
{
	int err = -EINVAL;

	if (nh->is_fdb)
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
	else if (nh->has_encap)
		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
	else
		err = 0;

	return err;
}

static int
mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
					  const struct nh_notifier_single_info *nh,
					  struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
	if (err)
		return err;

	/* Device only nexthops with an IPIP device are programmed as
	 * encapsulating adjacency entries.
	 */
	if (!nh->gw_family && !nh->is_reject &&
	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
		return -EINVAL;
	}

	return 0;
}

static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
				    const struct nh_notifier_grp_info *nh_grp,
				    struct netlink_ext_ack *extack)
{
	int i;

	if (nh_grp->is_fdb) {
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
		return -EINVAL;
	}

	for (i = 0; i < nh_grp->num_nh; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_grp->nh_entries[i].nh;
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}

static int
mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
					     const struct nh_notifier_res_table_info *nh_res_table,
					     struct netlink_ext_ack *extack)
{
	unsigned int alloc_size;
	bool valid_size = false;
	int err, i;

	if (nh_res_table->num_nh_buckets < 32) {
		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
		return -EINVAL;
	}

	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
		const struct mlxsw_sp_adj_grp_size_range *size_range;

		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];

		if (nh_res_table->num_nh_buckets >= size_range->start &&
		    nh_res_table->num_nh_buckets <= size_range->end) {
			valid_size = true;
			break;
		}
	}

	if (!valid_size) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
		return -EINVAL;
	}

	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      nh_res_table->num_nh_buckets,
					      &alloc_size);
	if (err || nh_res_table->num_nh_buckets != alloc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
		return -EINVAL;
	}

	return 0;
}

static int
mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
					const struct nh_notifier_res_table_info *nh_res_table,
					struct netlink_ext_ack *extack)
{
	u16 i;
	int err;

	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
							   nh_res_table,
							   extack);
	if (err)
		return err;

	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_res_table->nhs[i];
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
					 unsigned long event,
					 struct nh_notifier_info *info)
{
	struct nh_notifier_single_info *nh;

	if (event != NEXTHOP_EVENT_REPLACE &&
	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
		return 0;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
							    info->extack);
	case NH_NOTIFIER_INFO_TYPE_GRP:
		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
							   info->nh_grp,
							   info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
							       info->nh_res_table,
							       info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
		nh = &info->nh_res_bucket->new_nh;
		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								 info->extack);
	default:
		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
		return -EOPNOTSUPP;
	}
}
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
					    const struct nh_notifier_info *info)
{
	const struct net_device *dev;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		dev = info->nh->dev;
		return info->nh->gw_family || info->nh->is_reject ||
		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
	case NH_NOTIFIER_INFO_TYPE_GRP:
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		/* Already validated earlier. */
		return true;
	default:
		return false;
	}
}

static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
	nh->should_offload = 1;
	/* While nexthops that discard packets do not forward packets
	 * via an egress RIF, they still need to be programmed using a
	 * valid RIF, so use the loopback RIF created during init.
	 */
	nh->crif = mlxsw_sp->router->lb_crif;
}

static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	nh->crif = NULL;
	nh->should_offload = 0;
}
static int
mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_nexthop_group *nh_grp,
			  struct mlxsw_sp_nexthop *nh,
			  struct nh_notifier_single_info *nh_obj, int weight)
{
	struct net_device *dev = nh_obj->dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = weight;

	switch (nh_obj->gw_family) {
	case AF_INET:
		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
		nh->neigh_tbl = &arp_tbl;
		break;
	case AF_INET6:
		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
#if IS_ENABLED(CONFIG_IPV6)
		nh->neigh_tbl = &nd_tbl;
#endif
		break;
	}

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_nexthop_type_init;

	if (nh_obj->is_reject)
		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);

	/* In a resilient nexthop group, all the nexthops must be written to
	 * the adjacency table. Even if they do not have a valid neighbour or
	 * RIF.
	 */
	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
		nh->should_offload = 1;
	}

	return 0;

err_nexthop_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop *nh)
{
	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	nh->should_offload = 0;
}
static int
mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	bool is_resilient = false;
	unsigned int nhs;
	int err, i;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		nhs = 1;
		break;
	case NH_NOTIFIER_INFO_TYPE_GRP:
		nhs = info->nh_grp->num_nh;
		break;
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		nhs = info->nh_res_table->num_nh_buckets;
		is_resilient = true;
		break;
	default:
		return -EINVAL;
	}

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
	nhgi->is_resilient = is_resilient;
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct nh_notifier_single_info *nh_obj;
		int weight;

		nh = &nhgi->nexthops[i];
		switch (info->type) {
		case NH_NOTIFIER_INFO_TYPE_SINGLE:
			nh_obj = info->nh;
			weight = 1;
			break;
		case NH_NOTIFIER_INFO_TYPE_GRP:
			nh_obj = &info->nh_grp->nh_entries[i].nh;
			weight = info->nh_grp->nh_entries[i].weight;
			break;
		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
			nh_obj = &info->nh_res_table->nhs[i];
			weight = 1;
			break;
		default:
			err = -EINVAL;
			goto err_nexthop_obj_init;
		}
		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
						weight);
		if (err)
			goto err_nexthop_obj_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
		goto err_group_refresh;
	}

	/* Add resilient nexthop groups to a list so that the activity of their
	 * nexthop buckets will be periodically queried and cleared.
	 */
	if (nhgi->is_resilient) {
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
	}

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop_obj_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int i;

	if (nhgi->is_resilient) {
		list_del(&nhgi->list);
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			cancel_delayed_work(&router->nh_grp_activity_dw);
	}

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
				  struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	nh_grp->obj.id = info->id;

	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
	if (err)
		goto err_nexthop_group_info_init;

	nh_grp->can_destroy = false;

	return nh_grp;

err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	cmp_arg.id = id;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}
static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
}
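
/* When a nexthop group is replaced, a new group is created first and its
 * nexthop group info is then swapped with that of the existing group. The
 * routes keep pointing at the same group object while the device is
 * updated, and the old info is torn down via the temporary shell.
 */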
static int
mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp,
				   struct mlxsw_sp_nexthop_group *old_nh_grp,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
	int err;

	old_nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = old_nhgi;
	old_nhgi->nh_grp = nh_grp;

	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* Both the old adjacency index and the new one are valid.
		 * Routes are currently using the old one. Tell the device to
		 * replace the old adjacency index with the new one.
		 */
		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
						     old_nhgi->adj_index,
						     old_nhgi->ecmp_size);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
			goto err_out;
		}
	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
		/* The old adjacency index is valid, while the new one is not.
		 * Iterate over all the routes using the group and change them
		 * to trap packets to the CPU.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
			goto err_out;
		}
	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* The old adjacency index is invalid, while the new one is.
		 * Iterate over all the routes using the group and change them
		 * to forward packets using the new valid index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
			goto err_out;
		}
	}

	/* Make sure the flags are set / cleared based on the new nexthop group
	 * information.
	 */
	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);

	/* At this point 'nh_grp' is just a shell that is not used by anyone
	 * and its nexthop group info is the old info that was just replaced
	 * with the new one. Remove it.
	 */
	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);

	return 0;

err_out:
	old_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = nh_grp;
	old_nh_grp->nhgi = old_nhgi;
	return err;
}

static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
				    struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
	struct netlink_ext_ack *extack = info->extack;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!old_nh_grp)
		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
	else
		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
							 old_nh_grp, extack);

	if (err) {
		nh_grp->can_destroy = true;
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
	}

	return err;
}
static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;

	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);

	/* If the group still has routes using it, then defer the delete
	 * operation until the last route using it is deleted.
	 */
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}

static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
					     u32 adj_index, char *ratr_pl)
{
	MLXSW_REG_ZERO(ratr, ratr_pl);
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);

	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
{
	/* Clear the opcode and activity on both the old and new payload as
	 * they are irrelevant for the comparison.
	 */
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl, 0);
	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);

	/* If the contents of the adjacency entry are consistent with the
	 * replacement request, then replacement was successful.
	 */
	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
		return 0;

	return -EINVAL;
}
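
/* A nexthop bucket is replaced "atomically" by overwriting it only if it is
 * inactive. Since the hardware does not report whether the conditional write
 * actually took place, the entry is read back and compared with the
 * requested contents to decide if the replacement succeeded.
 */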
static int
mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	bool force = info->nh_res_bucket->force;
	char ratr_pl_new[MLXSW_REG_RATR_LEN];
	char ratr_pl[MLXSW_REG_RATR_LEN];
	u32 adj_index;
	int err;

	/* No point in trying an atomic replacement if the idle timer interval
	 * is smaller than the interval in which we query and clear activity.
	 */
	if (!force && info->nh_res_bucket->idle_timer_ms <
	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
		force = true;

	adj_index = nh->nhgi->adj_index + bucket_index;
	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
		return err;
	}

	if (!force) {
		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
							ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
			return err;
		}

		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
			return err;
		}
	}

	nh->update = 0;
	nh->offloaded = 1;
	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);

	return 0;
}

static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
					       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct nh_notifier_single_info *nh_obj;
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
		return -EINVAL;
	}

	nhgi = nh_grp->nhgi;

	if (bucket_index >= nhgi->count) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
		return -EINVAL;
	}

	nh = &nhgi->nexthops[bucket_index];
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);

	nh_obj = &info->nh_res_bucket->new_nh;
	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
		goto err_nexthop_obj_init;
	}

	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
	if (err)
		goto err_nexthop_obj_bucket_adj_update;

	return 0;

err_nexthop_obj_bucket_adj_update:
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
err_nexthop_obj_init:
	nh_obj = &info->nh_res_bucket->old_nh;
	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	/* The old adjacency entry was not overwritten */
	nh->update = 0;
	nh->offloaded = 1;
	return err;
}
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
	if (err)
		goto out;

	mutex_lock(&router->lock);

	switch (event) {
	case NEXTHOP_EVENT_REPLACE:
		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_DEL:
		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_BUCKET_REPLACE:
		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
							  info);
		break;
	default:
		break;
	}

	mutex_unlock(&router->lock);

out:
	return notifier_from_errno(err);
}

static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				   struct fib_info *fi)
{
	const struct fib_nh *nh = fib_info_nh(fi, 0);

	return nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
static int
mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct fib_nh *fib_nh;

		nh = &nhgi->nexthops[i];
		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
	nh_grp->ipv4.fi = fi;
	fib_info_hold(fi);

	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	fib_info_put(fi);
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
	fib_info_put(nh_grp->ipv4.fi);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (fi->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   fi->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
out:
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->dscp;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->nhgi->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
		struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (dev && dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}
static void
mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib_entry_notifier_info *fen_info)
{
	u32 *p_dst = (u32 *) &fen_info->dst;
	struct fib_rt_info fri;

	fri.fi = fen_info->fi;
	fri.tb_id = fen_info->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = fen_info->dst_len;
	fri.dscp = fen_info->dscp;
	fri.type = fen_info->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = true;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.dscp = fib4_entry->dscp;
	fri.type = fib4_entry->type;
	fri.offload = should_offload;
	fri.trap = !should_offload;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.dscp = fib4_entry->dscp;
	fri.type = fib4_entry->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
	int i;

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	for (i = 0; i < nrt6; i++)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
				       false, false, true);
}
#else
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       should_offload, !should_offload, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       false, false, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif
static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_reg_ralue_op op)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}
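
/* FIB entries are programmed via the RALUE register. The key is the
 * {virtual router, prefix} of the FIB node, and the action (remote, local,
 * trap, etc.) is chosen below according to the entry type.
 */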
static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = nhgi->adj_index;
		ecmp_size = nhgi->ecmp_size;
	} else if (!nhgi->adj_index_valid && nhgi->count &&
		   mlxsw_sp_nhgi_rif(nhgi)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_trap_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_reg_ralue_op op)
{
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id;

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
				     fib_entry->decap.tunnel_index);
	if (err)
		return err;

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
					   fib_entry->decap.tunnel_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
					   fib_entry->decap.tunnel_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
							 op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
							fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);

	if (err)
		return err;

	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);

	return err;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
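
/* Map the kernel route type to a FIB entry type. Local and broadcast routes
 * are trapped, blackhole routes are dropped in hardware, and unicast routes
 * are either forwarded through an adjacency entry (gateway) or routed out of
 * a RIF. Local routes may also terminate IP-in-IP or NVE tunnels.
 */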
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
							       MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
						 MLXSW_SP_L3_PROTO_IPV4,
						 &dip)) {
			u32 tunnel_index;

			tunnel_index = router->nve_decap_config.tunnel_index;
			fib_entry->decap.tunnel_index = tunnel_index;
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
			return 0;
		}
		fallthrough;
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_BLACKHOLE:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
		return 0;
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
		return 0;
	case RTN_UNICAST:
		if (nhgi->gateway)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}

static void
mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

static void
mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
}
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	fib4_entry->fi = fen_info->fi;
	fib_info_hold(fib4_entry->fi);
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->dscp = fen_info->dscp;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_fib4_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
	kfree(fib4_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;

	fib_info_put(fib4_entry->fi);
	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	kfree(fib4_entry);
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == fen_info->tb_id &&
	    fib4_entry->dscp == fen_info->dscp &&
	    fib4_entry->type == fen_info->type &&
	    fib4_entry->fi == fen_info->fi)
		return fib4_entry;

	return NULL;
}
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	kfree(fib_node);
}
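
/* Each FIB node holds a reference on its prefix length in the LPM tree bound
 * to the virtual router. When a prefix length starts (or stops) being used,
 * a new tree matching the updated prefix usage is allocated and the virtual
 * routers are migrated to it.
 */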
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}

static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}

static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
6369 static struct mlxsw_sp_fib_node *
6370 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6371 size_t addr_len, unsigned char prefix_len,
6372 enum mlxsw_sp_l3proto proto)
6374 struct mlxsw_sp_fib_node *fib_node;
6375 struct mlxsw_sp_fib *fib;
6376 struct mlxsw_sp_vr *vr;
6379 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6381 return ERR_CAST(vr);
6382 fib = mlxsw_sp_vr_fib(vr, proto);
6384 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6388 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6391 goto err_fib_node_create;
6394 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6396 goto err_fib_node_init;
6401 mlxsw_sp_fib_node_destroy(fib_node);
6402 err_fib_node_create:
6403 mlxsw_sp_vr_put(mlxsw_sp, vr);
6404 return ERR_PTR(err);
6407 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6408 struct mlxsw_sp_fib_node *fib_node)
6410 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6412 if (fib_node->fib_entry)
6414 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6415 mlxsw_sp_fib_node_destroy(fib_node);
6416 mlxsw_sp_vr_put(mlxsw_sp, vr);
6419 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6420 struct mlxsw_sp_fib_entry *fib_entry)
6422 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6425 fib_node->fib_entry = fib_entry;
6427 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6429 goto err_fib_entry_update;
6433 err_fib_entry_update:
6434 fib_node->fib_entry = NULL;
6439 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6440 struct mlxsw_sp_fib_entry *fib_entry)
6442 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6444 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6445 fib_node->fib_entry = NULL;
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib4_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
		return false;

	return true;
}
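
/* A route from the main table is not allowed to replace a route from the
 * local table, since the local table takes precedence during lookup and is
 * used to terminate packets at the host.
 */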
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (fen_info->fi->nh &&
	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib_node_entry_link;
	}

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
				     common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (!fib4_entry)
		return;
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->fib6_flags & RTF_CACHE)
		return true;

	return false;
}

static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	fib6_info_hold(rt);

	return mlxsw_sp_rt6;
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;

	if (!mlxsw_sp_rt6->rt->nh)
		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}

static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	nh->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_nexthop_type_init;

	return 0;

err_nexthop_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
static int
mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
		       GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nhgi->count = fib6_entry->nrt6;
	for (i = 0; i < nhgi->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nhgi->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	nh_grp->nhgi = nhgi;
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;

	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (rt->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   rt->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	/* The route and the nexthop are described by the same struct, so we
	 * need to the update the nexthop offload indication for the new route.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

out:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}
6830 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6831 struct mlxsw_sp_fib_entry *fib_entry)
6833 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6835 list_del(&fib_entry->nexthop_group_node);
6836 if (!list_empty(&nh_grp->fib_list))
6839 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6840 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6844 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
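
/* Replace the nexthop group of an IPv6 FIB entry in a make-before-break
 * manner: take the new group first, repoint the entry's adjacency at it and
 * only then release the old group. On error, the entry is relinked to the
 * old group.
 */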
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	int err;

	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
	return err;
}

static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	return 0;

err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}
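
/* Local IPv6 routes are trapped to the CPU by default, but are programmed
 * as IP-in-IP or NVE decapsulation entries when the address is a tunnel
 * endpoint.
 */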
static int
mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   const struct fib6_info *rt)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
						       MLXSW_SP_L3_PROTO_IPV6,
						       dip);

	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
						     ipip_entry);
	}
	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
		u32 tunnel_index;

		tunnel_index = router->nve_decap_config.tunnel_index;
		fib_entry->decap.tunnel_index = tunnel_index;
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	}

	return 0;
}

static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					const struct fib6_info *rt)
{
	if (rt->fib6_flags & RTF_LOCAL)
		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
							  rt);
	if (rt->fib6_flags & RTF_ANYCAST)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_type == RTN_BLACKHOLE)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
	else if (fib_entry->nh_group->nhgi->gateway)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;

	return 0;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}
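
/* An IPv6 FIB entry aggregates one route per sibling nexthop (nrt6 in
 * total). Creation below resolves the nexthop group and entry type before
 * the entry is linked to its FIB node.
 */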
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
	if (err)
		goto err_fib6_entry_type_set;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_fib6_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct fib6_info *cmp_rt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
	    rt->fib6_metric == cmp_rt->fib6_metric &&
	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
		return fib6_entry;

	return NULL;
}

static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *fib6_replaced;
	struct fib6_info *rt, *rt_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib6_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib6_entry,
				     common);
	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
		return false;

	return true;
}
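
/* Route replace: the new entry is created first and the replaced entry is
 * destroyed only after the new one was successfully linked to the FIB node,
 * so the node is never left without a valid entry.
 */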
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
					struct fib6_info **rt_arr,
					unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_link;

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
				     common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
				       struct fib6_info **rt_arr,
				       unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return -EINVAL;
	}

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
					      nrt6);
	if (err)
		goto err_fib6_entry_nexthop_add;

	return 0;

err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib6_info **rt_arr,
				     unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * route was not found.
	 */
	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (!fib6_entry)
		return;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
						nrt6);
		return;
	}

	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
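
/* Multicast (IPMR/IP6MR) notifications. Each virtual router holds one
 * mr_table per L3 protocol; the helpers below resolve the table a
 * notification targets.
 */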
static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
	if (family == RTNL_FAMILY_IPMR)
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
	else
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}

static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
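
/* Flush all FIB entries from a virtual router, per protocol. The node list
 * is walked safely because flushing a node may drop the last reference to
 * the virtual router itself.
 */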
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	int i, j;

	for (i = 0; i < max_vrs; i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}
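
/* FIB notifications arrive in atomic context, so the notifier only
 * snapshots the notification info (taking references as needed) and defers
 * the actual programming to process context using these work items.
 */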
struct mlxsw_sp_fib6_event_work {
	struct fib6_info **rt_arr;
	unsigned int nrt6;
};

struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct mlxsw_sp_fib6_event_work fib6_work;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};

static int
mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
			       struct fib6_entry_notifier_info *fen6_info)
{
	struct fib6_info *rt = fen6_info->rt;
	struct fib6_info **rt_arr;
	struct fib6_info *iter;
	unsigned int nrt6;
	int i = 0;

	nrt6 = fen6_info->nsiblings + 1;

	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
	if (!rt_arr)
		return -ENOMEM;

	fib6_work->rt_arr = rt_arr;
	fib6_work->nrt6 = nrt6;

	rt_arr[0] = rt;
	fib6_info_hold(rt);

	if (!fen6_info->nsiblings)
		return 0;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (i == fen6_info->nsiblings)
			break;

		rt_arr[i + 1] = iter;
		fib6_info_hold(iter);
		i++;
	}
	WARN_ON_ONCE(i != fen6_info->nsiblings);

	return 0;
}

static void
mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
	int i;

	for (i = 0; i < fib6_work->nrt6; i++)
		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
	kfree(fib6_work->rt_arr);
}

static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
						   &fib_work->fen_info);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
							      &fib_work->fen_info);
		}
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
					fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	kfree(fib_work);
}

static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
						   fib6_work->rt_arr,
						   fib6_work->nrt6);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_work->rt_arr,
							      fib6_work->nrt6);
		}
		mlxsw_sp_router_fib6_work_fini(fib6_work);
		break;
	case FIB_EVENT_ENTRY_APPEND:
		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
						  fib6_work->rt_arr,
						  fib6_work->nrt6);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_work->rt_arr,
							      fib6_work->nrt6);
		}
		mlxsw_sp_router_fib6_work_fini(fib6_work);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib6_del(mlxsw_sp,
					 fib6_work->rt_arr,
					 fib6_work->nrt6);
		mlxsw_sp_router_fib6_work_fini(fib6_work);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	kfree(fib_work);
}

static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace;
	int err;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
						replace);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
		mr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
		mr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_work->ven_info);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
		dev_put(fib_work->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
					      &fib_work->ven_info);
		dev_put(fib_work->ven_info.dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	kfree(fib_work);
}
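
/* The event helpers below copy the notifier info into the work item and
 * take a reference on every object the copy points at; the matching event
 * work function releases the reference once the event was handled.
 */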
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_work->fen_info = *fen_info;
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_work->fnh_info = *fnh_info;
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
				      struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;
	int err;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
						     fen6_info);
		if (err)
			return err;
		break;
	}

	return 0;
}

static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
			    struct fib_notifier_info *info)
{
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
		mr_cache_hold(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
		dev_hold(fib_work->ven_info.dev);
		break;
	}
}

static int mlxsw_sp_router_fib_rule_event(unsigned long event,
					  struct fib_notifier_info *info,
					  struct mlxsw_sp *mlxsw_sp)
{
	struct netlink_ext_ack *extack = info->extack;
	struct fib_rule_notifier_info *fr_info;
	struct fib_rule *rule;
	int err = 0;

	/* nothing to do at the moment */
	if (event == FIB_EVENT_RULE_DEL)
		return 0;

	fr_info = container_of(info, struct fib_rule_notifier_info, info);
	rule = fr_info->rule;

	/* Rule only affects locally generated traffic */
	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
		return 0;

	switch (info->family) {
	case AF_INET:
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case AF_INET6:
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IPMR:
		if (!ipmr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IP6MR:
		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	}

	if (err < 0)
		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");

	return err;
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	if ((info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR &&
	     info->family != RTNL_FAMILY_IP6MR))
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		return notifier_from_errno(err);
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				return notifier_from_errno(-EINVAL);
			}
		}
		break;
	}

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (!fib_work)
		return NOTIFY_BAD;

	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (info->family) {
	case AF_INET:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
		mlxsw_sp_router_fib4_event(fib_work, info);
		break;
	case AF_INET6:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
		err = mlxsw_sp_router_fib6_event(fib_work, info);
		if (err)
			goto err_fib_event;
		break;
	case RTNL_FAMILY_IP6MR:
	case RTNL_FAMILY_IPMR:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
		mlxsw_sp_router_fibmr_event(fib_work, info);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;

err_fib_event:
	kfree(fib_work);
	return NOTIFY_BAD;
}
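
/* Router interface (RIF) handling. RIFs are keyed by their backing
 * net_device and stored in a table indexed by RIF index, sized by the
 * MAX_RIFS resource.
 */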
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	int i;

	for (i = 0; i < max_rifs; i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	mutex_unlock(&mlxsw_sp->router->lock);

	return rif;
}

u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;
	u16 vid = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	/* We only return the VID for VLAN RIFs. Otherwise we return an
	 * invalid value (0).
	 */
	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
		goto out;

	vid = mlxsw_sp_fid_8021q_vid(rif->fid);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return vid;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	/* Signal to nexthop cleanup that the RIF is going away. */
	rif->crif->rif = NULL;

	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}

static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
{
	struct inet6_dev *inet6_dev;
	struct in_device *idev;

	idev = __in_dev_get_rcu(dev);
	if (idev && idev->ifa_list)
		return false;

	inet6_dev = __in6_dev_get(dev);
	if (inet6_dev && !list_empty(&inet6_dev->addr_list))
		return false;

	return true;
}

static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
{
	bool addr_list_empty;

	rcu_read_lock();
	addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
	rcu_read_unlock();

	return addr_list_empty;
}

static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	bool addr_list_empty;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);

		/* macvlans do not have a RIF, but rather piggy back on the
		 * RIF of their lower device.
		 */
		if (netif_is_macvlan(dev) && addr_list_empty)
			return true;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
		return MLXSW_SP_RIF_TYPE_IPIP_LB;

	/* Otherwise RIF type is derived from the type of the underlying FID. */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}

static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
				    u8 rif_entries)
{
	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
				      rif_entries);
	if (*p_rif_index == 0)
		return -ENOBUFS;
	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;

	/* RIF indexes must be aligned to the allocation size. */
	WARN_ON_ONCE(*p_rif_index % rif_entries);

	return 0;
}

static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				    u8 rif_entries)
{
	gen_pool_free(mlxsw_sp->router->rifs_table,
		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct mlxsw_sp_crif *crif)
{
	struct net_device *l3_dev = crif ? crif->key.dev : NULL;
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->neigh_list);
	if (l3_dev) {
		ether_addr_copy(rif->addr, l3_dev->dev_addr);
		rif->mtu = l3_dev->mtu;
	}
	rif->vr_id = vr_id;
	rif->rif_index = rif_index;
	if (crif) {
		rif->crif = crif;
		crif->rif = rif;
	}

	return rif;
}

static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
{
	WARN_ON(!list_empty(&rif->neigh_list));

	if (rif->crif)
		rif->crif->rif = NULL;
	kfree(rif);
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}

u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
	if (WARN_ON(IS_ERR(ul_vr)))
		return 0;

	return ul_vr->id;
}

u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_rif_id;
}

static bool
mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_counter_valid_get(rif,
					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
	       mlxsw_sp_rif_counter_valid_get(rif,
					      MLXSW_SP_RIF_COUNTER_INGRESS);
}
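
/* Enabling L3 HW stats allocates counters in both directions. Each counter
 * is fetched-and-cleared once right after allocation so that stale data
 * from a previous user is not reported.
 */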
static int
mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	if (err)
		return err;

	/* Clear stale data. */
	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       NULL);
	if (err)
		goto err_clear_ingress;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		goto err_alloc_egress;

	/* Clear stale data. */
	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       NULL);
	if (err)
		goto err_clear_egress;

	return 0;

err_clear_egress:
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
err_alloc_egress:
err_clear_ingress:
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	return err;
}

static void
mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
}

static void
mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
					  struct netdev_notifier_offload_xstats_info *info)
{
	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
		return;
	netdev_offload_xstats_report_used(info->report_used);
}

static int
mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
				    struct rtnl_hw_stats64 *p_stats)
{
	struct mlxsw_sp_rif_counter_set_basic ingress;
	struct mlxsw_sp_rif_counter_set_basic egress;
	int err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       &ingress);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       &egress);
	if (err)
		return err;

#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
		((SET.good_unicast_ ## SFX) +		\
		 (SET.good_multicast_ ## SFX) +		\
		 (SET.good_broadcast_ ## SFX))

	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
	p_stats->rx_errors = ingress.error_packets;
	p_stats->tx_errors = egress.error_packets;
	p_stats->rx_dropped = ingress.discard_packets;
	p_stats->tx_dropped = egress.discard_packets;
	p_stats->multicast = ingress.good_multicast_packets +
			     ingress.good_broadcast_packets;

#undef MLXSW_SP_ROUTER_ALL_GOOD

	return 0;
}

static int
mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
					   struct netdev_notifier_offload_xstats_info *info)
{
	struct rtnl_hw_stats64 stats = {};
	int err;

	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
		return 0;

	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
	if (err)
		return err;

	netdev_offload_xstats_report_delta(info->report_delta, &stats);
	return 0;
}

struct mlxsw_sp_router_hwstats_notify_work {
	struct work_struct work;
	struct net_device *dev;
};

static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
			     work);

	rtnl_lock();
	rtnl_offload_xstats_notify(hws_work->dev);
	rtnl_unlock();
	dev_put(hws_work->dev);
	kfree(hws_work);
}

static void
mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
{
	struct mlxsw_sp_router_hwstats_notify_work *hws_work;

	/* To collect notification payload, the core ends up sending another
	 * notifier block message, which would deadlock on the attempt to
	 * acquire the router lock again. Just postpone the notification until
	 * later.
	 */

	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
	if (!hws_work)
		return;

	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
	dev_hold(dev);
	hws_work->dev = dev;
	mlxsw_core_schedule_work(&hws_work->work);
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_dev(rif)->ifindex;
}

bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
{
	return !!mlxsw_sp_rif_dev(rif);
}

bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
			 const struct net_device *dev)
{
	return mlxsw_sp_rif_dev(rif) == dev;
}

static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
{
	struct rtnl_hw_stats64 stats = {};

	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
		netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
						 &stats);
}
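
/* RIF creation: derive the RIF type and ops from the netdev, bind the RIF
 * to a virtual router, allocate an index and the RIF itself, then set up
 * the FID, hardware state, multicast tables and counters. Destruction in
 * mlxsw_sp_rif_destroy() reverses these steps in order.
 */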
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u8 rif_entries = params->double_entry ? 2 : 1;
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_crif *crif;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int i, err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
	if (WARN_ON(!crif)) {
		err = -ENOENT;
		goto err_crif_lookup;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	dev_hold(params->dev);
	mlxsw_sp->router->rifs[rif_index] = rif;
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;
	rif->rif_entries = rif_entries;

	if (ops->fid_get) {
		fid = ops->fid_get(rif, extack);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif, extack);
	if (err)
		goto err_configure;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
		if (err)
			goto err_mr_rif_add;
	}

	if (netdev_offload_xstats_enabled(params->dev,
					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		err = mlxsw_sp_router_port_l3_stats_enable(rif);
		if (err)
			goto err_stats_enable;
		mlxsw_sp_router_hwstats_notify_schedule(params->dev);
	} else {
		mlxsw_sp_rif_counters_alloc(rif);
	}

	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
	return rif;

err_stats_enable:
err_mr_rif_add:
	for (i--; i >= 0; i--)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	dev_put(params->dev);
	mlxsw_sp_rif_free(rif);
err_rif_alloc:
err_crif_lookup:
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_crif *crif = rif->crif;
	struct mlxsw_sp_fid *fid = rif->fid;
	u8 rif_entries = rif->rif_entries;
	u16 rif_index = rif->rif_index;
	struct mlxsw_sp_vr *vr;
	int i;

	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		mlxsw_sp_rif_push_l3_stats(rif);
		mlxsw_sp_router_port_l3_stats_disable(rif);
		mlxsw_sp_router_hwstats_notify_schedule(dev);
	} else {
		mlxsw_sp_rif_counters_free(rif);
	}

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	dev_put(dev);
	mlxsw_sp_rif_free(rif);
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);

	if (crif->can_destroy)
		mlxsw_sp_crif_free(crif);
}

void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;
	mlxsw_sp_rif_destroy(rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_subport *rif_subport;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
	if (!rif)
		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_inc(&rif_subport->ref_count);
	return rif;
}

static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	if (!refcount_dec_and_test(&rif_subport->ref_count))
		return;

	mlxsw_sp_rif_destroy(rif);
}

static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_rif_mac_profile *profile,
						struct netlink_ext_ack *extack)
{
	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int id;

	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
		       max_rif_mac_profiles, GFP_KERNEL);
	if (id >= 0) {
		profile->id = id;
		return 0;
	}

	if (id == -ENOSPC)
		NL_SET_ERR_MSG_MOD(extack,
				   "Exceeded number of supported router interface MAC profiles");

	return id;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
			     mac_profile);
	WARN_ON(!profile);
	return profile;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_alloc(const char *mac)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile)
		return NULL;

	ether_addr_copy(profile->mac_prefix, mac);
	refcount_set(&profile->ref_count, 1);
	return profile;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_rif_mac_profile *profile;
	int id;

	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
		if (ether_addr_equal_masked(profile->mac_prefix, mac,
					    mlxsw_sp->mac_mask))
			return profile;
	}

	return NULL;
}

static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}

static u64 mlxsw_sp_rifs_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rifs_count);
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_mac_profile *profile;
	int err;

	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
	if (err)
		goto profile_index_alloc_err;

	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
	return profile;

profile_index_alloc_err:
	kfree(profile);
	return ERR_PTR(err);
}

static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
					     u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
	kfree(profile);
}

static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
					const char *mac, u8 *p_mac_profile,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
	if (profile) {
		refcount_inc(&profile->ref_count);
		goto out;
	}

	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
	if (IS_ERR(profile))
		return PTR_ERR(profile);

out:
	*p_mac_profile = profile->id;
	return 0;
}

static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
					 u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   mac_profile);
	if (WARN_ON(!profile))
		return;

	if (!refcount_dec_and_test(&profile->ref_count))
		return;

	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
}

static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   rif->mac_profile_id);
	if (WARN_ON(!profile))
		return false;

	return refcount_read(&profile->ref_count) > 1;
}
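
/* If the RIF is the only user of its MAC profile and no existing profile
 * matches the new address, the profile is edited in place; otherwise a
 * matching profile is taken (or a new one created) and the reference to
 * the old profile is dropped.
 */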
static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
					 const char *new_mac)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   rif->mac_profile_id);
	if (WARN_ON(!profile))
		return -EINVAL;

	ether_addr_copy(profile->mac_prefix, new_mac);
	return 0;
}

static int
mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif,
				 const char *new_mac,
				 struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
					   &mac_profile, extack);
	if (err)
		return err;

	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
	rif->mac_profile_id = mac_profile;
	return 0;
}

static int
__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
				 struct net_device *l3_dev,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
	if (IS_ERR(rif))
		return PTR_ERR(rif);

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
	return err;
}

static void
__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
}

int
mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
					struct net_device *l3_dev,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	lockdep_assert_held(&mlxsw_sp->router->lock);

	if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
		return 0;

	return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
						extack);
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	mutex_unlock(&mlxsw_sp->router->lock);
}
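
/* IP address events are dispatched according to the netdev type: physical
 * ports, LAGs, bridges, VLAN uppers and macvlans each have a dedicated
 * handler below.
 */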
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
							l3_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
						 MLXSW_SP_DEFAULT_VID, extack);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid,
					 struct netlink_ext_ack *extack)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid,
								extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
					     MLXSW_SP_DEFAULT_VID, extack);
}

static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
			u16 proto;

			br_vlan_get_proto(l3_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *vlan_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
						      extack);

	return 0;
}

static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
{
	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp4, mask);
}

static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
{
	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp6, mask);
}

static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				const u8 *mac, bool adding)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u8 vrrp_id = adding ? mac[5] : 0;
	int err;

	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
		return 0;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
	else
		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
				    const struct net_device *macvlan_dev,
				    struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	if (!rif) {
		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		return err;

	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
				   macvlan_dev->dev_addr, true);
	if (err)
		goto err_rif_vrrp_add;

	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
	if (rif->ops->fdb_del)
		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);

	return 0;

err_rif_vrrp_add:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	return err;
}

static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *macvlan_dev)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	/* If we do not have a RIF, then we already took care of
	 * removing the macvlan's MAC during RIF deletion.
	 */
	if (!rif)
		return;
	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
			     false);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
}

void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *macvlan_dev)
{
	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *macvlan_dev,
					   unsigned long event,
					   struct netlink_ext_ack *extack)
{
	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
		break;
	}

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
						      extack);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
						    extack);
	else if (netif_is_macvlan(dev))
		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
						       extack);
	else
		return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
	mutex_lock(&router->lock);
	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&router->lock);
	return notifier_from_errno(err);
}

static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
					 unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
					  unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}
9155 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9156 const char *mac, int mtu, u8 mac_profile)
9158 char ritr_pl[MLXSW_REG_RITR_LEN];
9161 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9162 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9166 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9167 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9168 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9169 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9170 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
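/* Illustrative sketch, not driver code: mlxsw_sp_rif_edit() above is a
 * read-modify-write of the RITR register. Packing only the RIF index,
 * querying the current record, patching selected fields and writing it back
 * preserves every attribute the edit does not touch, e.g.:
 */
static int example_rif_mtu_only_edit(struct mlxsw_sp *mlxsw_sp,
				     u16 rif_index, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);	/* key only */
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);		/* patch one field */
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}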
9174 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9175 struct mlxsw_sp_rif *rif,
9176 struct netlink_ext_ack *extack)
9178 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9183 fid_index = mlxsw_sp_fid_index(rif->fid);
9185 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9189 old_mac_profile = rif->mac_profile_id;
9190 err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr, extack);
9193 goto err_rif_mac_profile_replace;
9195 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9196 dev->mtu, rif->mac_profile_id);
9200 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9202 goto err_rif_fdb_op;
9204 if (rif->mtu != dev->mtu) {
9205 struct mlxsw_sp_vr *vr;
9208 /* The RIF is relevant only to its mr_table instance, as unlike
9209 * unicast routing, in multicast routing a RIF cannot be shared
9210 * between several multicast routing tables.
9212 vr = &mlxsw_sp->router->vrs[rif->vr_id];
9213 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9214 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i], rif, dev->mtu);
9218 ether_addr_copy(rif->addr, dev->dev_addr);
9219 rif->mtu = dev->mtu;
9221 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9226 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu, old_mac_profile);
9229 mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9230 err_rif_mac_profile_replace:
9231 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9235 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9236 struct netdev_notifier_pre_changeaddr_info *info)
9238 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9239 struct mlxsw_sp_rif_mac_profile *profile;
9240 struct netlink_ext_ack *extack;
9241 u8 max_rif_mac_profiles;
9244 extack = netdev_notifier_info_to_extack(&info->info);
9246 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9250 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9251 occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9252 if (occ < max_rif_mac_profiles)
9255 if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9258 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
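/* Illustrative summary, not driver code, of the PRE_CHANGEADDR policy above:
 * a new MAC is vetoed only when all three conditions hold - no existing
 * profile matches its prefix, the profile table is already full, and the
 * RIF's current profile is shared with other RIFs (so it cannot be edited
 * in place). The helper below is hypothetical.
 */
static bool example_mac_change_needs_veto(bool matching_profile_exists,
					  bool profile_table_full,
					  bool current_profile_shared)
{
	return !matching_profile_exists && profile_table_full &&
	       current_profile_shared;
}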
9262 static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
9263 struct net_device *dev)
9265 struct vlan_dev_priv *vlan;
9267 if (netif_is_lag_master(dev) ||
9268 netif_is_bridge_master(dev) ||
9269 mlxsw_sp_port_dev_check(dev) ||
9270 mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
9271 netif_is_l3_master(dev))
9274 if (!is_vlan_dev(dev))
9277 vlan = vlan_dev_priv(dev);
9278 return netif_is_lag_master(vlan->real_dev) ||
9279 netif_is_bridge_master(vlan->real_dev) ||
9280 mlxsw_sp_port_dev_check(vlan->real_dev);
9283 static struct mlxsw_sp_crif *
9284 mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
9286 struct mlxsw_sp_crif *crif;
9289 if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
9292 crif = mlxsw_sp_crif_alloc(dev);
9294 return ERR_PTR(-ENOMEM);
9296 err = mlxsw_sp_crif_insert(router, crif);
9298 goto err_netdev_insert;
9303 mlxsw_sp_crif_free(crif);
9304 return ERR_PTR(err);
9307 static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
9308 struct mlxsw_sp_crif *crif)
9310 struct mlxsw_sp_nexthop *nh, *tmp;
9312 mlxsw_sp_crif_remove(router, crif);
9314 list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
9315 mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
9318 crif->can_destroy = true;
9320 mlxsw_sp_crif_free(crif);
9323 static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
9324 struct net_device *dev)
9326 struct mlxsw_sp_crif *crif;
9328 if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9331 crif = mlxsw_sp_crif_register(router, dev);
9332 return PTR_ERR_OR_ZERO(crif);
9335 static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
9336 struct net_device *dev)
9338 struct mlxsw_sp_crif *crif;
9340 if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9343 /* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
9344 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
9345 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
9346 * case, we expect to have collected the CRIF already, and warn if it
9347 * still exists. Otherwise we expect the CRIF to exist.
9349 crif = mlxsw_sp_crif_lookup(router, dev);
9350 if (dev->reg_state == NETREG_UNREGISTERED) {
9357 mlxsw_sp_crif_unregister(router, crif);
9360 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9363 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9364 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9365 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9366 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9374 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9375 unsigned long event,
9376 struct netdev_notifier_offload_xstats_info *info)
9378 switch (info->type) {
9379 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
break;
default:
return 0;
}

switch (event) {
9386 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9387 return mlxsw_sp_router_port_l3_stats_enable(rif);
9388 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9389 mlxsw_sp_router_port_l3_stats_disable(rif);
return 0;
9391 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9392 mlxsw_sp_router_port_l3_stats_report_used(rif, info);
return 0;
9394 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9395 return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
}
9403 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9404 struct net_device *dev,
9405 unsigned long event,
9406 struct netdev_notifier_offload_xstats_info *info)
9408 struct mlxsw_sp_rif *rif;
9410 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9414 return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
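/* Illustrative sketch, not driver code: these events originate from the
 * kernel's hardware-stats infrastructure (e.g. "ip stats set dev swp1
 * l3_stats on" triggers NETDEV_OFFLOAD_XSTATS_ENABLE). A REPORT_DELTA
 * handler typically reads its hardware counters and feeds them back
 * through the notifier info; this assumes the report helper and the
 * report_delta member from <linux/netdevice.h>.
 */
static void example_l3_stats_report_delta(struct netdev_notifier_offload_xstats_info *info,
					  const struct rtnl_hw_stats64 *hw_stats)
{
	if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return;
	netdev_offload_xstats_report_delta(info->report_delta, hw_stats);
}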
9417 static bool mlxsw_sp_is_router_event(unsigned long event)
9420 case NETDEV_PRE_CHANGEADDR:
9421 case NETDEV_CHANGEADDR:
9422 case NETDEV_CHANGEMTU:
9429 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9430 unsigned long event, void *ptr)
9432 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9433 struct mlxsw_sp *mlxsw_sp;
9434 struct mlxsw_sp_rif *rif;
9436 mlxsw_sp = mlxsw_sp_lower_get(dev);
9440 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9445 case NETDEV_CHANGEMTU:
9446 case NETDEV_CHANGEADDR:
9447 return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9448 case NETDEV_PRE_CHANGEADDR:
9449 return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9458 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9459 struct net_device *l3_dev,
9460 struct netlink_ext_ack *extack)
9462 struct mlxsw_sp_rif *rif;
9464 /* If netdev is already associated with a RIF, then we need to
9465 * destroy it and create a new one with the new virtual router ID.
9467 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9469 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, extack);
9472 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
9475 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9476 struct net_device *l3_dev)
9478 struct mlxsw_sp_rif *rif;
9480 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9483 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
9486 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9488 struct netdev_notifier_changeupper_info *info = ptr;
9490 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9492 return netif_is_l3_master(info->upper_dev);
9496 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9497 struct netdev_notifier_changeupper_info *info)
9499 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9502 /* We do not create a RIF for a macvlan, but only use it to
9503 * direct more MAC addresses to the router.
9505 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9509 case NETDEV_PRECHANGEUPPER:
9511 case NETDEV_CHANGEUPPER:
9512 if (info->linking) {
9513 struct netlink_ext_ack *extack;
9515 extack = netdev_notifier_info_to_extack(&info->info);
9516 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9518 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9527 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
9528 u16 vid, struct net_device *dev,
9529 struct netlink_ext_ack *extack)
9531 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9533 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
9535 if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
9538 return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan, dev, extack);
9542 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9543 struct net_device *lag_dev,
9544 struct netlink_ext_ack *extack)
9546 u16 default_vid = MLXSW_SP_DEFAULT_VID;
9548 return mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port,
9549 default_vid, lag_dev, extack);
9553 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9554 struct net_device *lag_dev,
9555 struct netlink_ext_ack *extack)
9559 mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
9560 err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
9561 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
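/* Illustrative sketch, not driver code, of the locking convention that
 * mlxsw_sp_router_port_join_lag() follows throughout this file: the
 * double-underscore helper assumes router->lock is held, and the public
 * entry point is a thin wrapper that takes the mutex. example_* names are
 * hypothetical.
 */
static int __example_router_op(struct mlxsw_sp *mlxsw_sp)
{
	lockdep_assert_held(&mlxsw_sp->router->lock);
	return 0;	/* the real work would happen here */
}

static int example_router_op(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	err = __example_router_op(mlxsw_sp);
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}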
9566 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
9567 unsigned long event, void *ptr)
9569 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
9570 struct mlxsw_sp_router *router;
9571 struct mlxsw_sp *mlxsw_sp;
9574 router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
9575 mlxsw_sp = router->mlxsw_sp;
9577 mutex_lock(&mlxsw_sp->router->lock);
9579 if (event == NETDEV_REGISTER) {
9580 err = mlxsw_sp_netdevice_register(router, dev);
9582 /* No need to roll this back, UNREGISTER will collect it
9588 if (mlxsw_sp_is_offload_xstats_event(event))
9589 err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev, event, ptr);
9591 else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
9592 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, event, ptr);
9594 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
9595 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, event, ptr);
9597 else if (mlxsw_sp_is_router_event(event))
9598 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
9599 else if (mlxsw_sp_is_vrf_event(event, ptr))
9600 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
9602 if (event == NETDEV_UNREGISTER)
9603 mlxsw_sp_netdevice_unregister(router, dev);
9606 mutex_unlock(&mlxsw_sp->router->lock);
9608 return notifier_from_errno(err);
9611 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9612 struct netdev_nested_priv *priv)
9614 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9616 if (!netif_is_macvlan(dev))
9619 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9620 mlxsw_sp_fid_index(rif->fid), false);
9623 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9625 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9626 struct netdev_nested_priv priv = {
9627 .data = (void *)rif,
9630 if (!netif_is_macvlan_port(dev))
9633 netdev_warn(dev, "Router interface is deleted. Upper macvlans will not work\n");
9634 return netdev_walk_all_upper_dev_rcu(dev,
9635 __mlxsw_sp_rif_macvlan_flush, &priv);
9638 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9639 const struct mlxsw_sp_rif_params *params)
9641 struct mlxsw_sp_rif_subport *rif_subport;
9643 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9644 refcount_set(&rif_subport->ref_count, 1);
9645 rif_subport->vid = params->vid;
9646 rif_subport->lag = params->lag;
9648 rif_subport->lag_id = params->lag_id;
9650 rif_subport->system_port = params->system_port;
9653 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9655 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9656 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9657 struct mlxsw_sp_rif_subport *rif_subport;
9658 char ritr_pl[MLXSW_REG_RITR_LEN];
9661 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9662 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9663 rif->rif_index, rif->vr_id, dev->mtu);
9664 mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
9665 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9666 efid = mlxsw_sp_fid_index(rif->fid);
9667 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9668 rif_subport->lag ? rif_subport->lag_id :
9669 rif_subport->system_port, efid, rif_subport->vid);
9671 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9674 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
9675 struct netlink_ext_ack *extack)
9677 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9681 err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
9682 &mac_profile, extack);
9685 rif->mac_profile_id = mac_profile;
9687 err = mlxsw_sp_rif_subport_op(rif, true);
9689 goto err_rif_subport_op;
9691 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9692 mlxsw_sp_fid_index(rif->fid), true);
9694 goto err_rif_fdb_op;
9696 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9698 goto err_fid_rif_set;
9703 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9704 mlxsw_sp_fid_index(rif->fid), false);
9706 mlxsw_sp_rif_subport_op(rif, false);
9708 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
9712 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9714 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9715 struct mlxsw_sp_fid *fid = rif->fid;
9717 mlxsw_sp_fid_rif_unset(fid);
9718 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9719 mlxsw_sp_fid_index(fid), false);
9720 mlxsw_sp_rif_macvlan_flush(rif);
9721 mlxsw_sp_rif_subport_op(rif, false);
9722 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9725 static struct mlxsw_sp_fid *
9726 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9727 struct netlink_ext_ack *extack)
9729 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9732 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9733 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
9734 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
9735 .setup = mlxsw_sp_rif_subport_setup,
9736 .configure = mlxsw_sp_rif_subport_configure,
9737 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
9738 .fid_get = mlxsw_sp_rif_subport_fid_get,
9741 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
9743 enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
9744 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9745 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9746 char ritr_pl[MLXSW_REG_RITR_LEN];
9748 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id, dev->mtu);
9750 mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
9751 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9752 mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
9754 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9757 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9759 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9762 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
9763 struct netlink_ext_ack *extack)
9765 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9766 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9767 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9771 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9772 &mac_profile, extack);
9775 rif->mac_profile_id = mac_profile;
9777 err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
9779 goto err_rif_fid_op;
9781 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9782 mlxsw_sp_router_port(mlxsw_sp), true);
9784 goto err_fid_mc_flood_set;
9786 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9787 mlxsw_sp_router_port(mlxsw_sp), true);
9789 goto err_fid_bc_flood_set;
9791 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9792 mlxsw_sp_fid_index(rif->fid), true);
9794 goto err_rif_fdb_op;
9796 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9798 goto err_fid_rif_set;
9803 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9804 mlxsw_sp_fid_index(rif->fid), false);
9806 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9807 mlxsw_sp_router_port(mlxsw_sp), false);
9808 err_fid_bc_flood_set:
9809 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9810 mlxsw_sp_router_port(mlxsw_sp), false);
9811 err_fid_mc_flood_set:
9812 mlxsw_sp_rif_fid_op(rif, fid_index, false);
9814 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9818 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9820 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9821 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9822 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9823 struct mlxsw_sp_fid *fid = rif->fid;
9825 mlxsw_sp_fid_rif_unset(fid);
9826 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9827 mlxsw_sp_fid_index(fid), false);
9828 mlxsw_sp_rif_macvlan_flush(rif);
9829 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9830 mlxsw_sp_router_port(mlxsw_sp), false);
9831 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9832 mlxsw_sp_router_port(mlxsw_sp), false);
9833 mlxsw_sp_rif_fid_op(rif, fid_index, false);
9834 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9837 static struct mlxsw_sp_fid *
9838 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9839 struct netlink_ext_ack *extack)
9841 int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
9843 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
9846 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9848 struct switchdev_notifier_fdb_info info = {};
9849 struct net_device *dev;
9851 dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
9857 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, NULL);
9861 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9862 .type = MLXSW_SP_RIF_TYPE_FID,
9863 .rif_size = sizeof(struct mlxsw_sp_rif),
9864 .configure = mlxsw_sp_rif_fid_configure,
9865 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
9866 .fid_get = mlxsw_sp_rif_fid_fid_get,
9867 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
9870 static struct mlxsw_sp_fid *
9871 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9872 struct netlink_ext_ack *extack)
9874 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9875 struct net_device *br_dev;
9879 if (is_vlan_dev(dev)) {
9880 vid = vlan_dev_vlan_id(dev);
9881 br_dev = vlan_dev_real_dev(dev);
9882 if (WARN_ON(!netif_is_bridge_master(br_dev)))
9883 return ERR_PTR(-EINVAL);
9885 err = br_vlan_get_pvid(dev, &vid);
9886 if (err < 0 || !vid) {
9887 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9888 return ERR_PTR(-EINVAL);
9892 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9895 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9897 struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
9898 struct switchdev_notifier_fdb_info info = {};
9899 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9900 struct net_device *br_dev;
9901 struct net_device *dev;
9903 br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
9904 dev = br_fdb_find_port(br_dev, mac, vid);
9910 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, NULL);
9914 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
9917 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9918 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9919 char ritr_pl[MLXSW_REG_RITR_LEN];
9921 mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
9922 dev->mtu, dev->dev_addr,
9923 rif->mac_profile_id, vid, efid);
9925 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9928 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
9929 struct netlink_ext_ack *extack)
9931 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9932 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9933 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9937 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9938 &mac_profile, extack);
9941 rif->mac_profile_id = mac_profile;
9943 err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
9945 goto err_rif_vlan_fid_op;
9947 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9948 mlxsw_sp_router_port(mlxsw_sp), true);
9950 goto err_fid_mc_flood_set;
9952 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9953 mlxsw_sp_router_port(mlxsw_sp), true);
9955 goto err_fid_bc_flood_set;
9957 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9958 mlxsw_sp_fid_index(rif->fid), true);
9960 goto err_rif_fdb_op;
9962 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9964 goto err_fid_rif_set;
9969 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9970 mlxsw_sp_fid_index(rif->fid), false);
9972 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9973 mlxsw_sp_router_port(mlxsw_sp), false);
9974 err_fid_bc_flood_set:
9975 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9976 mlxsw_sp_router_port(mlxsw_sp), false);
9977 err_fid_mc_flood_set:
9978 mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
9979 err_rif_vlan_fid_op:
9980 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9984 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
9986 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9987 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9988 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9990 mlxsw_sp_fid_rif_unset(rif->fid);
9991 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9992 mlxsw_sp_fid_index(rif->fid), false);
9993 mlxsw_sp_rif_macvlan_flush(rif);
9994 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9995 mlxsw_sp_router_port(mlxsw_sp), false);
9996 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9997 mlxsw_sp_router_port(mlxsw_sp), false);
9998 mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
9999 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10002 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10003 struct netlink_ext_ack *extack)
10005 return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
10008 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
10009 .type = MLXSW_SP_RIF_TYPE_VLAN,
10010 .rif_size = sizeof(struct mlxsw_sp_rif),
10011 .configure = mlxsw_sp1_rif_vlan_configure,
10012 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
10013 .fid_get = mlxsw_sp_rif_vlan_fid_get,
10014 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
10017 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10018 struct netlink_ext_ack *extack)
10020 u16 efid = mlxsw_sp_fid_index(rif->fid);
10022 return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
10025 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
10026 .type = MLXSW_SP_RIF_TYPE_VLAN,
10027 .rif_size = sizeof(struct mlxsw_sp_rif),
10028 .configure = mlxsw_sp2_rif_vlan_configure,
10029 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
10030 .fid_get = mlxsw_sp_rif_vlan_fid_get,
10031 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
10034 static struct mlxsw_sp_rif_ipip_lb *
10035 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
10037 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
10041 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
10042 const struct mlxsw_sp_rif_params *params)
10044 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
10045 struct mlxsw_sp_rif_ipip_lb *rif_lb;
10047 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb, common);
10049 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
10050 rif_lb->lb_config = params_lb->lb_config;
10054 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10055 struct netlink_ext_ack *extack)
10057 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10058 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10059 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10060 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10061 struct mlxsw_sp_vr *ul_vr;
10064 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
10066 return PTR_ERR(ul_vr);
10068 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
10070 goto err_loopback_op;
10072 lb_rif->ul_vr_id = ul_vr->id;
10073 lb_rif->ul_rif_id = 0;
10074 ++ul_vr->rif_count;
10078 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10082 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10084 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10085 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10086 struct mlxsw_sp_vr *ul_vr;
10088 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
10089 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
10091 --ul_vr->rif_count;
10092 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10095 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
10096 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10097 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10098 .setup = mlxsw_sp_rif_ipip_lb_setup,
10099 .configure = mlxsw_sp1_rif_ipip_lb_configure,
10100 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
10103 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
10104 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10105 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
10106 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10107 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
10111 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10113 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10114 char ritr_pl[MLXSW_REG_RITR_LEN];
10116 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10117 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10118 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10119 MLXSW_REG_RITR_LOOPBACK_GENERIC);
10121 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10124 static struct mlxsw_sp_rif *
10125 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10126 struct mlxsw_sp_crif *ul_crif,
10127 struct netlink_ext_ack *extack)
10129 struct mlxsw_sp_rif *ul_rif;
10130 u8 rif_entries = 1;
10134 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10136 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10137 return ERR_PTR(err);
10140 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, ul_crif);
10144 goto err_rif_alloc;
10147 mlxsw_sp->router->rifs[rif_index] = ul_rif;
10148 ul_rif->mlxsw_sp = mlxsw_sp;
10149 ul_rif->rif_entries = rif_entries;
10150 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10152 goto ul_rif_op_err;
10154 atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10158 mlxsw_sp->router->rifs[rif_index] = NULL;
10159 mlxsw_sp_rif_free(ul_rif);
10161 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10162 return ERR_PTR(err);
10165 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10167 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10168 u8 rif_entries = ul_rif->rif_entries;
10169 u16 rif_index = ul_rif->rif_index;
10171 atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10172 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10173 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10174 mlxsw_sp_rif_free(ul_rif);
10175 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10178 static struct mlxsw_sp_rif *
10179 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10180 struct mlxsw_sp_crif *ul_crif,
10181 struct netlink_ext_ack *extack)
10183 struct mlxsw_sp_vr *vr;
10186 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10188 return ERR_CAST(vr);
10190 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10193 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10194 if (IS_ERR(vr->ul_rif)) {
10195 err = PTR_ERR(vr->ul_rif);
10196 goto err_ul_rif_create;
10200 refcount_set(&vr->ul_rif_refcnt, 1);
10205 mlxsw_sp_vr_put(mlxsw_sp, vr);
10206 return ERR_PTR(err);
10209 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10211 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10212 struct mlxsw_sp_vr *vr;
10214 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10216 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10220 mlxsw_sp_ul_rif_destroy(ul_rif);
10221 mlxsw_sp_vr_put(mlxsw_sp, vr);
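/* Illustrative sketch, not driver code, of the get-or-create idiom that
 * mlxsw_sp_ul_rif_get()/_put() implement above: refcount_inc_not_zero()
 * takes a reference only if the object is still live; otherwise the object
 * is (re)created with a count of one. The sketch assumes the caller
 * serializes access to the slot, as router->lock does here.
 */
struct example_shared_obj {
	refcount_t refcnt;
};

static struct example_shared_obj *example_obj_get(struct example_shared_obj **slot)
{
	struct example_shared_obj *obj;

	if (*slot && refcount_inc_not_zero(&(*slot)->refcnt))
		return *slot;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;
	refcount_set(&obj->refcnt, 1);
	*slot = obj;
	return obj;
}

static void example_obj_put(struct example_shared_obj **slot)
{
	if (refcount_dec_and_test(&(*slot)->refcnt)) {
		kfree(*slot);
		*slot = NULL;
	}
}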
10224 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10227 struct mlxsw_sp_rif *ul_rif;
10230 mutex_lock(&mlxsw_sp->router->lock);
10231 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10232 if (IS_ERR(ul_rif)) {
10233 err = PTR_ERR(ul_rif);
10236 *ul_rif_index = ul_rif->rif_index;
10238 mutex_unlock(&mlxsw_sp->router->lock);
10242 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10244 struct mlxsw_sp_rif *ul_rif;
10246 mutex_lock(&mlxsw_sp->router->lock);
10247 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10248 if (WARN_ON(!ul_rif))
10251 mlxsw_sp_ul_rif_put(ul_rif);
10253 mutex_unlock(&mlxsw_sp->router->lock);
10257 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10258 struct netlink_ext_ack *extack)
10260 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10261 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10262 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10263 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10264 struct mlxsw_sp_rif *ul_rif;
10267 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
10268 if (IS_ERR(ul_rif))
10269 return PTR_ERR(ul_rif);
10271 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10273 goto err_loopback_op;
10275 lb_rif->ul_vr_id = 0;
10276 lb_rif->ul_rif_id = ul_rif->rif_index;
10281 mlxsw_sp_ul_rif_put(ul_rif);
10285 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10287 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10288 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10289 struct mlxsw_sp_rif *ul_rif;
10291 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10292 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10293 mlxsw_sp_ul_rif_put(ul_rif);
10296 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10297 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10298 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10299 .setup = mlxsw_sp_rif_ipip_lb_setup,
10300 .configure = mlxsw_sp2_rif_ipip_lb_configure,
10301 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
10304 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10305 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10306 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
10307 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10308 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
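/* Illustrative sketch, not driver code: both arrays are indexed by
 * enum mlxsw_sp_rif_type and installed into router->rif_ops_arr by the
 * per-generation init callbacks further below; Spectrum-1 and Spectrum-2+
 * differ only in their VLAN and IP-in-IP loopback implementations. A RIF
 * operation then dispatches roughly like this (locals hypothetical):
 */
static int example_rif_dispatch_configure(struct mlxsw_sp *mlxsw_sp,
					  enum mlxsw_sp_rif_type type,
					  struct mlxsw_sp_rif *rif,
					  const struct mlxsw_sp_rif_params *params,
					  struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_rif_ops *ops =
		mlxsw_sp->router->rif_ops_arr[type];

	if (ops->setup)		/* optional hook, see struct above */
		ops->setup(rif, params);
	return ops->configure(rif, extack);
}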
10311 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
10313 struct gen_pool *rifs_table;
10316 rifs_table = gen_pool_create(0, -1);
10320 gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align, NULL);
10323 err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
10324 MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
10326 goto err_gen_pool_add;
10328 mlxsw_sp->router->rifs_table = rifs_table;
10333 gen_pool_destroy(rifs_table);
10337 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
10339 gen_pool_destroy(mlxsw_sp->router->rifs_table);
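/* Illustrative sketch, not driver code: allocating a RIF index from the
 * pool built above. gen_pool_alloc() returns 0 on failure, which is why
 * the pool is seeded at the non-zero MLXSW_SP_ROUTER_GENALLOC_OFFSET and
 * the offset is subtracted again after a successful allocation; this
 * mirrors mlxsw_sp_rif_index_alloc() earlier in the file.
 */
static int example_rif_index_alloc(struct mlxsw_sp *mlxsw_sp,
				   u16 *p_rif_index, u8 rif_entries)
{
	unsigned long index;

	index = gen_pool_alloc(mlxsw_sp->router->rifs_table, rif_entries);
	if (!index)
		return -ENOBUFS;

	*p_rif_index = index - MLXSW_SP_ROUTER_GENALLOC_OFFSET;
	return 0;
}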
10342 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10344 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10345 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10346 struct mlxsw_core *core = mlxsw_sp->core;
10349 if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10351 mlxsw_sp->router->max_rif_mac_profile =
10352 MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10354 mlxsw_sp->router->rifs = kcalloc(max_rifs,
10355 sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
10357 if (!mlxsw_sp->router->rifs)
10360 err = mlxsw_sp_rifs_table_init(mlxsw_sp);
10362 goto err_rifs_table_init;
10364 idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10365 atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10366 atomic_set(&mlxsw_sp->router->rifs_count, 0);
10367 devl_resource_occ_get_register(devlink,
10368 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10369 mlxsw_sp_rif_mac_profiles_occ_get, mlxsw_sp);
10371 devl_resource_occ_get_register(devlink,
10372 MLXSW_SP_RESOURCE_RIFS,
10373 mlxsw_sp_rifs_occ_get, mlxsw_sp);
10378 err_rifs_table_init:
10379 kfree(mlxsw_sp->router->rifs);
10383 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10385 int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10386 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10389 WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
10390 for (i = 0; i < max_rifs; i++)
10391 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10393 devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
10394 devl_resource_occ_get_unregister(devlink,
10395 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10396 WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10397 idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10398 mlxsw_sp_rifs_table_fini(mlxsw_sp);
10399 kfree(mlxsw_sp->router->rifs);
10403 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10405 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10407 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10408 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10411 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10415 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10417 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10420 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10424 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10427 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10429 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10430 return mlxsw_sp_ipips_init(mlxsw_sp);
10433 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10435 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10436 return mlxsw_sp_ipips_init(mlxsw_sp);
10439 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10441 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10444 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10446 struct mlxsw_sp_router *router;
10448 /* Flush pending FIB notifications and then flush the device's
10449 * table before requesting another dump. The FIB notification
10450 * block is unregistered, so no need to take RTNL.
10452 mlxsw_core_flush_owq();
10453 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10454 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10457 #ifdef CONFIG_IP_ROUTE_MULTIPATH
10458 struct mlxsw_sp_mp_hash_config {
10459 DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10460 DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10461 DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10462 DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10463 bool inc_parsing_depth;
10466 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10467 bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10469 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10470 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10472 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10473 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
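/* The three helpers above are thin wrappers around bitmap_set(); for
 * example, MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4)
 * expands to
 *
 *	bitmap_set(fields, MLXSW_REG_RECR2_IPV4_SIP0, 4);
 *
 * i.e. it enables the four consecutive RECR2 field bits that cover the
 * bytes of the IPv4 source address.
 */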
10475 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10477 unsigned long *inner_headers = config->inner_headers;
10478 unsigned long *inner_fields = config->inner_fields;
10481 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10482 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10483 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10484 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10486 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10487 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10488 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10489 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10490 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10491 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10492 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10493 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10496 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10498 unsigned long *headers = config->headers;
10499 unsigned long *fields = config->fields;
10501 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10502 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10503 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10504 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10508 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
10511 unsigned long *inner_headers = config->inner_headers;
10512 unsigned long *inner_fields = config->inner_fields;
10515 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10516 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10517 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
10518 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10519 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
10520 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10521 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10522 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
10524 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10525 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10526 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
10527 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10528 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10530 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
10531 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10532 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10534 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10535 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10536 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
10537 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10539 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
10540 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
10541 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
10542 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
10543 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
10544 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
10547 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
10548 struct mlxsw_sp_mp_hash_config *config)
10550 struct net *net = mlxsw_sp_net(mlxsw_sp);
10551 unsigned long *headers = config->headers;
10552 unsigned long *fields = config->fields;
10555 switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
10557 mlxsw_sp_mp4_hash_outer_addr(config);
10560 mlxsw_sp_mp4_hash_outer_addr(config);
10561 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10562 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10563 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10564 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10568 mlxsw_sp_mp4_hash_outer_addr(config);
10570 mlxsw_sp_mp_hash_inner_l3(config);
10573 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
10575 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10576 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10577 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10578 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
10579 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10580 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
10581 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10582 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10583 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10584 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10585 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10586 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10587 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10589 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10594 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10596 unsigned long *headers = config->headers;
10597 unsigned long *fields = config->fields;
10599 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10600 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10601 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10602 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10603 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10604 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10607 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
10608 struct mlxsw_sp_mp_hash_config *config)
10610 u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
10611 unsigned long *headers = config->headers;
10612 unsigned long *fields = config->fields;
10614 switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
10616 mlxsw_sp_mp6_hash_outer_addr(config);
10617 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10618 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10621 mlxsw_sp_mp6_hash_outer_addr(config);
10622 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10623 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10624 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10625 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10629 mlxsw_sp_mp6_hash_outer_addr(config);
10630 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10631 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10633 mlxsw_sp_mp_hash_inner_l3(config);
10634 config->inc_parsing_depth = true;
10638 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10639 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10640 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10641 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
10642 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10643 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10645 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
10646 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10647 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10649 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10650 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10651 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
10652 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10653 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10654 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10655 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10656 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10658 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10659 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
10660 config->inc_parsing_depth = true;
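/* Note on the (elided) case labels in the two _hash_init() switches above:
 * they follow the kernel's multipath hash policy sysctls -
 * net.ipv4.fib_multipath_hash_policy and net.ipv6.fib_multipath_hash_policy -
 * where 0 selects L3 (outer addresses), 1 selects L4 (5-tuple), 2 selects
 * L3 or inner L3 for encapsulated packets, and 3 selects the custom field
 * set from the corresponding fib_multipath_hash_fields sysctl.
 */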
10665 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10666 bool old_inc_parsing_depth,
10667 bool new_inc_parsing_depth)
10671 if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10672 err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10675 mlxsw_sp->router->inc_parsing_depth = true;
10676 } else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10677 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10678 mlxsw_sp->router->inc_parsing_depth = false;
10684 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10686 bool old_inc_parsing_depth, new_inc_parsing_depth;
10687 struct mlxsw_sp_mp_hash_config config = {};
10688 char recr2_pl[MLXSW_REG_RECR2_LEN];
10693 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
10694 mlxsw_reg_recr2_pack(recr2_pl, seed);
10695 mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
10696 mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
10698 old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10699 new_inc_parsing_depth = config.inc_parsing_depth;
10700 err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
10701 old_inc_parsing_depth,
10702 new_inc_parsing_depth);
10706 for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
10707 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
10708 for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
10709 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
10710 for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
10711 mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
10712 for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
10713 mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
10715 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
10717 goto err_reg_write;
10722 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
10723 old_inc_parsing_depth);
10727 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
10729 bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10731 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth, false);
10735 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10740 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
10745 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
10747 char rdpm_pl[MLXSW_REG_RDPM_LEN];
10750 MLXSW_REG_ZERO(rdpm, rdpm_pl);
10752 /* The HW determines switch priority from the DSCP bits, but the
10753 * kernel still derives it from the full ToS byte. Since the bit
10754 * layouts differ, translate to the value that ToS would observe by
10755 * skipping the 2 least-significant ECN bits.
10757 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
10758 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
10760 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
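/* Worked example for the loop above: the RDPM entry for DSCP 46 (EF) is
 * programmed with rt_tos2priority(46 << 2), i.e. the priority the kernel
 * itself would compute for a ToS byte carrying DSCP 46 with both ECN bits
 * clear.
 */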
10763 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10765 struct net *net = mlxsw_sp_net(mlxsw_sp);
10766 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10770 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10772 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10773 usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
10775 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10776 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10777 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10778 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10781 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10783 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10785 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
10786 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10789 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
10790 struct netlink_ext_ack *extack)
10792 struct mlxsw_sp_router *router = mlxsw_sp->router;
10793 struct mlxsw_sp_rif *lb_rif;
10796 router->lb_crif = mlxsw_sp_crif_alloc(NULL);
10797 if (!router->lb_crif)
10800 /* Create a generic loopback RIF associated with the main table
10801 * (default VRF). Any table can be used, but the main table exists
10802 * anyway, so we do not waste resources. Loopback RIFs are usually
10803 * created with a NULL CRIF, but this RIF is used as a fallback RIF
10804 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
10806 lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif, extack);
10808 if (IS_ERR(lb_rif)) {
10809 err = PTR_ERR(lb_rif);
10810 goto err_ul_rif_get;
10816 mlxsw_sp_crif_free(router->lb_crif);
10820 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10822 mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
10823 mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
10826 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10828 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10830 mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10831 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10832 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10837 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10838 .init = mlxsw_sp1_router_init,
10839 .ipips_init = mlxsw_sp1_ipips_init,
10842 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10844 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10846 mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10847 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10848 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10853 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10854 .init = mlxsw_sp2_router_init,
10855 .ipips_init = mlxsw_sp2_ipips_init,
10858 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
10859 struct netlink_ext_ack *extack)
10861 struct mlxsw_sp_router *router;
10862 struct notifier_block *nb;
10865 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
10868 mutex_init(&router->lock);
10869 mlxsw_sp->router = router;
10870 router->mlxsw_sp = mlxsw_sp;
10872 err = mlxsw_sp->router_ops->init(mlxsw_sp);
10874 goto err_router_ops_init;
10876 INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
10877 INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
10878 mlxsw_sp_nh_grp_activity_work);
10879 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
10880 err = __mlxsw_sp_router_init(mlxsw_sp);
10882 goto err_router_init;
10884 err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
10886 goto err_ipips_init;
10888 err = rhashtable_init(&mlxsw_sp->router->crif_ht,
10889 &mlxsw_sp_crif_ht_params);
10891 goto err_crif_ht_init;
10893 err = mlxsw_sp_rifs_init(mlxsw_sp);
10895 goto err_rifs_init;
10897 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
10898 &mlxsw_sp_nexthop_ht_params);
10900 goto err_nexthop_ht_init;
10902 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
10903 &mlxsw_sp_nexthop_group_ht_params);
10905 goto err_nexthop_group_ht_init;
10907 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
10908 err = mlxsw_sp_lpm_init(mlxsw_sp);
10912 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
10916 err = mlxsw_sp_vrs_init(mlxsw_sp);
10920 err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
10922 goto err_lb_rif_init;
10924 err = mlxsw_sp_neigh_init(mlxsw_sp);
10926 goto err_neigh_init;
10928 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
10930 goto err_mp_hash_init;
10932 err = mlxsw_sp_dscp_init(mlxsw_sp);
10934 goto err_dscp_init;
10936 router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
10937 err = register_inetaddr_notifier(&router->inetaddr_nb);
10939 goto err_register_inetaddr_notifier;
10941 router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
10942 err = register_inet6addr_notifier(&router->inet6addr_nb);
10944 goto err_register_inet6addr_notifier;
10946 router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
10947 err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
10949 goto err_register_inetaddr_valid_notifier;
10951 nb = &router->inet6addr_valid_nb;
10952 nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
10953 err = register_inet6addr_validator_notifier(nb);
10955 goto err_register_inet6addr_valid_notifier;
10957 mlxsw_sp->router->netevent_nb.notifier_call =
10958 mlxsw_sp_router_netevent_event;
10959 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10961 goto err_register_netevent_notifier;
10963 mlxsw_sp->router->nexthop_nb.notifier_call =
10964 mlxsw_sp_nexthop_obj_event;
10965 err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10966 &mlxsw_sp->router->nexthop_nb, extack);
10969 goto err_register_nexthop_notifier;
10971 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
10972 err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10973 &mlxsw_sp->router->fib_nb,
10974 mlxsw_sp_router_fib_dump_flush, extack);
10976 goto err_register_fib_notifier;
10978 mlxsw_sp->router->netdevice_nb.notifier_call =
10979 mlxsw_sp_router_netdevice_event;
10980 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
10981 &mlxsw_sp->router->netdevice_nb);
10983 goto err_register_netdev_notifier;
10987 err_register_netdev_notifier:
10988 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10989 &mlxsw_sp->router->fib_nb);
10990 err_register_fib_notifier:
10991 unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10992 &mlxsw_sp->router->nexthop_nb);
10993 err_register_nexthop_notifier:
10994 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10995 err_register_netevent_notifier:
10996 unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
10997 err_register_inet6addr_valid_notifier:
10998 unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
10999 err_register_inetaddr_valid_notifier:
11000 unregister_inet6addr_notifier(&router->inet6addr_nb);
11001 err_register_inet6addr_notifier:
11002 unregister_inetaddr_notifier(&router->inetaddr_nb);
11003 err_register_inetaddr_notifier:
11004 mlxsw_core_flush_owq();
11006 mlxsw_sp_mp_hash_fini(mlxsw_sp);
11008 mlxsw_sp_neigh_fini(mlxsw_sp);
11010 mlxsw_sp_lb_rif_fini(mlxsw_sp);
11012 mlxsw_sp_vrs_fini(mlxsw_sp);
11014 mlxsw_sp_mr_fini(mlxsw_sp);
11016 mlxsw_sp_lpm_fini(mlxsw_sp);
11018 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
11019 err_nexthop_group_ht_init:
11020 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
11021 err_nexthop_ht_init:
11022 mlxsw_sp_rifs_fini(mlxsw_sp);
11024 rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11026 mlxsw_sp_ipips_fini(mlxsw_sp);
11028 __mlxsw_sp_router_fini(mlxsw_sp);
11030 cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
11031 err_router_ops_init:
11032 mutex_destroy(&mlxsw_sp->router->lock);
11033 kfree(mlxsw_sp->router);
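/* Illustrative sketch, not driver code: the err_* ladder above follows the
 * kernel's standard goto-unwind style - each label undoes exactly the
 * steps that succeeded before the failure, in reverse order, so a failure
 * at any point leaves no notifier or table registered. The minimal shape
 * (example_* names hypothetical):
 */
static int example_step_a(void) { return 0; }
static void example_step_a_fini(void) { }
static int example_step_b(void) { return 0; }

static int example_init(void)
{
	int err;

	err = example_step_a();
	if (err)
		return err;

	err = example_step_b();
	if (err)
		goto err_step_b;

	return 0;

err_step_b:
	example_step_a_fini();
	return err;
}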
11037 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11039 struct mlxsw_sp_router *router = mlxsw_sp->router;
11041 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11042 &router->netdevice_nb);
11043 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
11044 unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11045 &router->nexthop_nb);
11046 unregister_netevent_notifier(&router->netevent_nb);
11047 unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11048 unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11049 unregister_inet6addr_notifier(&router->inet6addr_nb);
11050 unregister_inetaddr_notifier(&router->inetaddr_nb);
11051 mlxsw_core_flush_owq();
11052 mlxsw_sp_mp_hash_fini(mlxsw_sp);
11053 mlxsw_sp_neigh_fini(mlxsw_sp);
11054 mlxsw_sp_lb_rif_fini(mlxsw_sp);
11055 mlxsw_sp_vrs_fini(mlxsw_sp);
11056 mlxsw_sp_mr_fini(mlxsw_sp);
11057 mlxsw_sp_lpm_fini(mlxsw_sp);
11058 rhashtable_destroy(&router->nexthop_group_ht);
11059 rhashtable_destroy(&router->nexthop_ht);
11060 mlxsw_sp_rifs_fini(mlxsw_sp);
11061 rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11062 mlxsw_sp_ipips_fini(mlxsw_sp);
11063 __mlxsw_sp_router_fini(mlxsw_sp);
11064 cancel_delayed_work_sync(&router->nh_grp_activity_dw);
11065 mutex_destroy(&router->lock);
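/* mlxsw_sp_router_fini() above tears state down in the exact reverse order
 * of mlxsw_sp_router_init(): the notifiers are unregistered first and the
 * ordered workqueue is flushed, so no callback can still be running while
 * the router structures underneath it are freed.
 */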