1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/random.h>
17 #include <linux/if_macvlan.h>
18 #include <net/netevent.h>
19 #include <net/neighbour.h>
21 #include <net/ip_fib.h>
22 #include <net/ip6_fib.h>
23 #include <net/fib_rules.h>
24 #include <net/ip_tunnels.h>
25 #include <net/l3mdev.h>
26 #include <net/addrconf.h>
27 #include <net/ndisc.h>
29 #include <net/fib_notifier.h>
30 #include <net/switchdev.h>
35 #include "spectrum_cnt.h"
36 #include "spectrum_dpipe.h"
37 #include "spectrum_ipip.h"
38 #include "spectrum_mr.h"
39 #include "spectrum_mr_tcam.h"
40 #include "spectrum_router.h"
41 #include "spectrum_span.h"
45 struct mlxsw_sp_lpm_tree;
46 struct mlxsw_sp_rif_ops;
48 struct mlxsw_sp_router {
49 struct mlxsw_sp *mlxsw_sp;
50 struct mlxsw_sp_rif **rifs;
51 struct mlxsw_sp_vr *vrs;
52 struct rhashtable neigh_ht;
53 struct rhashtable nexthop_group_ht;
54 struct rhashtable nexthop_ht;
55 struct list_head nexthop_list;
57 /* One tree for each protocol: IPv4 and IPv6 */
58 struct mlxsw_sp_lpm_tree *proto_trees[2];
59 struct mlxsw_sp_lpm_tree *trees;
60 unsigned int tree_count;
63 struct delayed_work dw;
64 unsigned long interval; /* ms */
66 struct delayed_work nexthop_probe_dw;
67 #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
68 struct list_head nexthop_neighs_list;
69 struct list_head ipip_list;
71 struct notifier_block fib_nb;
72 struct notifier_block netevent_nb;
73 const struct mlxsw_sp_rif_ops **rif_ops_arr;
74 const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
78 struct list_head nexthop_list;
79 struct list_head neigh_list;
80 struct net_device *dev;
81 struct mlxsw_sp_fid *fid;
82 unsigned char addr[ETH_ALEN];
86 const struct mlxsw_sp_rif_ops *ops;
87 struct mlxsw_sp *mlxsw_sp;
89 unsigned int counter_ingress;
90 bool counter_ingress_valid;
91 unsigned int counter_egress;
92 bool counter_egress_valid;
95 struct mlxsw_sp_rif_params {
96 struct net_device *dev;
105 struct mlxsw_sp_rif_subport {
106 struct mlxsw_sp_rif common;
115 struct mlxsw_sp_rif_ipip_lb {
116 struct mlxsw_sp_rif common;
117 struct mlxsw_sp_rif_ipip_lb_config lb_config;
118 u16 ul_vr_id; /* Reserved for Spectrum-2. */
121 struct mlxsw_sp_rif_params_ipip_lb {
122 struct mlxsw_sp_rif_params common;
123 struct mlxsw_sp_rif_ipip_lb_config lb_config;
126 struct mlxsw_sp_rif_ops {
127 enum mlxsw_sp_rif_type type;
130 void (*setup)(struct mlxsw_sp_rif *rif,
131 const struct mlxsw_sp_rif_params *params);
132 int (*configure)(struct mlxsw_sp_rif *rif);
133 void (*deconfigure)(struct mlxsw_sp_rif *rif);
134 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
135 struct netlink_ext_ack *extack);
136 void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
139 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
140 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
141 struct mlxsw_sp_lpm_tree *lpm_tree);
142 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
143 const struct mlxsw_sp_fib *fib,
145 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
146 const struct mlxsw_sp_fib *fib);
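/* RIF counters: a router interface may have an ingress and an egress
 * packet counter. The counter index comes from the RIF counter sub-pool
 * and is bound to the interface through the RITR register; the helpers
 * below select the per-direction fields in struct mlxsw_sp_rif.
 */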
148 static unsigned int *
149 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
150 enum mlxsw_sp_rif_counter_dir dir)
153 case MLXSW_SP_RIF_COUNTER_EGRESS:
154 return &rif->counter_egress;
155 case MLXSW_SP_RIF_COUNTER_INGRESS:
156 return &rif->counter_ingress;
162 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
163 enum mlxsw_sp_rif_counter_dir dir)
166 case MLXSW_SP_RIF_COUNTER_EGRESS:
167 return rif->counter_egress_valid;
168 case MLXSW_SP_RIF_COUNTER_INGRESS:
169 return rif->counter_ingress_valid;
175 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
176 enum mlxsw_sp_rif_counter_dir dir,
180 case MLXSW_SP_RIF_COUNTER_EGRESS:
181 rif->counter_egress_valid = valid;
183 case MLXSW_SP_RIF_COUNTER_INGRESS:
184 rif->counter_ingress_valid = valid;
189 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
190 unsigned int counter_index, bool enable,
191 enum mlxsw_sp_rif_counter_dir dir)
193 char ritr_pl[MLXSW_REG_RITR_LEN];
194 bool is_egress = false;
197 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
199 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
200 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
204 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
206 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
209 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
210 struct mlxsw_sp_rif *rif,
211 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
213 char ricnt_pl[MLXSW_REG_RICNT_LEN];
214 unsigned int *p_counter_index;
218 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
222 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
223 if (!p_counter_index)
225 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
226 MLXSW_REG_RICNT_OPCODE_NOP);
227 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
230 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
234 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
235 unsigned int counter_index)
237 char ricnt_pl[MLXSW_REG_RICNT_LEN];
239 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
240 MLXSW_REG_RICNT_OPCODE_CLEAR);
241 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
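/* Allocating a RIF counter is a three step sequence: take an index from
 * the RIF counter sub-pool, clear it through RICNT so no stale value is
 * ever reported, and bind it to the RIF and direction through RITR. Each
 * error path unwinds the steps already taken.
 */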
244 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
245 struct mlxsw_sp_rif *rif,
246 enum mlxsw_sp_rif_counter_dir dir)
248 unsigned int *p_counter_index;
251 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
252 if (!p_counter_index)
254 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
259 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
261 goto err_counter_clear;
263 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
264 *p_counter_index, true, dir);
266 goto err_counter_edit;
267 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
272 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
277 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
278 struct mlxsw_sp_rif *rif,
279 enum mlxsw_sp_rif_counter_dir dir)
281 unsigned int *p_counter_index;
283 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
286 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
287 if (WARN_ON(!p_counter_index))
289 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
290 *p_counter_index, false, dir);
291 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
293 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
296 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
298 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
299 struct devlink *devlink;
301 devlink = priv_to_devlink(mlxsw_sp->core);
302 if (!devlink_dpipe_table_counter_enabled(devlink,
303 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
305 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
308 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
310 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
312 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
315 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
317 struct mlxsw_sp_prefix_usage {
318 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
321 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
322 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
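/* A prefix usage is a bitmap with one bit per possible prefix length
 * (0..128, hence the "+ 1" above). For example, a FIB holding only
 * 10.0.0.0/8 and 10.1.0.0/16 has bits 8 and 16 set. This is what the
 * structure of an LPM tree is built from.
 */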
325 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
326 struct mlxsw_sp_prefix_usage *prefix_usage2)
328 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
332 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
333 struct mlxsw_sp_prefix_usage *prefix_usage2)
335 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
339 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
340 unsigned char prefix_len)
342 set_bit(prefix_len, prefix_usage->b);
346 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
347 unsigned char prefix_len)
349 clear_bit(prefix_len, prefix_usage->b);
352 struct mlxsw_sp_fib_key {
353 unsigned char addr[sizeof(struct in6_addr)];
354 unsigned char prefix_len;
357 enum mlxsw_sp_fib_entry_type {
358 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
359 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
360 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
362 /* This is a special case of local delivery, where a packet should be
363 * decapsulated on reception. Note that there is no corresponding ENCAP,
364 * because that's a type of next hop, not of FIB entry. (There can be
365 * several next hops in a REMOTE entry, and some of them may be
366 * encapsulating entries. */
368 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
369 MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
372 struct mlxsw_sp_nexthop_group;
374 struct mlxsw_sp_fib_node {
375 struct list_head entry_list;
376 struct list_head list;
377 struct rhash_head ht_node;
378 struct mlxsw_sp_fib *fib;
379 struct mlxsw_sp_fib_key key;
382 struct mlxsw_sp_fib_entry_decap {
383 struct mlxsw_sp_ipip_entry *ipip_entry;
387 struct mlxsw_sp_fib_entry {
388 struct list_head list;
389 struct mlxsw_sp_fib_node *fib_node;
390 enum mlxsw_sp_fib_entry_type type;
391 struct list_head nexthop_group_node;
392 struct mlxsw_sp_nexthop_group *nh_group;
393 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
396 struct mlxsw_sp_fib4_entry {
397 struct mlxsw_sp_fib_entry common;
404 struct mlxsw_sp_fib6_entry {
405 struct mlxsw_sp_fib_entry common;
406 struct list_head rt6_list;
410 struct mlxsw_sp_rt6 {
411 struct list_head list;
412 struct fib6_info *rt;
415 struct mlxsw_sp_lpm_tree {
417 unsigned int ref_count;
418 enum mlxsw_sp_l3proto proto;
419 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
420 struct mlxsw_sp_prefix_usage prefix_usage;
423 struct mlxsw_sp_fib {
424 struct rhashtable ht;
425 struct list_head node_list;
426 struct mlxsw_sp_vr *vr;
427 struct mlxsw_sp_lpm_tree *lpm_tree;
428 enum mlxsw_sp_l3proto proto;
432 u16 id; /* virtual router ID */
433 u32 tb_id; /* kernel fib table id */
434 unsigned int rif_count;
435 struct mlxsw_sp_fib *fib4;
436 struct mlxsw_sp_fib *fib6;
437 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
440 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
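/* FIB layout: routes live in a per-VR, per-protocol mlxsw_sp_fib. Each
 * unique {prefix, prefix_len} is an mlxsw_sp_fib_node kept in the fib's
 * rhashtable, and the node carries a list of entries; the first entry on
 * that list is the one used for offload.
 */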
442 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
443 struct mlxsw_sp_vr *vr,
444 enum mlxsw_sp_l3proto proto)
446 struct mlxsw_sp_lpm_tree *lpm_tree;
447 struct mlxsw_sp_fib *fib;
450 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
451 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
453 return ERR_PTR(-ENOMEM);
454 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
456 goto err_rhashtable_init;
457 INIT_LIST_HEAD(&fib->node_list);
460 fib->lpm_tree = lpm_tree;
461 mlxsw_sp_lpm_tree_hold(lpm_tree);
462 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
464 goto err_lpm_tree_bind;
468 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
474 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
475 struct mlxsw_sp_fib *fib)
477 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
478 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
479 WARN_ON(!list_empty(&fib->node_list));
480 rhashtable_destroy(&fib->ht);
484 static struct mlxsw_sp_lpm_tree *
485 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
487 static struct mlxsw_sp_lpm_tree *lpm_tree;
490 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
491 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
492 if (lpm_tree->ref_count == 0)
498 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
499 struct mlxsw_sp_lpm_tree *lpm_tree)
501 char ralta_pl[MLXSW_REG_RALTA_LEN];
503 mlxsw_reg_ralta_pack(ralta_pl, true,
504 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
506 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
509 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
510 struct mlxsw_sp_lpm_tree *lpm_tree)
512 char ralta_pl[MLXSW_REG_RALTA_LEN];
514 mlxsw_reg_ralta_pack(ralta_pl, false,
515 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
517 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
521 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
522 struct mlxsw_sp_prefix_usage *prefix_usage,
523 struct mlxsw_sp_lpm_tree *lpm_tree)
525 char ralst_pl[MLXSW_REG_RALST_LEN];
528 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
530 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
533 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
534 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
537 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
538 MLXSW_REG_RALST_BIN_NO_CHILD);
539 last_prefix = prefix;
541 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
544 static struct mlxsw_sp_lpm_tree *
545 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
546 struct mlxsw_sp_prefix_usage *prefix_usage,
547 enum mlxsw_sp_l3proto proto)
549 struct mlxsw_sp_lpm_tree *lpm_tree;
552 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
554 return ERR_PTR(-EBUSY);
555 lpm_tree->proto = proto;
556 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
560 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
563 goto err_left_struct_set;
564 memcpy(&lpm_tree->prefix_usage, prefix_usage,
565 sizeof(lpm_tree->prefix_usage));
566 memset(&lpm_tree->prefix_ref_count, 0,
567 sizeof(lpm_tree->prefix_ref_count));
568 lpm_tree->ref_count = 1;
572 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
576 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
577 struct mlxsw_sp_lpm_tree *lpm_tree)
579 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
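/* LPM trees are reference counted and shared: mlxsw_sp_lpm_tree_get()
 * first looks for an existing tree with the same protocol and prefix
 * usage and takes a reference, and only allocates a new tree when no
 * match is found.
 */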
582 static struct mlxsw_sp_lpm_tree *
583 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
584 struct mlxsw_sp_prefix_usage *prefix_usage,
585 enum mlxsw_sp_l3proto proto)
587 struct mlxsw_sp_lpm_tree *lpm_tree;
590 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
591 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
592 if (lpm_tree->ref_count != 0 &&
593 lpm_tree->proto == proto &&
594 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
596 mlxsw_sp_lpm_tree_hold(lpm_tree);
600 return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
603 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
605 lpm_tree->ref_count++;
608 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
609 struct mlxsw_sp_lpm_tree *lpm_tree)
611 if (--lpm_tree->ref_count == 0)
612 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
615 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
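/* One LPM tree per protocol is reserved up front with an empty prefix
 * usage and kept in proto_trees[], so a newly created FIB always has a
 * valid tree to bind to before any route is installed.
 */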
617 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
619 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
620 struct mlxsw_sp_lpm_tree *lpm_tree;
624 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
627 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
628 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
629 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
630 sizeof(struct mlxsw_sp_lpm_tree),
632 if (!mlxsw_sp->router->lpm.trees)
635 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
636 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
637 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
640 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
641 MLXSW_SP_L3_PROTO_IPV4);
642 if (IS_ERR(lpm_tree)) {
643 err = PTR_ERR(lpm_tree);
644 goto err_ipv4_tree_get;
646 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
648 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
649 MLXSW_SP_L3_PROTO_IPV6);
650 if (IS_ERR(lpm_tree)) {
651 err = PTR_ERR(lpm_tree);
652 goto err_ipv6_tree_get;
654 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
659 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
660 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
662 kfree(mlxsw_sp->router->lpm.trees);
666 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
668 struct mlxsw_sp_lpm_tree *lpm_tree;
670 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
671 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
673 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
674 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
676 kfree(mlxsw_sp->router->lpm.trees);
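/* Virtual routers: each kernel FIB table (after squashing local and
 * default into main, see mlxsw_sp_fix_tb_id()) maps to one VR. A VR owns
 * an IPv4 FIB, an IPv6 FIB and one multicast routing table per protocol,
 * and is considered used as long as any of those exist.
 */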
679 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
681 return !!vr->fib4 || !!vr->fib6 ||
682 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
683 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
686 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
688 struct mlxsw_sp_vr *vr;
691 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
692 vr = &mlxsw_sp->router->vrs[i];
693 if (!mlxsw_sp_vr_is_used(vr))
699 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
700 const struct mlxsw_sp_fib *fib, u8 tree_id)
702 char raltb_pl[MLXSW_REG_RALTB_LEN];
704 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
705 (enum mlxsw_reg_ralxx_protocol) fib->proto,
707 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
710 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
711 const struct mlxsw_sp_fib *fib)
713 char raltb_pl[MLXSW_REG_RALTB_LEN];
715 /* Bind to tree 0 which is default */
716 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
717 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
718 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
721 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
723 /* For our purpose, squash main, default and local tables into one */
724 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
725 tb_id = RT_TABLE_MAIN;
729 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
732 struct mlxsw_sp_vr *vr;
735 tb_id = mlxsw_sp_fix_tb_id(tb_id);
737 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
738 vr = &mlxsw_sp->router->vrs[i];
739 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
745 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
748 struct mlxsw_sp_vr *vr;
750 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
758 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
759 enum mlxsw_sp_l3proto proto)
762 case MLXSW_SP_L3_PROTO_IPV4:
764 case MLXSW_SP_L3_PROTO_IPV6:
770 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
772 struct netlink_ext_ack *extack)
774 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
775 struct mlxsw_sp_fib *fib4;
776 struct mlxsw_sp_fib *fib6;
777 struct mlxsw_sp_vr *vr;
780 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
782 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
783 return ERR_PTR(-EBUSY);
785 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
787 return ERR_CAST(fib4);
788 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
791 goto err_fib6_create;
793 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
794 MLXSW_SP_L3_PROTO_IPV4);
795 if (IS_ERR(mr4_table)) {
796 err = PTR_ERR(mr4_table);
797 goto err_mr4_table_create;
799 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
800 MLXSW_SP_L3_PROTO_IPV6);
801 if (IS_ERR(mr6_table)) {
802 err = PTR_ERR(mr6_table);
803 goto err_mr6_table_create;
808 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
809 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
813 err_mr6_table_create:
814 mlxsw_sp_mr_table_destroy(mr4_table);
815 err_mr4_table_create:
816 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
818 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
822 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
823 struct mlxsw_sp_vr *vr)
825 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
826 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
827 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
828 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
829 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
831 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
835 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
836 struct netlink_ext_ack *extack)
838 struct mlxsw_sp_vr *vr;
840 tb_id = mlxsw_sp_fix_tb_id(tb_id);
841 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
843 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
847 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
849 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
850 list_empty(&vr->fib6->node_list) &&
851 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
852 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
853 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
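/* Replacing a per-protocol LPM tree: every VR still bound to the old tree
 * is re-bound to the new one first, and only then does the new tree
 * become the protocol default. On failure, the VRs already visited are
 * rolled back to the old tree.
 */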
857 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
858 enum mlxsw_sp_l3proto proto, u8 tree_id)
860 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
862 if (!mlxsw_sp_vr_is_used(vr))
864 if (fib->lpm_tree->id == tree_id)
869 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
870 struct mlxsw_sp_fib *fib,
871 struct mlxsw_sp_lpm_tree *new_tree)
873 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
876 fib->lpm_tree = new_tree;
877 mlxsw_sp_lpm_tree_hold(new_tree);
878 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
881 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
885 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
886 fib->lpm_tree = old_tree;
890 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
891 struct mlxsw_sp_fib *fib,
892 struct mlxsw_sp_lpm_tree *new_tree)
894 enum mlxsw_sp_l3proto proto = fib->proto;
895 struct mlxsw_sp_lpm_tree *old_tree;
896 u8 old_id, new_id = new_tree->id;
897 struct mlxsw_sp_vr *vr;
900 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
901 old_id = old_tree->id;
903 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
904 vr = &mlxsw_sp->router->vrs[i];
905 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
907 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
908 mlxsw_sp_vr_fib(vr, proto),
911 goto err_tree_replace;
914 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
915 sizeof(new_tree->prefix_ref_count));
916 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
917 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
922 for (i--; i >= 0; i--) {
923 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
925 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
926 mlxsw_sp_vr_fib(vr, proto),
932 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
934 struct mlxsw_sp_vr *vr;
938 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
941 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
942 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
944 if (!mlxsw_sp->router->vrs)
947 for (i = 0; i < max_vrs; i++) {
948 vr = &mlxsw_sp->router->vrs[i];
955 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
957 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
959 /* At this stage we're guaranteed not to have new incoming
960 * FIB notifications and the work queue is free from FIBs
961 * sitting on top of mlxsw netdevs. However, we can still
962 * have other FIBs queued. Flush the queue before flushing
963 * the device's tables. No need for locks, as we're the only writer. */
966 mlxsw_core_flush_owq();
967 mlxsw_sp_router_fib_flush(mlxsw_sp);
968 kfree(mlxsw_sp->router->vrs);
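/* IP-in-IP offload: "ol_dev" is the overlay tunnel netdevice and "ul_dev"
 * the underlay device it is bound to (tunnel parms.link). Each offloaded
 * tunnel is tracked as an mlxsw_sp_ipip_entry whose loopback RIF handles
 * the encapsulation and decapsulation in hardware.
 */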
971 static struct net_device *
972 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
974 struct ip_tunnel *tun = netdev_priv(ol_dev);
975 struct net *net = dev_net(ol_dev);
977 return __dev_get_by_index(net, tun->parms.link);
980 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
982 struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
985 return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
987 return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
990 static struct mlxsw_sp_rif *
991 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
992 const struct mlxsw_sp_rif_params *params,
993 struct netlink_ext_ack *extack);
995 static struct mlxsw_sp_rif_ipip_lb *
996 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
997 enum mlxsw_sp_ipip_type ipipt,
998 struct net_device *ol_dev,
999 struct netlink_ext_ack *extack)
1001 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1002 const struct mlxsw_sp_ipip_ops *ipip_ops;
1003 struct mlxsw_sp_rif *rif;
1005 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1006 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1007 .common.dev = ol_dev,
1008 .common.lag = false,
1009 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1012 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1014 return ERR_CAST(rif);
1015 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1018 static struct mlxsw_sp_ipip_entry *
1019 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1020 enum mlxsw_sp_ipip_type ipipt,
1021 struct net_device *ol_dev)
1023 const struct mlxsw_sp_ipip_ops *ipip_ops;
1024 struct mlxsw_sp_ipip_entry *ipip_entry;
1025 struct mlxsw_sp_ipip_entry *ret = NULL;
1027 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1028 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1030 return ERR_PTR(-ENOMEM);
1032 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1034 if (IS_ERR(ipip_entry->ol_lb)) {
1035 ret = ERR_CAST(ipip_entry->ol_lb);
1036 goto err_ol_ipip_lb_create;
1039 ipip_entry->ipipt = ipipt;
1040 ipip_entry->ol_dev = ol_dev;
1042 switch (ipip_ops->ul_proto) {
1043 case MLXSW_SP_L3_PROTO_IPV4:
1044 ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1046 case MLXSW_SP_L3_PROTO_IPV6:
1053 err_ol_ipip_lb_create:
1059 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1061 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1066 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1067 const enum mlxsw_sp_l3proto ul_proto,
1068 union mlxsw_sp_l3addr saddr,
1070 struct mlxsw_sp_ipip_entry *ipip_entry)
1072 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1073 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1074 union mlxsw_sp_l3addr tun_saddr;
1076 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1079 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1080 return tun_ul_tb_id == ul_tb_id &&
1081 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1085 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1086 struct mlxsw_sp_fib_entry *fib_entry,
1087 struct mlxsw_sp_ipip_entry *ipip_entry)
1092 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1097 ipip_entry->decap_fib_entry = fib_entry;
1098 fib_entry->decap.ipip_entry = ipip_entry;
1099 fib_entry->decap.tunnel_index = tunnel_index;
1103 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1104 struct mlxsw_sp_fib_entry *fib_entry)
1106 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1107 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1108 fib_entry->decap.ipip_entry = NULL;
1109 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1110 1, fib_entry->decap.tunnel_index);
1113 static struct mlxsw_sp_fib_node *
1114 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1115 size_t addr_len, unsigned char prefix_len);
1116 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1117 struct mlxsw_sp_fib_entry *fib_entry);
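/* A local route matching a tunnel's underlay source address is "promoted"
 * from a plain trap entry to an IPIP decap entry, and demoted back to a
 * trap when the tunnel goes away or its underlay configuration changes.
 */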
1120 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1121 struct mlxsw_sp_ipip_entry *ipip_entry)
1123 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1125 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1126 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1128 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1132 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1133 struct mlxsw_sp_ipip_entry *ipip_entry,
1134 struct mlxsw_sp_fib_entry *decap_fib_entry)
1136 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1139 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1141 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1142 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1145 static struct mlxsw_sp_fib_entry *
1146 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1147 enum mlxsw_sp_l3proto proto,
1148 const union mlxsw_sp_l3addr *addr,
1149 enum mlxsw_sp_fib_entry_type type)
1151 struct mlxsw_sp_fib_entry *fib_entry;
1152 struct mlxsw_sp_fib_node *fib_node;
1153 unsigned char addr_prefix_len;
1154 struct mlxsw_sp_fib *fib;
1155 struct mlxsw_sp_vr *vr;
1160 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1163 fib = mlxsw_sp_vr_fib(vr, proto);
1166 case MLXSW_SP_L3_PROTO_IPV4:
1167 addr4 = be32_to_cpu(addr->addr4);
1170 addr_prefix_len = 32;
1172 case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
1178 fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1180 if (!fib_node || list_empty(&fib_node->entry_list))
1183 fib_entry = list_first_entry(&fib_node->entry_list,
1184 struct mlxsw_sp_fib_entry, list);
1185 if (fib_entry->type != type)
1191 /* Given an IPIP entry, find the corresponding decap route. */
1192 static struct mlxsw_sp_fib_entry *
1193 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1194 struct mlxsw_sp_ipip_entry *ipip_entry)
1196 static struct mlxsw_sp_fib_node *fib_node;
1197 const struct mlxsw_sp_ipip_ops *ipip_ops;
1198 struct mlxsw_sp_fib_entry *fib_entry;
1199 unsigned char saddr_prefix_len;
1200 union mlxsw_sp_l3addr saddr;
1201 struct mlxsw_sp_fib *ul_fib;
1202 struct mlxsw_sp_vr *ul_vr;
1208 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1210 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1211 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1215 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1216 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1217 ipip_entry->ol_dev);
1219 switch (ipip_ops->ul_proto) {
1220 case MLXSW_SP_L3_PROTO_IPV4:
1221 saddr4 = be32_to_cpu(saddr.addr4);
1224 saddr_prefix_len = 32;
1226 case MLXSW_SP_L3_PROTO_IPV6:
1231 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1233 if (!fib_node || list_empty(&fib_node->entry_list))
1236 fib_entry = list_first_entry(&fib_node->entry_list,
1237 struct mlxsw_sp_fib_entry, list);
1238 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1244 static struct mlxsw_sp_ipip_entry *
1245 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1246 enum mlxsw_sp_ipip_type ipipt,
1247 struct net_device *ol_dev)
1249 struct mlxsw_sp_ipip_entry *ipip_entry;
1251 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1252 if (IS_ERR(ipip_entry))
1255 list_add_tail(&ipip_entry->ipip_list_node,
1256 &mlxsw_sp->router->ipip_list);
1262 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1263 struct mlxsw_sp_ipip_entry *ipip_entry)
1265 list_del(&ipip_entry->ipip_list_node);
1266 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1270 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1271 const struct net_device *ul_dev,
1272 enum mlxsw_sp_l3proto ul_proto,
1273 union mlxsw_sp_l3addr ul_dip,
1274 struct mlxsw_sp_ipip_entry *ipip_entry)
1276 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1277 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1279 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1282 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1283 ul_tb_id, ipip_entry);
1286 /* Given decap parameters, find the corresponding IPIP entry. */
1287 static struct mlxsw_sp_ipip_entry *
1288 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1289 const struct net_device *ul_dev,
1290 enum mlxsw_sp_l3proto ul_proto,
1291 union mlxsw_sp_l3addr ul_dip)
1293 struct mlxsw_sp_ipip_entry *ipip_entry;
1295 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1297 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1305 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1306 const struct net_device *dev,
1307 enum mlxsw_sp_ipip_type *p_type)
1309 struct mlxsw_sp_router *router = mlxsw_sp->router;
1310 const struct mlxsw_sp_ipip_ops *ipip_ops;
1311 enum mlxsw_sp_ipip_type ipipt;
1313 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1314 ipip_ops = router->ipip_ops_arr[ipipt];
1315 if (dev->type == ipip_ops->dev_type) {
1324 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1325 const struct net_device *dev)
1327 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1330 static struct mlxsw_sp_ipip_entry *
1331 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1332 const struct net_device *ol_dev)
1334 struct mlxsw_sp_ipip_entry *ipip_entry;
1336 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1338 if (ipip_entry->ol_dev == ol_dev)
1344 static struct mlxsw_sp_ipip_entry *
1345 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1346 const struct net_device *ul_dev,
1347 struct mlxsw_sp_ipip_entry *start)
1349 struct mlxsw_sp_ipip_entry *ipip_entry;
1351 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1353 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1355 struct net_device *ipip_ul_dev =
1356 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1358 if (ipip_ul_dev == ul_dev)
1365 bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1366 const struct net_device *dev)
1368 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1371 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1372 const struct net_device *ol_dev,
1373 enum mlxsw_sp_ipip_type ipipt)
1375 const struct mlxsw_sp_ipip_ops *ops
1376 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1378 /* For deciding whether decap should be offloaded, we don't care about
1379 * overlay protocol, so ask whether either one is supported. */
1381 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1382 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
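/* The NETDEV_* handlers below keep IPIP entries in sync with the tunnel
 * netdevice: REGISTER creates an entry when the tunnel can be offloaded,
 * UNREGISTER destroys it, UP/DOWN promote or demote the decap route, and
 * VRF or parameter changes recreate the loopback RIF as needed.
 */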
1385 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1386 struct net_device *ol_dev)
1388 struct mlxsw_sp_ipip_entry *ipip_entry;
1389 enum mlxsw_sp_l3proto ul_proto;
1390 enum mlxsw_sp_ipip_type ipipt;
1391 union mlxsw_sp_l3addr saddr;
1394 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1395 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1396 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1397 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1398 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1399 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1402 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1404 if (IS_ERR(ipip_entry))
1405 return PTR_ERR(ipip_entry);
1412 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1413 struct net_device *ol_dev)
1415 struct mlxsw_sp_ipip_entry *ipip_entry;
1417 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1419 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1423 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1424 struct mlxsw_sp_ipip_entry *ipip_entry)
1426 struct mlxsw_sp_fib_entry *decap_fib_entry;
1428 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1429 if (decap_fib_entry)
1430 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1435 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
1436 struct mlxsw_sp_vr *ul_vr, bool enable)
1438 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1439 struct mlxsw_sp_rif *rif = &lb_rif->common;
1440 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1441 char ritr_pl[MLXSW_REG_RITR_LEN];
1444 switch (lb_cf.ul_protocol) {
1445 case MLXSW_SP_L3_PROTO_IPV4:
1446 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1447 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1448 rif->rif_index, rif->vr_id, rif->dev->mtu);
1449 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1450 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1451 ul_vr->id, saddr4, lb_cf.okey);
1454 case MLXSW_SP_L3_PROTO_IPV6:
1455 return -EAFNOSUPPORT;
1458 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1461 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1462 struct net_device *ol_dev)
1464 struct mlxsw_sp_ipip_entry *ipip_entry;
1465 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1466 struct mlxsw_sp_vr *ul_vr;
1469 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1471 lb_rif = ipip_entry->ol_lb;
1472 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
1473 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
1476 lb_rif->common.mtu = ol_dev->mtu;
1483 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1484 struct net_device *ol_dev)
1486 struct mlxsw_sp_ipip_entry *ipip_entry;
1488 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1490 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1494 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1495 struct mlxsw_sp_ipip_entry *ipip_entry)
1497 if (ipip_entry->decap_fib_entry)
1498 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1501 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1502 struct net_device *ol_dev)
1504 struct mlxsw_sp_ipip_entry *ipip_entry;
1506 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1508 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1511 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1512 struct mlxsw_sp_rif *old_rif,
1513 struct mlxsw_sp_rif *new_rif);
1515 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1516 struct mlxsw_sp_ipip_entry *ipip_entry,
1518 struct netlink_ext_ack *extack)
1520 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1521 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1523 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1527 if (IS_ERR(new_lb_rif))
1528 return PTR_ERR(new_lb_rif);
1529 ipip_entry->ol_lb = new_lb_rif;
1532 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1533 &new_lb_rif->common);
1535 mlxsw_sp_rif_destroy(&old_lb_rif->common);
1540 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1541 struct mlxsw_sp_rif *rif);
1544 /* Update the offload related to an IPIP entry. This always updates decap, and
1545 * in addition to that it also:
1546 * @recreate_loopback: recreates the associated loopback RIF
1547 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1548 * relevant when recreate_loopback is true.
1549 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1550 * is only relevant when recreate_loopback is false. */
1552 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1553 struct mlxsw_sp_ipip_entry *ipip_entry,
1554 bool recreate_loopback,
1556 bool update_nexthops,
1557 struct netlink_ext_ack *extack)
1561 /* RIFs can't be edited, so to update loopback, we need to destroy and
1562 * recreate it. That creates a window of opportunity where RALUE and
1563 * RATR registers end up referencing a RIF that's already gone. RATRs
1564 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1565 * of RALUE, demote the decap route back. */
1567 if (ipip_entry->decap_fib_entry)
1568 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1570 if (recreate_loopback) {
1571 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1572 keep_encap, extack);
1575 } else if (update_nexthops) {
1576 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1577 &ipip_entry->ol_lb->common);
1580 if (ipip_entry->ol_dev->flags & IFF_UP)
1581 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1586 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1587 struct net_device *ol_dev,
1588 struct netlink_ext_ack *extack)
1590 struct mlxsw_sp_ipip_entry *ipip_entry =
1591 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1592 enum mlxsw_sp_l3proto ul_proto;
1593 union mlxsw_sp_l3addr saddr;
1599 /* For flat configuration cases, moving overlay to a different VRF might
1600 * cause local address conflict, and the conflicting tunnels need to be demoted. */
1603 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1604 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1605 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1606 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1609 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1613 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1614 true, false, false, extack);
1618 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1619 struct mlxsw_sp_ipip_entry *ipip_entry,
1620 struct net_device *ul_dev,
1621 struct netlink_ext_ack *extack)
1623 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1624 true, true, false, extack);
1628 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1629 struct mlxsw_sp_ipip_entry *ipip_entry,
1630 struct net_device *ul_dev)
1632 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1633 false, false, true, NULL);
1637 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1638 struct mlxsw_sp_ipip_entry *ipip_entry,
1639 struct net_device *ul_dev)
1641 /* A down underlay device causes encapsulated packets to not be
1642 * forwarded, but decap still works. So refresh next hops without
1643 * touching anything else. */
1645 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1646 false, false, true, NULL);
1650 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1651 struct net_device *ol_dev,
1652 struct netlink_ext_ack *extack)
1654 const struct mlxsw_sp_ipip_ops *ipip_ops;
1655 struct mlxsw_sp_ipip_entry *ipip_entry;
1658 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1660 /* A change might make a tunnel eligible for offloading, but
1661 * that is currently not implemented. What falls to slow path stays there. */
1666 /* A change might make a tunnel not eligible for offloading. */
1667 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1668 ipip_entry->ipipt)) {
1669 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1673 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1674 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1678 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1679 struct mlxsw_sp_ipip_entry *ipip_entry)
1681 struct net_device *ol_dev = ipip_entry->ol_dev;
1683 if (ol_dev->flags & IFF_UP)
1684 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1685 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1688 /* The configuration where several tunnels have the same local address in the
1689 * same underlay table needs special treatment in the HW. That is currently not
1690 * implemented in the driver. This function finds and demotes the first tunnel
1691 * with a given source address, except the one passed in the argument 'except'. */
1695 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1696 enum mlxsw_sp_l3proto ul_proto,
1697 union mlxsw_sp_l3addr saddr,
1699 const struct mlxsw_sp_ipip_entry *except)
1701 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1703 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1705 if (ipip_entry != except &&
1706 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1707 ul_tb_id, ipip_entry)) {
1708 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1716 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1717 struct net_device *ul_dev)
1719 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1721 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1723 struct net_device *ipip_ul_dev =
1724 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1726 if (ipip_ul_dev == ul_dev)
1727 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1731 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1732 struct net_device *ol_dev,
1733 unsigned long event,
1734 struct netdev_notifier_info *info)
1736 struct netdev_notifier_changeupper_info *chup;
1737 struct netlink_ext_ack *extack;
1740 case NETDEV_REGISTER:
1741 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1742 case NETDEV_UNREGISTER:
1743 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1746 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1749 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1751 case NETDEV_CHANGEUPPER:
1752 chup = container_of(info, typeof(*chup), info);
1753 extack = info->extack;
1754 if (netif_is_l3_master(chup->upper_dev))
1755 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1760 extack = info->extack;
1761 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1763 case NETDEV_CHANGEMTU:
1764 return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1770 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1771 struct mlxsw_sp_ipip_entry *ipip_entry,
1772 struct net_device *ul_dev,
1773 unsigned long event,
1774 struct netdev_notifier_info *info)
1776 struct netdev_notifier_changeupper_info *chup;
1777 struct netlink_ext_ack *extack;
1780 case NETDEV_CHANGEUPPER:
1781 chup = container_of(info, typeof(*chup), info);
1782 extack = info->extack;
1783 if (netif_is_l3_master(chup->upper_dev))
1784 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1791 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1794 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1802 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1803 struct net_device *ul_dev,
1804 unsigned long event,
1805 struct netdev_notifier_info *info)
1807 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1810 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1813 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1814 ul_dev, event, info);
1816 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1825 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1826 enum mlxsw_sp_l3proto ul_proto,
1827 const union mlxsw_sp_l3addr *ul_sip,
1830 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1831 struct mlxsw_sp_fib_entry *fib_entry;
1834 /* It is valid to create a tunnel with a local IP and only later
1835 * assign this IP address to a local interface. */
1837 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1843 fib_entry->decap.tunnel_index = tunnel_index;
1844 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1846 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1848 goto err_fib_entry_update;
1852 err_fib_entry_update:
1853 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1854 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1858 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1859 enum mlxsw_sp_l3proto ul_proto,
1860 const union mlxsw_sp_l3addr *ul_sip)
1862 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1863 struct mlxsw_sp_fib_entry *fib_entry;
1865 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1871 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1872 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
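/* Neighbour offload: entries mirror kernel ARP/ND neighbours into the
 * device host table (RAUHT), keyed by the struct neighbour pointer.
 * Periodic work dumps hardware activity (RAUHTD) and pokes the kernel so
 * that neighbours which are only used by hardware are not aged out.
 */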
1875 struct mlxsw_sp_neigh_key {
1876 struct neighbour *n;
1879 struct mlxsw_sp_neigh_entry {
1880 struct list_head rif_list_node;
1881 struct rhash_head ht_node;
1882 struct mlxsw_sp_neigh_key key;
1885 unsigned char ha[ETH_ALEN];
1886 struct list_head nexthop_list; /* list of nexthops using this neigh entry */
1889 struct list_head nexthop_neighs_list_node;
1890 unsigned int counter_index;
1894 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1895 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1896 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1897 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1900 struct mlxsw_sp_neigh_entry *
1901 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1902 struct mlxsw_sp_neigh_entry *neigh_entry)
1905 if (list_empty(&rif->neigh_list))
1908 return list_first_entry(&rif->neigh_list,
1909 typeof(*neigh_entry),
1912 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
1914 return list_next_entry(neigh_entry, rif_list_node);
1917 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1919 return neigh_entry->key.n->tbl->family;
1923 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1925 return neigh_entry->ha;
1928 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1930 struct neighbour *n;
1932 n = neigh_entry->key.n;
1933 return ntohl(*((__be32 *) n->primary_key));
1937 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1939 struct neighbour *n;
1941 n = neigh_entry->key.n;
1942 return (struct in6_addr *) &n->primary_key;
1945 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1946 struct mlxsw_sp_neigh_entry *neigh_entry,
1949 if (!neigh_entry->counter_valid)
1952 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1956 static struct mlxsw_sp_neigh_entry *
1957 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1960 struct mlxsw_sp_neigh_entry *neigh_entry;
1962 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1966 neigh_entry->key.n = n;
1967 neigh_entry->rif = rif;
1968 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1973 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1979 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1980 struct mlxsw_sp_neigh_entry *neigh_entry)
1982 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
1983 &neigh_entry->ht_node,
1984 mlxsw_sp_neigh_ht_params);
1988 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1989 struct mlxsw_sp_neigh_entry *neigh_entry)
1991 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
1992 &neigh_entry->ht_node,
1993 mlxsw_sp_neigh_ht_params);
1997 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1998 struct mlxsw_sp_neigh_entry *neigh_entry)
2000 struct devlink *devlink;
2001 const char *table_name;
2003 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2005 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2008 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2015 devlink = priv_to_devlink(mlxsw_sp->core);
2016 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2020 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2021 struct mlxsw_sp_neigh_entry *neigh_entry)
2023 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2026 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2029 neigh_entry->counter_valid = true;
2033 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2034 struct mlxsw_sp_neigh_entry *neigh_entry)
2036 if (!neigh_entry->counter_valid)
2038 mlxsw_sp_flow_counter_free(mlxsw_sp,
2039 neigh_entry->counter_index);
2040 neigh_entry->counter_valid = false;
2043 static struct mlxsw_sp_neigh_entry *
2044 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2046 struct mlxsw_sp_neigh_entry *neigh_entry;
2047 struct mlxsw_sp_rif *rif;
2050 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2052 return ERR_PTR(-EINVAL);
2054 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2056 return ERR_PTR(-ENOMEM);
2058 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2060 goto err_neigh_entry_insert;
2062 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2063 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2067 err_neigh_entry_insert:
2068 mlxsw_sp_neigh_entry_free(neigh_entry);
2069 return ERR_PTR(err);
2073 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2074 struct mlxsw_sp_neigh_entry *neigh_entry)
2076 list_del(&neigh_entry->rif_list_node);
2077 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2078 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2079 mlxsw_sp_neigh_entry_free(neigh_entry);
2082 static struct mlxsw_sp_neigh_entry *
2083 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2085 struct mlxsw_sp_neigh_key key;
2088 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2089 &key, mlxsw_sp_neigh_ht_params);
2093 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2095 unsigned long interval;
2097 #if IS_ENABLED(CONFIG_IPV6)
2098 interval = min_t(unsigned long,
2099 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2100 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2102 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2104 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2107 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2111 struct net_device *dev;
2112 struct neighbour *n;
2117 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2119 if (!mlxsw_sp->router->rifs[rif]) {
2120 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2125 dev = mlxsw_sp->router->rifs[rif]->dev;
2126 n = neigh_lookup(&arp_tbl, &dipn, dev);
2130 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2131 neigh_event_send(n, NULL);
2135 #if IS_ENABLED(CONFIG_IPV6)
2136 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2140 struct net_device *dev;
2141 struct neighbour *n;
2142 struct in6_addr dip;
2145 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2148 if (!mlxsw_sp->router->rifs[rif]) {
2149 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2153 dev = mlxsw_sp->router->rifs[rif]->dev;
2154 n = neigh_lookup(&nd_tbl, &dip, dev);
2158 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2159 neigh_event_send(n, NULL);
2163 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2170 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2177 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2179 /* Hardware starts counting at 0, so add 1. */
2182 /* Each record consists of several neighbour entries. */
2183 for (i = 0; i < num_entries; i++) {
2186 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2187 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2193 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2197 /* One record contains one entry. */
2198 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2202 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2203 char *rauhtd_pl, int rec_index)
2205 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2206 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2207 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2210 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2211 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2217 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2219 u8 num_rec, last_rec_index, num_entries;
2221 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2222 last_rec_index = num_rec - 1;
2224 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2226 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2227 MLXSW_REG_RAUHTD_TYPE_IPV6)
2230 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2232 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2238 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2240 enum mlxsw_reg_rauhtd_type type)
2245 /* Make sure the neighbour's netdev isn't removed in the process. */
2250 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2251 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2254 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2257 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2258 for (i = 0; i < num_rec; i++)
2259 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2261 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
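/* The loop above repeats the dump while the previous response came back
 * full: a single RAUHTD query returns at most MLXSW_REG_RAUHTD_REC_MAX_NUM
 * records, so a full response may mean more entries are still pending.
 */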
2267 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2269 enum mlxsw_reg_rauhtd_type type;
2273 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2277 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2278 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2282 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2283 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2289 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2291 struct mlxsw_sp_neigh_entry *neigh_entry;
2293 /* Take RTNL mutex here to prevent the lists from changing */
2295 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2296 nexthop_neighs_list_node)
2297 /* If this neigh has nexthops, make the kernel think this neigh
2298 * is active regardless of the traffic. */
2300 neigh_event_send(neigh_entry->key.n, NULL);
2305 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2307 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2309 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2310 msecs_to_jiffies(interval));
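/* Periodic work that queries the device for neighbour activity and
 * feeds it back to the kernel via neigh_event_send(), so that
 * neighbours used by offloaded traffic are not aged out. The interval
 * is derived from DELAY_PROBE_TIME (see the netevent handler below).
 */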
2313 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2315 struct mlxsw_sp_router *router;
2318 router = container_of(work, struct mlxsw_sp_router,
2319 neighs_update.dw.work);
2320 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2322 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2324 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2326 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2329 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2331 struct mlxsw_sp_neigh_entry *neigh_entry;
2332 struct mlxsw_sp_router *router;
2334 router = container_of(work, struct mlxsw_sp_router,
2335 nexthop_probe_dw.work);
2336 /* Iterate over the nexthop neighbours, find the unresolved ones and
2337 * send ARP for them. This solves the chicken-and-egg problem where a
2338 * nexthop is not offloaded until its neighbour is resolved, but the
2339 * neighbour is never resolved if traffic already flows in hardware
2340 * via a different nexthop.
2342 * Take the RTNL mutex here to prevent the lists from changing.
2345 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2346 nexthop_neighs_list_node)
2347 if (!neigh_entry->connected)
2348 neigh_event_send(neigh_entry->key.n, NULL);
2351 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2352 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2356 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2357 struct mlxsw_sp_neigh_entry *neigh_entry,
2360 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2362 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2363 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2367 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2368 struct mlxsw_sp_neigh_entry *neigh_entry,
2369 enum mlxsw_reg_rauht_op op)
2371 struct neighbour *n = neigh_entry->key.n;
2372 u32 dip = ntohl(*((__be32 *) n->primary_key));
2373 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2375 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2377 if (neigh_entry->counter_valid)
2378 mlxsw_reg_rauht_pack_counter(rauht_pl,
2379 neigh_entry->counter_index);
2380 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2384 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2385 struct mlxsw_sp_neigh_entry *neigh_entry,
2386 enum mlxsw_reg_rauht_op op)
2388 struct neighbour *n = neigh_entry->key.n;
2389 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2390 const char *dip = n->primary_key;
2392 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2394 if (neigh_entry->counter_valid)
2395 mlxsw_reg_rauht_pack_counter(rauht_pl,
2396 neigh_entry->counter_index);
2397 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2400 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2402 struct neighbour *n = neigh_entry->key.n;
2404 /* Packets with a link-local destination address are trapped
2405 * after LPM lookup and never reach the neighbour table, so
2406 * there is no need to program such neighbours to the device.
2408 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2409 IPV6_ADDR_LINKLOCAL)
2415 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2416 struct mlxsw_sp_neigh_entry *neigh_entry,
2419 if (!adding && !neigh_entry->connected)
2421 neigh_entry->connected = adding;
2422 if (neigh_entry->key.n->tbl->family == AF_INET) {
2423 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2424 mlxsw_sp_rauht_op(adding));
2425 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2426 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2428 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2429 mlxsw_sp_rauht_op(adding));
2436 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2437 struct mlxsw_sp_neigh_entry *neigh_entry,
2441 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2443 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2444 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2447 struct mlxsw_sp_netevent_work {
2448 struct work_struct work;
2449 struct mlxsw_sp *mlxsw_sp;
2450 struct neighbour *n;
2453 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2455 struct mlxsw_sp_netevent_work *net_work =
2456 container_of(work, struct mlxsw_sp_netevent_work, work);
2457 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2458 struct mlxsw_sp_neigh_entry *neigh_entry;
2459 struct neighbour *n = net_work->n;
2460 unsigned char ha[ETH_ALEN];
2461 bool entry_connected;
2464 /* If these parameters are changed after we release the lock,
2465 * then we are guaranteed to receive another event letting us
2468 read_lock_bh(&n->lock);
2469 memcpy(ha, n->ha, ETH_ALEN);
2470 nud_state = n->nud_state;
2472 read_unlock_bh(&n->lock);
2475 mlxsw_sp_span_respin(mlxsw_sp);
2477 entry_connected = nud_state & NUD_VALID && !dead;
2478 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2479 if (!entry_connected && !neigh_entry)
2482 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2483 if (IS_ERR(neigh_entry))
2487 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2488 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2489 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2491 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2492 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2500 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2502 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2504 struct mlxsw_sp_netevent_work *net_work =
2505 container_of(work, struct mlxsw_sp_netevent_work, work);
2506 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2508 mlxsw_sp_mp_hash_init(mlxsw_sp);
2512 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2514 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2516 struct mlxsw_sp_netevent_work *net_work =
2517 container_of(work, struct mlxsw_sp_netevent_work, work);
2518 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2520 __mlxsw_sp_router_init(mlxsw_sp);
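/* Netevent notifiers run in atomic context, so the actual processing
 * is deferred to a work item. Only events from the initial network
 * namespace are handled.
 */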
2524 static int mlxsw_sp_router_schedule_work(struct net *net,
2525 struct notifier_block *nb,
2526 void (*cb)(struct work_struct *))
2528 struct mlxsw_sp_netevent_work *net_work;
2529 struct mlxsw_sp_router *router;
2531 if (!net_eq(net, &init_net))
2534 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2538 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2539 INIT_WORK(&net_work->work, cb);
2540 net_work->mlxsw_sp = router->mlxsw_sp;
2541 mlxsw_core_schedule_work(&net_work->work);
2545 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2546 unsigned long event, void *ptr)
2548 struct mlxsw_sp_netevent_work *net_work;
2549 struct mlxsw_sp_port *mlxsw_sp_port;
2550 struct mlxsw_sp *mlxsw_sp;
2551 unsigned long interval;
2552 struct neigh_parms *p;
2553 struct neighbour *n;
2556 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2559 /* We don't care about changes in the default table. */
2560 if (!p->dev || (p->tbl->family != AF_INET &&
2561 p->tbl->family != AF_INET6))
2564 /* We are in atomic context and can't take RTNL mutex,
2565 * so use RCU variant to walk the device chain.
2567 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2571 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2572 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2573 mlxsw_sp->router->neighs_update.interval = interval;
2575 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2577 case NETEVENT_NEIGH_UPDATE:
2580 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2583 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2587 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2589 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2593 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2594 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2597 /* Take a reference to ensure the neighbour won't be
2598 * destructed until we drop the reference in delayed
2602 mlxsw_core_schedule_work(&net_work->work);
2603 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2605 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2606 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2607 return mlxsw_sp_router_schedule_work(ptr, nb,
2608 mlxsw_sp_router_mp_hash_event_work);
2610 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2611 return mlxsw_sp_router_schedule_work(ptr, nb,
2612 mlxsw_sp_router_update_priority_work);
2618 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2622 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2623 &mlxsw_sp_neigh_ht_params);
2627 /* Initialize the polling interval according to the default
2630 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2632 /* Create the delayed works for neighbour activity update and for probing unresolved nexthops */
2633 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2634 mlxsw_sp_router_neighs_update_work);
2635 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2636 mlxsw_sp_router_probe_unresolved_nexthops);
2637 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2638 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2642 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2644 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2645 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2646 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2649 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2650 struct mlxsw_sp_rif *rif)
2652 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2654 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2656 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2657 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2661 enum mlxsw_sp_nexthop_type {
2662 MLXSW_SP_NEXTHOP_TYPE_ETH,
2663 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2666 struct mlxsw_sp_nexthop_key {
2667 struct fib_nh *fib_nh;
2670 struct mlxsw_sp_nexthop {
2671 struct list_head neigh_list_node; /* member of neigh entry list */
2672 struct list_head rif_list_node;
2673 struct list_head router_list_node;
2674 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2677 struct rhash_head ht_node;
2678 struct mlxsw_sp_nexthop_key key;
2679 unsigned char gw_addr[sizeof(struct in6_addr)];
2683 int num_adj_entries;
2684 struct mlxsw_sp_rif *rif;
2685 u8 should_offload:1, /* set indicates this neigh is connected and
2686 * should be put to KVD linear area of this group.
2688 offloaded:1, /* set in case the neigh is actually put into
2689 * KVD linear area of this group.
2691 update:1; /* set indicates that MAC of this neigh should be
2694 enum mlxsw_sp_nexthop_type type;
2696 struct mlxsw_sp_neigh_entry *neigh_entry;
2697 struct mlxsw_sp_ipip_entry *ipip_entry;
2699 unsigned int counter_index;
2703 struct mlxsw_sp_nexthop_group {
2705 struct rhash_head ht_node;
2706 struct list_head fib_list; /* list of fib entries that use this group */
2707 struct neigh_table *neigh_tbl;
2708 u8 adj_index_valid:1,
2709 gateway:1; /* routes using the group use a gateway */
2713 int sum_norm_weight;
2714 struct mlxsw_sp_nexthop nexthops[0];
2715 #define nh_rif nexthops[0].rif
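/* FIB entries that are not programmed with an adjacency index (see
 * mlxsw_sp_fib_entry_op_local() below) use the RIF of the first
 * nexthop to represent the whole group.
 */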
2718 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2719 struct mlxsw_sp_nexthop *nh)
2721 struct devlink *devlink;
2723 devlink = priv_to_devlink(mlxsw_sp->core);
2724 if (!devlink_dpipe_table_counter_enabled(devlink,
2725 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2728 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2731 nh->counter_valid = true;
2734 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2735 struct mlxsw_sp_nexthop *nh)
2737 if (!nh->counter_valid)
2739 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2740 nh->counter_valid = false;
2743 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2744 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2746 if (!nh->counter_valid)
2749 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2753 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2754 struct mlxsw_sp_nexthop *nh)
2757 if (list_empty(&router->nexthop_list))
2760 return list_first_entry(&router->nexthop_list,
2761 typeof(*nh), router_list_node);
2763 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2765 return list_next_entry(nh, router_list_node);
2768 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2770 return nh->offloaded;
2773 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2777 return nh->neigh_entry->ha;
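/* Return the base adjacency index and size of the group, as well as
 * the offset of this nexthop within it. The offset is the sum of the
 * adjacency entries occupied by the offloaded nexthops preceding it
 * in the group.
 */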
2780 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2781 u32 *p_adj_size, u32 *p_adj_hash_index)
2783 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2784 u32 adj_hash_index = 0;
2787 if (!nh->offloaded || !nh_grp->adj_index_valid)
2790 *p_adj_index = nh_grp->adj_index;
2791 *p_adj_size = nh_grp->ecmp_size;
2793 for (i = 0; i < nh_grp->count; i++) {
2794 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2798 if (nh_iter->offloaded)
2799 adj_hash_index += nh_iter->num_adj_entries;
2802 *p_adj_hash_index = adj_hash_index;
2806 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2811 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2813 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2816 for (i = 0; i < nh_grp->count; i++) {
2817 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2819 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2825 static struct fib_info *
2826 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2828 return nh_grp->priv;
2831 struct mlxsw_sp_nexthop_group_cmp_arg {
2832 enum mlxsw_sp_l3proto proto;
2834 struct fib_info *fi;
2835 struct mlxsw_sp_fib6_entry *fib6_entry;
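/* IPv4 nexthop groups are keyed by their struct fib_info, which the
 * kernel already shares between routes with identical nexthops. IPv6
 * has no equivalent object, so IPv6 groups are matched by comparing
 * the (ifindex, weight, gateway) tuple of every nexthop, as done
 * below.
 */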
2840 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2841 const struct in6_addr *gw, int ifindex,
2846 for (i = 0; i < nh_grp->count; i++) {
2847 const struct mlxsw_sp_nexthop *nh;
2849 nh = &nh_grp->nexthops[i];
2850 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
2851 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2859 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2860 const struct mlxsw_sp_fib6_entry *fib6_entry)
2862 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2864 if (nh_grp->count != fib6_entry->nrt6)
2867 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2868 struct in6_addr *gw;
2869 int ifindex, weight;
2871 ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
2872 weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
2873 gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
2874 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2883 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2885 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2886 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2888 switch (cmp_arg->proto) {
2889 case MLXSW_SP_L3_PROTO_IPV4:
2890 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2891 case MLXSW_SP_L3_PROTO_IPV6:
2892 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2893 cmp_arg->fib6_entry);
2901 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2903 return nh_grp->neigh_tbl->family;
2906 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2908 const struct mlxsw_sp_nexthop_group *nh_grp = data;
2909 const struct mlxsw_sp_nexthop *nh;
2910 struct fib_info *fi;
2914 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2916 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2917 return jhash(&fi, sizeof(fi), seed);
2919 val = nh_grp->count;
2920 for (i = 0; i < nh_grp->count; i++) {
2921 nh = &nh_grp->nexthops[i];
2924 return jhash(&val, sizeof(val), seed);
2932 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2934 unsigned int val = fib6_entry->nrt6;
2935 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2936 struct net_device *dev;
2938 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2939 dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
2940 val ^= dev->ifindex;
2943 return jhash(&val, sizeof(val), seed);
2947 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2949 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2951 switch (cmp_arg->proto) {
2952 case MLXSW_SP_L3_PROTO_IPV4:
2953 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2954 case MLXSW_SP_L3_PROTO_IPV6:
2955 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2962 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
2963 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
2964 .hashfn = mlxsw_sp_nexthop_group_hash,
2965 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2966 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
2969 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2970 struct mlxsw_sp_nexthop_group *nh_grp)
2972 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2976 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
2978 mlxsw_sp_nexthop_group_ht_params);
2981 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2982 struct mlxsw_sp_nexthop_group *nh_grp)
2984 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2988 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
2990 mlxsw_sp_nexthop_group_ht_params);
2993 static struct mlxsw_sp_nexthop_group *
2994 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2995 struct fib_info *fi)
2997 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2999 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3001 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3003 mlxsw_sp_nexthop_group_ht_params);
3006 static struct mlxsw_sp_nexthop_group *
3007 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3008 struct mlxsw_sp_fib6_entry *fib6_entry)
3010 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3012 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3013 cmp_arg.fib6_entry = fib6_entry;
3014 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3016 mlxsw_sp_nexthop_group_ht_params);
3019 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3020 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3021 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3022 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3025 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3026 struct mlxsw_sp_nexthop *nh)
3028 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3029 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3032 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3033 struct mlxsw_sp_nexthop *nh)
3035 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3036 mlxsw_sp_nexthop_ht_params);
3039 static struct mlxsw_sp_nexthop *
3040 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3041 struct mlxsw_sp_nexthop_key key)
3043 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3044 mlxsw_sp_nexthop_ht_params);
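/* When an adjacency group is moved to a new KVD linear allocation,
 * FIB entries pointing at the old adjacency index are switched to the
 * new one in hardware using the RALEU register, once per virtual
 * router that references the group.
 */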
3047 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3048 const struct mlxsw_sp_fib *fib,
3049 u32 adj_index, u16 ecmp_size,
3053 char raleu_pl[MLXSW_REG_RALEU_LEN];
3055 mlxsw_reg_raleu_pack(raleu_pl,
3056 (enum mlxsw_reg_ralxx_protocol) fib->proto,
3057 fib->vr->id, adj_index, ecmp_size, new_adj_index,
3059 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3062 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3063 struct mlxsw_sp_nexthop_group *nh_grp,
3064 u32 old_adj_index, u16 old_ecmp_size)
3066 struct mlxsw_sp_fib_entry *fib_entry;
3067 struct mlxsw_sp_fib *fib = NULL;
3070 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3071 if (fib == fib_entry->fib_node->fib)
3073 fib = fib_entry->fib_node->fib;
3074 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3085 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3086 struct mlxsw_sp_nexthop *nh)
3088 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3089 char ratr_pl[MLXSW_REG_RATR_LEN];
3091 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3092 true, MLXSW_REG_RATR_TYPE_ETHERNET,
3093 adj_index, neigh_entry->rif);
3094 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3095 if (nh->counter_valid)
3096 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3098 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3100 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
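/* A nexthop may own several consecutive adjacency entries once the
 * group has been rebalanced according to the nexthop weights, so the
 * same RATR record is written once per entry.
 */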
3103 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3104 struct mlxsw_sp_nexthop *nh)
3108 for (i = 0; i < nh->num_adj_entries; i++) {
3111 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3119 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3121 struct mlxsw_sp_nexthop *nh)
3123 const struct mlxsw_sp_ipip_ops *ipip_ops;
3125 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3126 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3129 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3131 struct mlxsw_sp_nexthop *nh)
3135 for (i = 0; i < nh->num_adj_entries; i++) {
3138 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3148 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3149 struct mlxsw_sp_nexthop_group *nh_grp,
3152 u32 adj_index = nh_grp->adj_index; /* base */
3153 struct mlxsw_sp_nexthop *nh;
3157 for (i = 0; i < nh_grp->count; i++) {
3158 nh = &nh_grp->nexthops[i];
3160 if (!nh->should_offload) {
3165 if (nh->update || reallocate) {
3167 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3168 err = mlxsw_sp_nexthop_update
3169 (mlxsw_sp, adj_index, nh);
3171 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3172 err = mlxsw_sp_nexthop_ipip_update
3173 (mlxsw_sp, adj_index, nh);
3181 adj_index += nh->num_adj_entries;
3187 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3188 const struct mlxsw_sp_fib_entry *fib_entry);
3191 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3192 struct mlxsw_sp_nexthop_group *nh_grp)
3194 struct mlxsw_sp_fib_entry *fib_entry;
3197 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3198 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3201 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3209 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3210 enum mlxsw_reg_ralue_op op, int err);
3213 mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3215 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3216 struct mlxsw_sp_fib_entry *fib_entry;
3218 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3219 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3222 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3226 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3228 /* Valid sizes for an adjacency group are:
3229 * 1-64, 512, 1024, 2048 and 4096.
3231 if (*p_adj_grp_size <= 64)
3233 else if (*p_adj_grp_size <= 512)
3234 *p_adj_grp_size = 512;
3235 else if (*p_adj_grp_size <= 1024)
3236 *p_adj_grp_size = 1024;
3237 else if (*p_adj_grp_size <= 2048)
3238 *p_adj_grp_size = 2048;
3240 *p_adj_grp_size = 4096;
3243 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3244 unsigned int alloc_size)
3246 if (alloc_size >= 4096)
3247 *p_adj_grp_size = 4096;
3248 else if (alloc_size >= 2048)
3249 *p_adj_grp_size = 2048;
3250 else if (alloc_size >= 1024)
3251 *p_adj_grp_size = 1024;
3252 else if (alloc_size >= 512)
3253 *p_adj_grp_size = 512;
3256 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3257 u16 *p_adj_grp_size)
3259 unsigned int alloc_size;
3262 /* Round up the requested group size to the next size supported
3263 * by the device and make sure the request can be satisfied.
3265 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3266 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3267 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3268 *p_adj_grp_size, &alloc_size);
3271 /* It is possible the allocation results in more allocated
3272 * entries than requested. Try to use as many of them as possible.
3275 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3281 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3283 int i, g = 0, sum_norm_weight = 0;
3284 struct mlxsw_sp_nexthop *nh;
3286 for (i = 0; i < nh_grp->count; i++) {
3287 nh = &nh_grp->nexthops[i];
3289 if (!nh->should_offload)
3292 g = gcd(nh->nh_weight, g);
3297 for (i = 0; i < nh_grp->count; i++) {
3298 nh = &nh_grp->nexthops[i];
3300 if (!nh->should_offload)
3302 nh->norm_nh_weight = nh->nh_weight / g;
3303 sum_norm_weight += nh->norm_nh_weight;
3306 nh_grp->sum_norm_weight = sum_norm_weight;
3310 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3312 int total = nh_grp->sum_norm_weight;
3313 u16 ecmp_size = nh_grp->ecmp_size;
3314 int i, weight = 0, lower_bound = 0;
3316 for (i = 0; i < nh_grp->count; i++) {
3317 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3320 if (!nh->should_offload)
3322 weight += nh->norm_nh_weight;
3323 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3324 nh->num_adj_entries = upper_bound - lower_bound;
3325 lower_bound = upper_bound;
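/* Worked example for the two helpers above: nexthops with weights 10,
 * 20 and 30 have a GCD of 10 and normalize to 1, 2 and 3, so
 * sum_norm_weight is 6. With an ECMP size of 6 (sizes up to 64 are
 * not rounded by mlxsw_sp_fix_adj_grp_size()), the rebalance loop
 * assigns DIV_ROUND_CLOSEST(6 * 1, 6) - 0 = 1 adjacency entry to the
 * first nexthop, 3 - 1 = 2 to the second and 6 - 3 = 3 to the third,
 * i.e. entries in proportion to the original weights.
 */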
3330 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3331 struct mlxsw_sp_nexthop_group *nh_grp)
3333 u16 ecmp_size, old_ecmp_size;
3334 struct mlxsw_sp_nexthop *nh;
3335 bool offload_change = false;
3337 bool old_adj_index_valid;
3342 if (!nh_grp->gateway) {
3343 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3347 for (i = 0; i < nh_grp->count; i++) {
3348 nh = &nh_grp->nexthops[i];
3350 if (nh->should_offload != nh->offloaded) {
3351 offload_change = true;
3352 if (nh->should_offload)
3356 if (!offload_change) {
3357 /* Nothing was added or removed, so no need to reallocate. Just
3358 * update MAC on existing adjacency indexes.
3360 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3362 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3367 mlxsw_sp_nexthop_group_normalize(nh_grp);
3368 if (!nh_grp->sum_norm_weight)
3369 /* No neighbour of this group is connected, so just set
3370 * the trap and let everything flow through the kernel.
3374 ecmp_size = nh_grp->sum_norm_weight;
3375 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3377 /* No valid allocation size available. */
3380 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3381 ecmp_size, &adj_index);
3383 /* We ran out of KVD linear space, just set the
3384 * trap and let everything flow through kernel.
3386 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3389 old_adj_index_valid = nh_grp->adj_index_valid;
3390 old_adj_index = nh_grp->adj_index;
3391 old_ecmp_size = nh_grp->ecmp_size;
3392 nh_grp->adj_index_valid = 1;
3393 nh_grp->adj_index = adj_index;
3394 nh_grp->ecmp_size = ecmp_size;
3395 mlxsw_sp_nexthop_group_rebalance(nh_grp);
3396 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3398 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3402 if (!old_adj_index_valid) {
3403 /* The trap was set for fib entries, so we have to call
3404 * fib entry update to unset it and use adjacency index.
3406 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3408 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3414 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3415 old_adj_index, old_ecmp_size);
3416 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3417 old_ecmp_size, old_adj_index);
3419 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3423 /* Offload state within the group changed, so update the flags. */
3424 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3429 old_adj_index_valid = nh_grp->adj_index_valid;
3430 nh_grp->adj_index_valid = 0;
3431 for (i = 0; i < nh_grp->count; i++) {
3432 nh = &nh_grp->nexthops[i];
3435 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3437 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3438 if (old_adj_index_valid)
3439 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3440 nh_grp->ecmp_size, nh_grp->adj_index);
3443 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3447 nh->should_offload = 1;
3449 nh->should_offload = 0;
3454 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3455 struct mlxsw_sp_neigh_entry *neigh_entry,
3458 struct mlxsw_sp_nexthop *nh;
3460 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3462 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3463 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3467 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3468 struct mlxsw_sp_rif *rif)
3474 list_add(&nh->rif_list_node, &rif->nexthop_list);
3477 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3482 list_del(&nh->rif_list_node);
3486 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3487 struct mlxsw_sp_nexthop *nh)
3489 struct mlxsw_sp_neigh_entry *neigh_entry;
3490 struct neighbour *n;
3494 if (!nh->nh_grp->gateway || nh->neigh_entry)
3497 /* Take a reference on the neighbour here to ensure it is not
3498 * destroyed before the nexthop entry is finished with it.
3499 * The reference is taken either in neigh_lookup() or
3500 * in neigh_create() in case n is not found.
3502 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3504 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3508 neigh_event_send(n, NULL);
3510 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3512 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3513 if (IS_ERR(neigh_entry)) {
3515 goto err_neigh_entry_create;
3519 /* If that is the first nexthop connected to that neigh, add to
3520 * nexthop_neighs_list
3522 if (list_empty(&neigh_entry->nexthop_list))
3523 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3524 &mlxsw_sp->router->nexthop_neighs_list);
3526 nh->neigh_entry = neigh_entry;
3527 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3528 read_lock_bh(&n->lock);
3529 nud_state = n->nud_state;
3531 read_unlock_bh(&n->lock);
3532 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3536 err_neigh_entry_create:
3541 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3542 struct mlxsw_sp_nexthop *nh)
3544 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3545 struct neighbour *n;
3549 n = neigh_entry->key.n;
3551 __mlxsw_sp_nexthop_neigh_update(nh, true);
3552 list_del(&nh->neigh_list_node);
3553 nh->neigh_entry = NULL;
3555 /* If that is the last nexthop connected to that neigh, remove from
3556 * nexthop_neighs_list
3558 if (list_empty(&neigh_entry->nexthop_list))
3559 list_del(&neigh_entry->nexthop_neighs_list_node);
3561 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3562 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3567 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3569 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3571 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3574 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3575 struct mlxsw_sp_nexthop *nh,
3576 struct mlxsw_sp_ipip_entry *ipip_entry)
3580 if (!nh->nh_grp->gateway || nh->ipip_entry)
3583 nh->ipip_entry = ipip_entry;
3584 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3585 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3586 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3589 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3590 struct mlxsw_sp_nexthop *nh)
3592 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3597 __mlxsw_sp_nexthop_neigh_update(nh, true);
3598 nh->ipip_entry = NULL;
3601 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3602 const struct fib_nh *fib_nh,
3603 enum mlxsw_sp_ipip_type *p_ipipt)
3605 struct net_device *dev = fib_nh->nh_dev;
3608 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3609 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3612 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3613 struct mlxsw_sp_nexthop *nh)
3616 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3617 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3618 mlxsw_sp_nexthop_rif_fini(nh);
3620 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3621 mlxsw_sp_nexthop_rif_fini(nh);
3622 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3627 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3628 struct mlxsw_sp_nexthop *nh,
3629 struct fib_nh *fib_nh)
3631 const struct mlxsw_sp_ipip_ops *ipip_ops;
3632 struct net_device *dev = fib_nh->nh_dev;
3633 struct mlxsw_sp_ipip_entry *ipip_entry;
3634 struct mlxsw_sp_rif *rif;
3637 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3639 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3640 if (ipip_ops->can_offload(mlxsw_sp, dev,
3641 MLXSW_SP_L3_PROTO_IPV4)) {
3642 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3643 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3648 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3649 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3653 mlxsw_sp_nexthop_rif_init(nh, rif);
3654 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3656 goto err_neigh_init;
3661 mlxsw_sp_nexthop_rif_fini(nh);
3665 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3666 struct mlxsw_sp_nexthop *nh)
3668 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3671 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3672 struct mlxsw_sp_nexthop_group *nh_grp,
3673 struct mlxsw_sp_nexthop *nh,
3674 struct fib_nh *fib_nh)
3676 struct net_device *dev = fib_nh->nh_dev;
3677 struct in_device *in_dev;
3680 nh->nh_grp = nh_grp;
3681 nh->key.fib_nh = fib_nh;
3682 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3683 nh->nh_weight = fib_nh->nh_weight;
3687 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
3688 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3692 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3693 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
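/* When the kernel is configured to ignore routes whose nexthop is
 * link-down (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN) and this nexthop
 * carries RTNH_F_LINKDOWN, the nexthop is only registered here and is
 * not bound to a neighbour or RIF.
 */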
3698 in_dev = __in_dev_get_rtnl(dev);
3699 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3700 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3703 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3705 goto err_nexthop_neigh_init;
3709 err_nexthop_neigh_init:
3710 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3714 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3715 struct mlxsw_sp_nexthop *nh)
3717 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3718 list_del(&nh->router_list_node);
3719 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3720 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3723 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3724 unsigned long event, struct fib_nh *fib_nh)
3726 struct mlxsw_sp_nexthop_key key;
3727 struct mlxsw_sp_nexthop *nh;
3729 if (mlxsw_sp->router->aborted)
3732 key.fib_nh = fib_nh;
3733 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3734 if (WARN_ON_ONCE(!nh))
3738 case FIB_EVENT_NH_ADD:
3739 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3741 case FIB_EVENT_NH_DEL:
3742 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3746 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3749 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3750 struct mlxsw_sp_rif *rif)
3752 struct mlxsw_sp_nexthop *nh;
3755 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3757 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3760 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3761 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3768 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3769 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3773 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3774 struct mlxsw_sp_rif *old_rif,
3775 struct mlxsw_sp_rif *new_rif)
3777 struct mlxsw_sp_nexthop *nh;
3779 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3780 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3782 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3785 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3786 struct mlxsw_sp_rif *rif)
3788 struct mlxsw_sp_nexthop *nh, *tmp;
3790 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3791 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3792 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
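/* A route uses adjacency entries (i.e. is a "gateway" route) if its
 * nexthop goes through a gateway reachable at link scope, or if it
 * egresses through an offloadable IP-in-IP tunnel.
 */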
3796 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3797 const struct fib_info *fi)
3799 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3800 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
3803 static struct mlxsw_sp_nexthop_group *
3804 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
3806 struct mlxsw_sp_nexthop_group *nh_grp;
3807 struct mlxsw_sp_nexthop *nh;
3808 struct fib_nh *fib_nh;
3813 alloc_size = sizeof(*nh_grp) +
3814 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3815 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3817 return ERR_PTR(-ENOMEM);
3819 INIT_LIST_HEAD(&nh_grp->fib_list);
3820 nh_grp->neigh_tbl = &arp_tbl;
3822 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
3823 nh_grp->count = fi->fib_nhs;
3825 for (i = 0; i < nh_grp->count; i++) {
3826 nh = &nh_grp->nexthops[i];
3827 fib_nh = &fi->fib_nh[i];
3828 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
3830 goto err_nexthop4_init;
3832 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3834 goto err_nexthop_group_insert;
3835 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3838 err_nexthop_group_insert:
3840 for (i--; i >= 0; i--) {
3841 nh = &nh_grp->nexthops[i];
3842 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
3846 return ERR_PTR(err);
3850 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3851 struct mlxsw_sp_nexthop_group *nh_grp)
3853 struct mlxsw_sp_nexthop *nh;
3856 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
3857 for (i = 0; i < nh_grp->count; i++) {
3858 nh = &nh_grp->nexthops[i];
3859 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
3861 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3862 WARN_ON_ONCE(nh_grp->adj_index_valid);
3863 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
3867 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3868 struct mlxsw_sp_fib_entry *fib_entry,
3869 struct fib_info *fi)
3871 struct mlxsw_sp_nexthop_group *nh_grp;
3873 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
3875 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
3877 return PTR_ERR(nh_grp);
3879 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3880 fib_entry->nh_group = nh_grp;
3884 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3885 struct mlxsw_sp_fib_entry *fib_entry)
3887 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3889 list_del(&fib_entry->nexthop_group_node);
3890 if (!list_empty(&nh_grp->fib_list))
3892 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
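/* Routes with a non-zero TOS are not offloaded, since the device does
 * not consider TOS during the LPM lookup; packets hitting them are
 * trapped to the kernel instead.
 */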
3896 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3898 struct mlxsw_sp_fib4_entry *fib4_entry;
3900 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3902 return !fib4_entry->tos;
3906 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3908 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3910 switch (fib_entry->fib_node->fib->proto) {
3911 case MLXSW_SP_L3_PROTO_IPV4:
3912 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3915 case MLXSW_SP_L3_PROTO_IPV6:
3919 switch (fib_entry->type) {
3920 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3921 return !!nh_group->adj_index_valid;
3922 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
3923 return !!nh_group->nh_rif;
3924 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3925 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
3932 static struct mlxsw_sp_nexthop *
3933 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3934 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3938 for (i = 0; i < nh_grp->count; i++) {
3939 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3940 struct fib6_info *rt = mlxsw_sp_rt6->rt;
3942 if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
3943 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3944 &rt->fib6_nh.nh_gw))
3953 mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3955 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3958 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3959 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
3960 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
3961 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3965 for (i = 0; i < nh_grp->count; i++) {
3966 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3969 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3971 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3976 mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3978 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3981 if (!list_is_singular(&nh_grp->fib_list))
3984 for (i = 0; i < nh_grp->count; i++) {
3985 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3987 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3992 mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3994 struct mlxsw_sp_fib6_entry *fib6_entry;
3995 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3997 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4000 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
4001 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4002 list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
4006 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4007 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4008 struct mlxsw_sp_nexthop *nh;
4010 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
4011 if (nh && nh->offloaded)
4012 mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
4014 mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
4019 mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4021 struct mlxsw_sp_fib6_entry *fib6_entry;
4022 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4024 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4026 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4027 struct fib6_info *rt = mlxsw_sp_rt6->rt;
4029 rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
4033 static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4035 switch (fib_entry->fib_node->fib->proto) {
4036 case MLXSW_SP_L3_PROTO_IPV4:
4037 mlxsw_sp_fib4_entry_offload_set(fib_entry);
4039 case MLXSW_SP_L3_PROTO_IPV6:
4040 mlxsw_sp_fib6_entry_offload_set(fib_entry);
4046 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4048 switch (fib_entry->fib_node->fib->proto) {
4049 case MLXSW_SP_L3_PROTO_IPV4:
4050 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
4052 case MLXSW_SP_L3_PROTO_IPV6:
4053 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
4059 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
4060 enum mlxsw_reg_ralue_op op, int err)
4063 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4064 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
4065 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4068 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
4069 mlxsw_sp_fib_entry_offload_set(fib_entry);
4071 mlxsw_sp_fib_entry_offload_unset(fib_entry);
4079 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4080 const struct mlxsw_sp_fib_entry *fib_entry,
4081 enum mlxsw_reg_ralue_op op)
4083 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4084 enum mlxsw_reg_ralxx_protocol proto;
4087 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4089 switch (fib->proto) {
4090 case MLXSW_SP_L3_PROTO_IPV4:
4091 p_dip = (u32 *) fib_entry->fib_node->key.addr;
4092 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4093 fib_entry->fib_node->key.prefix_len,
4096 case MLXSW_SP_L3_PROTO_IPV6:
4097 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4098 fib_entry->fib_node->key.prefix_len,
4099 fib_entry->fib_node->key.addr);
4104 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4105 struct mlxsw_sp_fib_entry *fib_entry,
4106 enum mlxsw_reg_ralue_op op)
4108 char ralue_pl[MLXSW_REG_RALUE_LEN];
4109 enum mlxsw_reg_ralue_trap_action trap_action;
4111 u32 adjacency_index = 0;
4114 /* In case the nexthop group adjacency index is valid, use it
4115 * with the provided ECMP size. Otherwise, set up a trap and pass
4116 * traffic to the kernel.
4118 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4119 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4120 adjacency_index = fib_entry->nh_group->adj_index;
4121 ecmp_size = fib_entry->nh_group->ecmp_size;
4123 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4124 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4127 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4128 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4129 adjacency_index, ecmp_size);
4130 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4133 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4134 struct mlxsw_sp_fib_entry *fib_entry,
4135 enum mlxsw_reg_ralue_op op)
4137 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4138 enum mlxsw_reg_ralue_trap_action trap_action;
4139 char ralue_pl[MLXSW_REG_RALUE_LEN];
4143 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4144 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4145 rif_index = rif->rif_index;
4147 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4148 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4151 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4152 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4154 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4157 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4158 struct mlxsw_sp_fib_entry *fib_entry,
4159 enum mlxsw_reg_ralue_op op)
4161 char ralue_pl[MLXSW_REG_RALUE_LEN];
4163 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4164 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4165 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4169 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4170 struct mlxsw_sp_fib_entry *fib_entry,
4171 enum mlxsw_reg_ralue_op op)
4173 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4174 const struct mlxsw_sp_ipip_ops *ipip_ops;
4176 if (WARN_ON(!ipip_entry))
4179 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4180 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4181 fib_entry->decap.tunnel_index);
4184 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4185 struct mlxsw_sp_fib_entry *fib_entry,
4186 enum mlxsw_reg_ralue_op op)
4188 char ralue_pl[MLXSW_REG_RALUE_LEN];
4190 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4191 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4192 fib_entry->decap.tunnel_index);
4193 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4196 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4197 struct mlxsw_sp_fib_entry *fib_entry,
4198 enum mlxsw_reg_ralue_op op)
4200 switch (fib_entry->type) {
4201 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4202 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4203 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4204 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4205 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4206 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4207 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4208 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4210 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4211 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4216 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4217 struct mlxsw_sp_fib_entry *fib_entry,
4218 enum mlxsw_reg_ralue_op op)
4220 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4222 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
4227 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4228 struct mlxsw_sp_fib_entry *fib_entry)
4230 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4231 MLXSW_REG_RALUE_OP_WRITE_WRITE);
4234 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4235 struct mlxsw_sp_fib_entry *fib_entry)
4237 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4238 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4242 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4243 const struct fib_entry_notifier_info *fen_info,
4244 struct mlxsw_sp_fib_entry *fib_entry)
4246 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4247 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4248 struct net_device *dev = fen_info->fi->fib_dev;
4249 struct mlxsw_sp_ipip_entry *ipip_entry;
4250 struct fib_info *fi = fen_info->fi;
4252 switch (fen_info->type) {
4254 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4255 MLXSW_SP_L3_PROTO_IPV4, dip);
4256 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4257 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4258 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4262 if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
4266 t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
4267 fib_entry->decap.tunnel_index = t_index;
4268 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4273 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4275 case RTN_UNREACHABLE: /* fall through */
4276 case RTN_BLACKHOLE: /* fall through */
4278 /* Packets hitting these routes need to be trapped, but
4279 * can do so with a lower priority than packets directed
4280 * at the host, so use action type local instead of trap.
4282 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4285 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4286 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4288 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4295 static struct mlxsw_sp_fib4_entry *
4296 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4297 struct mlxsw_sp_fib_node *fib_node,
4298 const struct fib_entry_notifier_info *fen_info)
4300 struct mlxsw_sp_fib4_entry *fib4_entry;
4301 struct mlxsw_sp_fib_entry *fib_entry;
4304 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4306 return ERR_PTR(-ENOMEM);
4307 fib_entry = &fib4_entry->common;
4309 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4311 goto err_fib4_entry_type_set;
4313 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4315 goto err_nexthop4_group_get;
4317 fib4_entry->prio = fen_info->fi->fib_priority;
4318 fib4_entry->tb_id = fen_info->tb_id;
4319 fib4_entry->type = fen_info->type;
4320 fib4_entry->tos = fen_info->tos;
4322 fib_entry->fib_node = fib_node;
4326 err_nexthop4_group_get:
4327 err_fib4_entry_type_set:
4329 return ERR_PTR(err);
4332 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4333 struct mlxsw_sp_fib4_entry *fib4_entry)
4335 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4339 static struct mlxsw_sp_fib4_entry *
4340 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4341 const struct fib_entry_notifier_info *fen_info)
4343 struct mlxsw_sp_fib4_entry *fib4_entry;
4344 struct mlxsw_sp_fib_node *fib_node;
4345 struct mlxsw_sp_fib *fib;
4346 struct mlxsw_sp_vr *vr;
4348 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4351 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4353 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4354 sizeof(fen_info->dst),
4359 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4360 if (fib4_entry->tb_id == fen_info->tb_id &&
4361 fib4_entry->tos == fen_info->tos &&
4362 fib4_entry->type == fen_info->type &&
4363 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4372 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4373 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4374 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4375 .key_len = sizeof(struct mlxsw_sp_fib_key),
4376 .automatic_shrinking = true,
4379 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4380 struct mlxsw_sp_fib_node *fib_node)
4382 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4383 mlxsw_sp_fib_ht_params);
4386 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4387 struct mlxsw_sp_fib_node *fib_node)
4389 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4390 mlxsw_sp_fib_ht_params);
4393 static struct mlxsw_sp_fib_node *
4394 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4395 size_t addr_len, unsigned char prefix_len)
4397 struct mlxsw_sp_fib_key key;
4399 memset(&key, 0, sizeof(key));
4400 memcpy(key.addr, addr, addr_len);
4401 key.prefix_len = prefix_len;
4402 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4405 static struct mlxsw_sp_fib_node *
4406 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4407 size_t addr_len, unsigned char prefix_len)
4409 struct mlxsw_sp_fib_node *fib_node;
4411 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4415 INIT_LIST_HEAD(&fib_node->entry_list);
4416 list_add(&fib_node->list, &fib->node_list);
4417 memcpy(fib_node->key.addr, addr, addr_len);
4418 fib_node->key.prefix_len = prefix_len;
4423 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4425 list_del(&fib_node->list);
4426 WARN_ON(!list_empty(&fib_node->entry_list));
4431 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4432 const struct mlxsw_sp_fib_entry *fib_entry)
4434 return list_first_entry(&fib_node->entry_list,
4435 struct mlxsw_sp_fib_entry, list) == fib_entry;
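/* Make sure the LPM tree bound to this FIB covers the node's prefix
 * length. If it does not, a tree matching the extended prefix usage
 * is looked up or created and then bound to all virtual routers using
 * this FIB, replacing the current tree.
 */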
4438 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4439 struct mlxsw_sp_fib_node *fib_node)
4441 struct mlxsw_sp_prefix_usage req_prefix_usage;
4442 struct mlxsw_sp_fib *fib = fib_node->fib;
4443 struct mlxsw_sp_lpm_tree *lpm_tree;
4446 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4447 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4450 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4451 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4452 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4454 if (IS_ERR(lpm_tree))
4455 return PTR_ERR(lpm_tree);
4457 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4459 goto err_lpm_tree_replace;
4462 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4465 err_lpm_tree_replace:
4466 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4470 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4471 struct mlxsw_sp_fib_node *fib_node)
4473 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4474 struct mlxsw_sp_prefix_usage req_prefix_usage;
4475 struct mlxsw_sp_fib *fib = fib_node->fib;
4478 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4480 /* Try to construct a new LPM tree from the current prefix usage
4481 * minus the unused one. If we fail, continue using the old one.
4483 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4484 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4485 fib_node->key.prefix_len);
4486 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4488 if (IS_ERR(lpm_tree))
4491 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4493 goto err_lpm_tree_replace;
4497 err_lpm_tree_replace:
4498 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4501 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4502 struct mlxsw_sp_fib_node *fib_node,
4503 struct mlxsw_sp_fib *fib)
4507 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4510 fib_node->fib = fib;
4512 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4514 goto err_fib_lpm_tree_link;
4518 err_fib_lpm_tree_link:
4519 fib_node->fib = NULL;
4520 mlxsw_sp_fib_node_remove(fib, fib_node);
4524 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4525 struct mlxsw_sp_fib_node *fib_node)
4527 struct mlxsw_sp_fib *fib = fib_node->fib;
4529 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4530 fib_node->fib = NULL;
4531 mlxsw_sp_fib_node_remove(fib, fib_node);
4534 static struct mlxsw_sp_fib_node *
4535 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4536 size_t addr_len, unsigned char prefix_len,
4537 enum mlxsw_sp_l3proto proto)
4539 struct mlxsw_sp_fib_node *fib_node;
4540 struct mlxsw_sp_fib *fib;
4541 struct mlxsw_sp_vr *vr;
4544 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4546 return ERR_CAST(vr);
4547 fib = mlxsw_sp_vr_fib(vr, proto);
4549 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4553 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4556 goto err_fib_node_create;
4559 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4561 goto err_fib_node_init;
4566 mlxsw_sp_fib_node_destroy(fib_node);
4567 err_fib_node_create:
4568 mlxsw_sp_vr_put(mlxsw_sp, vr);
4569 return ERR_PTR(err);
4572 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4573 struct mlxsw_sp_fib_node *fib_node)
4575 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4577 if (!list_empty(&fib_node->entry_list))
4579 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4580 mlxsw_sp_fib_node_destroy(fib_node);
4581 mlxsw_sp_vr_put(mlxsw_sp, vr);
4584 static struct mlxsw_sp_fib4_entry *
4585 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4586 const struct mlxsw_sp_fib4_entry *new4_entry)
4588 struct mlxsw_sp_fib4_entry *fib4_entry;
4590 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4591 if (fib4_entry->tb_id > new4_entry->tb_id)
4593 if (fib4_entry->tb_id != new4_entry->tb_id)
4595 if (fib4_entry->tos > new4_entry->tos)
4597 if (fib4_entry->prio >= new4_entry->prio ||
4598 fib4_entry->tos < new4_entry->tos)
4606 mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4607 struct mlxsw_sp_fib4_entry *new4_entry)
4609 struct mlxsw_sp_fib_node *fib_node;
4611 if (WARN_ON(!fib4_entry))
4614 fib_node = fib4_entry->common.fib_node;
4615 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4617 if (fib4_entry->tb_id != new4_entry->tb_id ||
4618 fib4_entry->tos != new4_entry->tos ||
4619 fib4_entry->prio != new4_entry->prio)
4623 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
4628 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
4629 bool replace, bool append)
4631 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
4632 struct mlxsw_sp_fib4_entry *fib4_entry;
4634 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
4637 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4638 if (replace && WARN_ON(!fib4_entry))
4641 /* Insert the new entry before the replaced one, so that we can later
4642 * remove the second.
4645 list_add_tail(&new4_entry->common.list,
4646 &fib4_entry->common.list);
4648 struct mlxsw_sp_fib4_entry *last;
4650 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4651 if (new4_entry->tb_id > last->tb_id)
4657 list_add(&new4_entry->common.list,
4658 &fib4_entry->common.list);
4660 list_add(&new4_entry->common.list,
4661 &fib_node->entry_list);
4668 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
4670 list_del(&fib4_entry->common.list);
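/* Only the first entry in a node's entry list is programmed to the device.
 * Adding or deleting an entry therefore only touches hardware when that
 * entry is, or is about to become, the first one.
 */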
4673 static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4674 struct mlxsw_sp_fib_entry *fib_entry)
4676 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4678 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4681 /* To prevent packet loss, overwrite the previously offloaded
4684 if (!list_is_singular(&fib_node->entry_list)) {
4685 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4686 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4688 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4691 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4694 static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4695 struct mlxsw_sp_fib_entry *fib_entry)
4697 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4699 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4702 /* Promote the next entry by overwriting the deleted entry */
4703 if (!list_is_singular(&fib_node->entry_list)) {
4704 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4705 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4707 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4708 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4712 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4715 static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4716 struct mlxsw_sp_fib4_entry *fib4_entry,
4717 bool replace, bool append)
4721 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
4725 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
4727 goto err_fib_node_entry_add;
4731 err_fib_node_entry_add:
4732 mlxsw_sp_fib4_node_list_remove(fib4_entry);
4737 mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4738 struct mlxsw_sp_fib4_entry *fib4_entry)
4740 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
4741 mlxsw_sp_fib4_node_list_remove(fib4_entry);
4743 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4744 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
4747 static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
4748 struct mlxsw_sp_fib4_entry *fib4_entry,
4751 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4752 struct mlxsw_sp_fib4_entry *replaced;
4757 /* We inserted the new entry before the replaced one */
4758 replaced = list_next_entry(fib4_entry, common.list);
4760 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4761 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
4762 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
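/* Handle an IPv4 route add / replace / append notification: take a reference
 * on the FIB node of the prefix, create an entry from the notifier info,
 * link it into the node (programming the device if it became the first
 * entry) and, on replace, unlink and destroy the entry it replaced.
 */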
4766 mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
4767 const struct fib_entry_notifier_info *fen_info,
4768 bool replace, bool append)
4770 struct mlxsw_sp_fib4_entry *fib4_entry;
4771 struct mlxsw_sp_fib_node *fib_node;
4774 if (mlxsw_sp->router->aborted)
4777 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4778 &fen_info->dst, sizeof(fen_info->dst),
4780 MLXSW_SP_L3_PROTO_IPV4);
4781 if (IS_ERR(fib_node)) {
4782 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4783 return PTR_ERR(fib_node);
4786 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4787 if (IS_ERR(fib4_entry)) {
4788 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4789 err = PTR_ERR(fib4_entry);
4790 goto err_fib4_entry_create;
4793 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
4796 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4797 goto err_fib4_node_entry_link;
4800 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
4804 err_fib4_node_entry_link:
4805 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4806 err_fib4_entry_create:
4807 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4811 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4812 struct fib_entry_notifier_info *fen_info)
4814 struct mlxsw_sp_fib4_entry *fib4_entry;
4815 struct mlxsw_sp_fib_node *fib_node;
4817 if (mlxsw_sp->router->aborted)
4820 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4821 if (WARN_ON(!fib4_entry))
4823 fib_node = fib4_entry->common.fib_node;
4825 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4826 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4827 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4830 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
4832 /* Packets with link-local destination IP arriving at the router
4833 * are trapped to the CPU, so no need to program specific routes
4836 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
4839 /* Multicast routes aren't supported, so ignore them. Neighbour
4840 * Discovery packets are specifically trapped.
4842 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
4845 /* Cloned routes are irrelevant in the forwarding path. */
4846 if (rt->fib6_flags & RTF_CACHE)
4852 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
4854 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4856 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4858 return ERR_PTR(-ENOMEM);
4860 /* In case of route replace, replaced route is deleted with
4861 * no notification. Take reference to prevent accessing freed
4864 mlxsw_sp_rt6->rt = rt;
4867 return mlxsw_sp_rt6;
4870 #if IS_ENABLED(CONFIG_IPV6)
4871 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
4873 fib6_info_release(rt);
4876 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
4881 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4883 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4884 kfree(mlxsw_sp_rt6);
4887 static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
4889 /* RTF_CACHE routes are ignored */
4890 return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4893 static struct fib6_info *
4894 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4896 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4900 static struct mlxsw_sp_fib6_entry *
4901 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4902 const struct fib6_info *nrt, bool replace)
4904 struct mlxsw_sp_fib6_entry *fib6_entry;
4906 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
4909 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4910 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4912 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4915 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
4917 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
4919 if (rt->fib6_metric < nrt->fib6_metric)
4921 if (rt->fib6_metric == nrt->fib6_metric &&
4922 mlxsw_sp_fib6_rt_can_mp(rt))
4924 if (rt->fib6_metric > nrt->fib6_metric)
4931 static struct mlxsw_sp_rt6 *
4932 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4933 const struct fib6_info *rt)
4935 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4937 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4938 if (mlxsw_sp_rt6->rt == rt)
4939 return mlxsw_sp_rt6;
4945 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4946 const struct fib6_info *rt,
4947 enum mlxsw_sp_ipip_type *ret)
4949 return rt->fib6_nh.nh_dev &&
4950 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
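/* Set the type of an IPv6 nexthop: if the route's device is an offloadable
 * IP-in-IP tunnel, the nexthop is initialized as an IPIP nexthop. Otherwise
 * it is an Ethernet nexthop bound to the device's RIF, with neighbour
 * resolution performed for the gateway.
 */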
4953 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4954 struct mlxsw_sp_nexthop_group *nh_grp,
4955 struct mlxsw_sp_nexthop *nh,
4956 const struct fib6_info *rt)
4958 const struct mlxsw_sp_ipip_ops *ipip_ops;
4959 struct mlxsw_sp_ipip_entry *ipip_entry;
4960 struct net_device *dev = rt->fib6_nh.nh_dev;
4961 struct mlxsw_sp_rif *rif;
4964 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4966 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4967 if (ipip_ops->can_offload(mlxsw_sp, dev,
4968 MLXSW_SP_L3_PROTO_IPV6)) {
4969 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4970 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4975 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4976 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4979 mlxsw_sp_nexthop_rif_init(nh, rif);
4981 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4983 goto err_nexthop_neigh_init;
4987 err_nexthop_neigh_init:
4988 mlxsw_sp_nexthop_rif_fini(nh);
4992 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4993 struct mlxsw_sp_nexthop *nh)
4995 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4998 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4999 struct mlxsw_sp_nexthop_group *nh_grp,
5000 struct mlxsw_sp_nexthop *nh,
5001 const struct fib6_info *rt)
5003 struct net_device *dev = rt->fib6_nh.nh_dev;
5005 nh->nh_grp = nh_grp;
5006 nh->nh_weight = rt->fib6_nh.nh_weight;
5007 memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
5008 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5010 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5014 nh->ifindex = dev->ifindex;
5016 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5019 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5020 struct mlxsw_sp_nexthop *nh)
5022 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5023 list_del(&nh->router_list_node);
5024 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5027 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5028 const struct fib6_info *rt)
5030 return rt->fib6_flags & RTF_GATEWAY ||
5031 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
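/* Allocate a nexthop group with one nexthop per fib6_info linked to the
 * entry, insert it into the nexthop group hash table so identical groups can
 * be shared between entries, and refresh it so the device is updated
 * accordingly.
 */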
5034 static struct mlxsw_sp_nexthop_group *
5035 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5036 struct mlxsw_sp_fib6_entry *fib6_entry)
5038 struct mlxsw_sp_nexthop_group *nh_grp;
5039 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5040 struct mlxsw_sp_nexthop *nh;
5045 alloc_size = sizeof(*nh_grp) +
5046 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
5047 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
5049 return ERR_PTR(-ENOMEM);
5050 INIT_LIST_HEAD(&nh_grp->fib_list);
5051 #if IS_ENABLED(CONFIG_IPV6)
5052 nh_grp->neigh_tbl = &nd_tbl;
5054 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5055 struct mlxsw_sp_rt6, list);
5056 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5057 nh_grp->count = fib6_entry->nrt6;
5058 for (i = 0; i < nh_grp->count; i++) {
5059 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5061 nh = &nh_grp->nexthops[i];
5062 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5064 goto err_nexthop6_init;
5065 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5068 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5070 goto err_nexthop_group_insert;
5072 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5075 err_nexthop_group_insert:
5077 for (i--; i >= 0; i--) {
5078 nh = &nh_grp->nexthops[i];
5079 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5082 return ERR_PTR(err);
5086 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5087 struct mlxsw_sp_nexthop_group *nh_grp)
5089 struct mlxsw_sp_nexthop *nh;
5090 int i = nh_grp->count;
5092 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5093 for (i--; i >= 0; i--) {
5094 nh = &nh_grp->nexthops[i];
5095 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5097 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5098 WARN_ON(nh_grp->adj_index_valid);
5102 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5103 struct mlxsw_sp_fib6_entry *fib6_entry)
5105 struct mlxsw_sp_nexthop_group *nh_grp;
5107 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5109 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5111 return PTR_ERR(nh_grp);
5114 list_add_tail(&fib6_entry->common.nexthop_group_node,
5116 fib6_entry->common.nh_group = nh_grp;
5121 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5122 struct mlxsw_sp_fib_entry *fib_entry)
5124 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5126 list_del(&fib_entry->nexthop_group_node);
5127 if (!list_empty(&nh_grp->fib_list))
5129 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
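/* Rebuild the nexthop group of an IPv6 entry after its route list changed:
 * detach the entry from its current group, get a new (possibly shared) group
 * matching the updated list, reprogram the entry so it uses the new group
 * and destroy the old group if no other entry uses it.
 */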
5133 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5134 struct mlxsw_sp_fib6_entry *fib6_entry)
5136 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5139 fib6_entry->common.nh_group = NULL;
5140 list_del(&fib6_entry->common.nexthop_group_node);
5142 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5144 goto err_nexthop6_group_get;
5146 /* In case this entry is offloaded, then the adjacency index
5147 * currently associated with it in the device's table is that
5148 * of the old group. Start using the new one instead.
5150 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5152 goto err_fib_node_entry_add;
5154 if (list_empty(&old_nh_grp->fib_list))
5155 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5159 err_fib_node_entry_add:
5160 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5161 err_nexthop6_group_get:
5162 list_add_tail(&fib6_entry->common.nexthop_group_node,
5163 &old_nh_grp->fib_list);
5164 fib6_entry->common.nh_group = old_nh_grp;
5169 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5170 struct mlxsw_sp_fib6_entry *fib6_entry,
5171 struct fib6_info *rt)
5173 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5176 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5177 if (IS_ERR(mlxsw_sp_rt6))
5178 return PTR_ERR(mlxsw_sp_rt6);
5180 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5183 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5185 goto err_nexthop6_group_update;
5189 err_nexthop6_group_update:
5191 list_del(&mlxsw_sp_rt6->list);
5192 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5197 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5198 struct mlxsw_sp_fib6_entry *fib6_entry,
5199 struct fib6_info *rt)
5201 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5203 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
5204 if (WARN_ON(!mlxsw_sp_rt6))
5208 list_del(&mlxsw_sp_rt6->list);
5209 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5210 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5213 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5214 struct mlxsw_sp_fib_entry *fib_entry,
5215 const struct fib6_info *rt)
5217 /* Packets hitting RTF_REJECT routes need to be discarded by the
5218 * stack. We can rely on their destination device not having a
5219 * RIF (it's the loopback device) and can thus use action type
5220 * local, which will cause them to be trapped with a lower
5221 * priority than packets that need to be locally received.
5223 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5224 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5225 else if (rt->fib6_flags & RTF_REJECT)
5226 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5227 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5228 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5230 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5234 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5236 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5238 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5241 list_del(&mlxsw_sp_rt6->list);
5242 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5246 static struct mlxsw_sp_fib6_entry *
5247 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5248 struct mlxsw_sp_fib_node *fib_node,
5249 struct fib6_info *rt)
5251 struct mlxsw_sp_fib6_entry *fib6_entry;
5252 struct mlxsw_sp_fib_entry *fib_entry;
5253 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5256 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5258 return ERR_PTR(-ENOMEM);
5259 fib_entry = &fib6_entry->common;
5261 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5262 if (IS_ERR(mlxsw_sp_rt6)) {
5263 err = PTR_ERR(mlxsw_sp_rt6);
5264 goto err_rt6_create;
5267 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
5269 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5270 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5271 fib6_entry->nrt6 = 1;
5272 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5274 goto err_nexthop6_group_get;
5276 fib_entry->fib_node = fib_node;
5280 err_nexthop6_group_get:
5281 list_del(&mlxsw_sp_rt6->list);
5282 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5285 return ERR_PTR(err);
5288 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5289 struct mlxsw_sp_fib6_entry *fib6_entry)
5291 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5292 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5293 WARN_ON(fib6_entry->nrt6);
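/* Find the entry in the node's list before which the new route should be
 * linked. Entries are ordered by table ID and then by metric; when replacing,
 * an existing entry with the same metric is preferred.
 */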
5297 static struct mlxsw_sp_fib6_entry *
5298 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5299 const struct fib6_info *nrt, bool replace)
5301 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
5303 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5304 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5306 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
5308 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5310 if (replace && rt->fib6_metric == nrt->fib6_metric) {
5311 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5312 mlxsw_sp_fib6_rt_can_mp(nrt))
5314 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5315 fallback = fallback ?: fib6_entry;
5317 if (rt->fib6_metric > nrt->fib6_metric)
5318 return fallback ?: fib6_entry;
5325 mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5328 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5329 struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5330 struct mlxsw_sp_fib6_entry *fib6_entry;
5332 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5334 if (replace && WARN_ON(!fib6_entry))
5338 list_add_tail(&new6_entry->common.list,
5339 &fib6_entry->common.list);
5341 struct mlxsw_sp_fib6_entry *last;
5343 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5344 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5346 if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
5352 list_add(&new6_entry->common.list,
5353 &fib6_entry->common.list);
5355 list_add(&new6_entry->common.list,
5356 &fib_node->entry_list);
5363 mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5365 list_del(&fib6_entry->common.list);
5368 static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
5369 struct mlxsw_sp_fib6_entry *fib6_entry,
5374 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
5378 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5380 goto err_fib_node_entry_add;
5384 err_fib_node_entry_add:
5385 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5390 mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5391 struct mlxsw_sp_fib6_entry *fib6_entry)
5393 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5394 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5397 static struct mlxsw_sp_fib6_entry *
5398 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5399 const struct fib6_info *rt)
5401 struct mlxsw_sp_fib6_entry *fib6_entry;
5402 struct mlxsw_sp_fib_node *fib_node;
5403 struct mlxsw_sp_fib *fib;
5404 struct mlxsw_sp_vr *vr;
5406 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5409 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5411 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5412 sizeof(rt->fib6_dst.addr),
5417 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5418 struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5420 if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
5421 rt->fib6_metric == iter_rt->fib6_metric &&
5422 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5429 static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5430 struct mlxsw_sp_fib6_entry *fib6_entry,
5433 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5434 struct mlxsw_sp_fib6_entry *replaced;
5439 replaced = list_next_entry(fib6_entry, common.list);
5441 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5442 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5443 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5446 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5447 struct fib6_info *rt, bool replace)
5449 struct mlxsw_sp_fib6_entry *fib6_entry;
5450 struct mlxsw_sp_fib_node *fib_node;
5453 if (mlxsw_sp->router->aborted)
5456 if (rt->fib6_src.plen)
5459 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5462 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5464 sizeof(rt->fib6_dst.addr),
5466 MLXSW_SP_L3_PROTO_IPV6);
5467 if (IS_ERR(fib_node))
5468 return PTR_ERR(fib_node);
5470 /* Before creating a new entry, try to append route to an existing
5473 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
5475 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5477 goto err_fib6_entry_nexthop_add;
5481 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5482 if (IS_ERR(fib6_entry)) {
5483 err = PTR_ERR(fib6_entry);
5484 goto err_fib6_entry_create;
5487 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
5489 goto err_fib6_node_entry_link;
5491 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5495 err_fib6_node_entry_link:
5496 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5497 err_fib6_entry_create:
5498 err_fib6_entry_nexthop_add:
5499 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5503 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5504 struct fib6_info *rt)
5506 struct mlxsw_sp_fib6_entry *fib6_entry;
5507 struct mlxsw_sp_fib_node *fib_node;
5509 if (mlxsw_sp->router->aborted)
5512 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5515 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5516 if (WARN_ON(!fib6_entry))
5519 /* If route is part of a multipath entry, but not the last one
5520 * removed, then only reduce its nexthop group.
5522 if (!list_is_singular(&fib6_entry->rt6_list)) {
5523 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5527 fib_node = fib6_entry->common.fib_node;
5529 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5530 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5531 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
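/* Program the abort trap for one protocol: bind all virtual routers to a
 * minimal LPM tree and install a default route with an IP2ME action in each
 * of them, so that traffic is trapped to the CPU and forwarded by the kernel
 * once offload has been aborted.
 */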
5534 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5535 enum mlxsw_reg_ralxx_protocol proto,
5538 char ralta_pl[MLXSW_REG_RALTA_LEN];
5539 char ralst_pl[MLXSW_REG_RALST_LEN];
5542 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5543 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5547 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5548 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5552 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5553 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5554 char raltb_pl[MLXSW_REG_RALTB_LEN];
5555 char ralue_pl[MLXSW_REG_RALUE_LEN];
5557 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5558 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5563 mlxsw_reg_ralue_pack(ralue_pl, proto,
5564 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5565 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5566 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5575 static struct mlxsw_sp_mr_table *
5576 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5578 if (family == RTNL_FAMILY_IPMR)
5579 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5581 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5584 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5585 struct mfc_entry_notifier_info *men_info,
5588 struct mlxsw_sp_mr_table *mrt;
5589 struct mlxsw_sp_vr *vr;
5591 if (mlxsw_sp->router->aborted)
5594 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5598 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5599 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5602 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5603 struct mfc_entry_notifier_info *men_info)
5605 struct mlxsw_sp_mr_table *mrt;
5606 struct mlxsw_sp_vr *vr;
5608 if (mlxsw_sp->router->aborted)
5611 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5615 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5616 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5617 mlxsw_sp_vr_put(mlxsw_sp, vr);
5621 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5622 struct vif_entry_notifier_info *ven_info)
5624 struct mlxsw_sp_mr_table *mrt;
5625 struct mlxsw_sp_rif *rif;
5626 struct mlxsw_sp_vr *vr;
5628 if (mlxsw_sp->router->aborted)
5631 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5635 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5636 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5637 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5638 ven_info->vif_index,
5639 ven_info->vif_flags, rif);
5643 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5644 struct vif_entry_notifier_info *ven_info)
5646 struct mlxsw_sp_mr_table *mrt;
5647 struct mlxsw_sp_vr *vr;
5649 if (mlxsw_sp->router->aborted)
5652 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5656 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5657 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5658 mlxsw_sp_vr_put(mlxsw_sp, vr);
5661 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5663 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5666 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5667 MLXSW_SP_LPM_TREE_MIN);
5671 /* The multicast router code does not need an abort trap as by default,
5672 * packets that don't match any routes are trapped to the CPU.
5675 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5676 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5677 MLXSW_SP_LPM_TREE_MIN + 1);
5680 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5681 struct mlxsw_sp_fib_node *fib_node)
5683 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
5685 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5687 bool do_break = &tmp->common.list == &fib_node->entry_list;
5689 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5690 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5691 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5692 /* Break when entry list is empty and node was freed.
5693 * Otherwise, we'll access freed memory in the next
5701 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5702 struct mlxsw_sp_fib_node *fib_node)
5704 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5706 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5708 bool do_break = &tmp->common.list == &fib_node->entry_list;
5710 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5711 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5712 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5718 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5719 struct mlxsw_sp_fib_node *fib_node)
5721 switch (fib_node->fib->proto) {
5722 case MLXSW_SP_L3_PROTO_IPV4:
5723 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5725 case MLXSW_SP_L3_PROTO_IPV6:
5726 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5731 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5732 struct mlxsw_sp_vr *vr,
5733 enum mlxsw_sp_l3proto proto)
5735 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5736 struct mlxsw_sp_fib_node *fib_node, *tmp;
5738 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5739 bool do_break = &tmp->list == &fib->node_list;
5741 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5747 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5751 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5752 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5754 if (!mlxsw_sp_vr_is_used(vr))
5757 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5758 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5759 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5761 /* If virtual router was only used for IPv4, then it's no
5764 if (!mlxsw_sp_vr_is_used(vr))
5766 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5770 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
5774 if (mlxsw_sp->router->aborted)
5776 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
5777 mlxsw_sp_router_fib_flush(mlxsw_sp);
5778 mlxsw_sp->router->aborted = true;
5779 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5781 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
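/* FIB notifications arrive in atomic context. The notifier copies the
 * relevant info into a work item, takes a reference on the objects it points
 * to (fib_info, fib6_info, MFC cache entry or VIF device) and schedules the
 * work, which applies the change in process context and drops the reference.
 */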
5784 struct mlxsw_sp_fib_event_work {
5785 struct work_struct work;
5787 struct fib6_entry_notifier_info fen6_info;
5788 struct fib_entry_notifier_info fen_info;
5789 struct fib_rule_notifier_info fr_info;
5790 struct fib_nh_notifier_info fnh_info;
5791 struct mfc_entry_notifier_info men_info;
5792 struct vif_entry_notifier_info ven_info;
5794 struct mlxsw_sp *mlxsw_sp;
5795 unsigned long event;
5798 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
5800 struct mlxsw_sp_fib_event_work *fib_work =
5801 container_of(work, struct mlxsw_sp_fib_event_work, work);
5802 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5803 bool replace, append;
5806 /* Protect internal structures from changes */
5808 mlxsw_sp_span_respin(mlxsw_sp);
5810 switch (fib_work->event) {
5811 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5812 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5813 case FIB_EVENT_ENTRY_ADD:
5814 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5815 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5816 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
5819 mlxsw_sp_router_fib_abort(mlxsw_sp);
5820 fib_info_put(fib_work->fen_info.fi);
5822 case FIB_EVENT_ENTRY_DEL:
5823 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5824 fib_info_put(fib_work->fen_info.fi);
5826 case FIB_EVENT_RULE_ADD:
5827 /* If we get here, a rule was added that we do not support.
5828 * Just abort FIB offload.
5830 mlxsw_sp_router_fib_abort(mlxsw_sp);
5832 case FIB_EVENT_NH_ADD: /* fall through */
5833 case FIB_EVENT_NH_DEL:
5834 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5835 fib_work->fnh_info.fib_nh);
5836 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5843 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5845 struct mlxsw_sp_fib_event_work *fib_work =
5846 container_of(work, struct mlxsw_sp_fib_event_work, work);
5847 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5852 mlxsw_sp_span_respin(mlxsw_sp);
5854 switch (fib_work->event) {
5855 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5856 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5857 case FIB_EVENT_ENTRY_ADD:
5858 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5859 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
5860 fib_work->fen6_info.rt, replace);
5862 mlxsw_sp_router_fib_abort(mlxsw_sp);
5863 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5865 case FIB_EVENT_ENTRY_DEL:
5866 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5867 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5869 case FIB_EVENT_RULE_ADD:
5870 /* If we get here, a rule was added that we do not support.
5871 * Just abort FIB offload.
5873 mlxsw_sp_router_fib_abort(mlxsw_sp);
5880 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5882 struct mlxsw_sp_fib_event_work *fib_work =
5883 container_of(work, struct mlxsw_sp_fib_event_work, work);
5884 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5889 switch (fib_work->event) {
5890 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5891 case FIB_EVENT_ENTRY_ADD:
5892 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5894 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5897 mlxsw_sp_router_fib_abort(mlxsw_sp);
5898 mr_cache_put(fib_work->men_info.mfc);
5900 case FIB_EVENT_ENTRY_DEL:
5901 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5902 mr_cache_put(fib_work->men_info.mfc);
5904 case FIB_EVENT_VIF_ADD:
5905 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5906 &fib_work->ven_info);
5908 mlxsw_sp_router_fib_abort(mlxsw_sp);
5909 dev_put(fib_work->ven_info.dev);
5911 case FIB_EVENT_VIF_DEL:
5912 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5913 &fib_work->ven_info);
5914 dev_put(fib_work->ven_info.dev);
5916 case FIB_EVENT_RULE_ADD:
5917 /* If we get here, a rule was added that we do not support.
5918 * Just abort FIB offload.
5920 mlxsw_sp_router_fib_abort(mlxsw_sp);
5927 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5928 struct fib_notifier_info *info)
5930 struct fib_entry_notifier_info *fen_info;
5931 struct fib_nh_notifier_info *fnh_info;
5933 switch (fib_work->event) {
5934 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5935 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5936 case FIB_EVENT_ENTRY_ADD: /* fall through */
5937 case FIB_EVENT_ENTRY_DEL:
5938 fen_info = container_of(info, struct fib_entry_notifier_info,
5940 fib_work->fen_info = *fen_info;
5941 /* Take reference on fib_info to prevent it from being
5942 * freed while work is queued. Release it afterwards.
5944 fib_info_hold(fib_work->fen_info.fi);
5946 case FIB_EVENT_NH_ADD: /* fall through */
5947 case FIB_EVENT_NH_DEL:
5948 fnh_info = container_of(info, struct fib_nh_notifier_info,
5950 fib_work->fnh_info = *fnh_info;
5951 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5956 static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5957 struct fib_notifier_info *info)
5959 struct fib6_entry_notifier_info *fen6_info;
5961 switch (fib_work->event) {
5962 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5963 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5964 case FIB_EVENT_ENTRY_ADD: /* fall through */
5965 case FIB_EVENT_ENTRY_DEL:
5966 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5968 fib_work->fen6_info = *fen6_info;
5969 fib6_info_hold(fib_work->fen6_info.rt);
5975 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5976 struct fib_notifier_info *info)
5978 switch (fib_work->event) {
5979 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5980 case FIB_EVENT_ENTRY_ADD: /* fall through */
5981 case FIB_EVENT_ENTRY_DEL:
5982 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5983 mr_cache_hold(fib_work->men_info.mfc);
5985 case FIB_EVENT_VIF_ADD: /* fall through */
5986 case FIB_EVENT_VIF_DEL:
5987 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5988 dev_hold(fib_work->ven_info.dev);
5993 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5994 struct fib_notifier_info *info,
5995 struct mlxsw_sp *mlxsw_sp)
5997 struct netlink_ext_ack *extack = info->extack;
5998 struct fib_rule_notifier_info *fr_info;
5999 struct fib_rule *rule;
6002 /* nothing to do at the moment */
6003 if (event == FIB_EVENT_RULE_DEL)
6006 if (mlxsw_sp->router->aborted)
6009 fr_info = container_of(info, struct fib_rule_notifier_info, info);
6010 rule = fr_info->rule;
6012 switch (info->family) {
6014 if (!fib4_rule_default(rule) && !rule->l3mdev)
6018 if (!fib6_rule_default(rule) && !rule->l3mdev)
6021 case RTNL_FAMILY_IPMR:
6022 if (!ipmr_rule_default(rule) && !rule->l3mdev)
6025 case RTNL_FAMILY_IP6MR:
6026 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6032 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6037 /* Called with rcu_read_lock() */
6038 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6039 unsigned long event, void *ptr)
6041 struct mlxsw_sp_fib_event_work *fib_work;
6042 struct fib_notifier_info *info = ptr;
6043 struct mlxsw_sp_router *router;
6046 if (!net_eq(info->net, &init_net) ||
6047 (info->family != AF_INET && info->family != AF_INET6 &&
6048 info->family != RTNL_FAMILY_IPMR &&
6049 info->family != RTNL_FAMILY_IP6MR))
6052 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6055 case FIB_EVENT_RULE_ADD: /* fall through */
6056 case FIB_EVENT_RULE_DEL:
6057 err = mlxsw_sp_router_fib_rule_event(event, info,
6059 if (!err || info->extack)
6060 return notifier_from_errno(err);
6062 case FIB_EVENT_ENTRY_ADD:
6063 if (router->aborted) {
6064 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6065 return notifier_from_errno(-EINVAL);
6070 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6071 if (WARN_ON(!fib_work))
6074 fib_work->mlxsw_sp = router->mlxsw_sp;
6075 fib_work->event = event;
6077 switch (info->family) {
6079 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6080 mlxsw_sp_router_fib4_event(fib_work, info);
6083 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6084 mlxsw_sp_router_fib6_event(fib_work, info);
6086 case RTNL_FAMILY_IP6MR:
6087 case RTNL_FAMILY_IPMR:
6088 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6089 mlxsw_sp_router_fibmr_event(fib_work, info);
6093 mlxsw_core_schedule_work(&fib_work->work);
6098 struct mlxsw_sp_rif *
6099 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6100 const struct net_device *dev)
6104 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6105 if (mlxsw_sp->router->rifs[i] &&
6106 mlxsw_sp->router->rifs[i]->dev == dev)
6107 return mlxsw_sp->router->rifs[i];
6112 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6114 char ritr_pl[MLXSW_REG_RITR_LEN];
6117 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6118 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6119 if (WARN_ON_ONCE(err))
6122 mlxsw_reg_ritr_enable_set(ritr_pl, false);
6123 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6126 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6127 struct mlxsw_sp_rif *rif)
6129 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6130 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6131 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
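/* Decide whether an address event on @dev should result in a RIF being
 * configured or removed, based on whether the device still has any IPv4 or
 * IPv6 addresses and on whether it already has a RIF.
 */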
6135 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6136 unsigned long event)
6138 struct inet6_dev *inet6_dev;
6139 bool addr_list_empty = true;
6140 struct in_device *idev;
6146 idev = __in_dev_get_rtnl(dev);
6147 if (idev && idev->ifa_list)
6148 addr_list_empty = false;
6150 inet6_dev = __in6_dev_get(dev);
6151 if (addr_list_empty && inet6_dev &&
6152 !list_empty(&inet6_dev->addr_list))
6153 addr_list_empty = false;
6155 /* macvlans do not have a RIF, but rather piggyback on the
6156 * RIF of their lower device.
6158 if (netif_is_macvlan(dev) && addr_list_empty)
6161 if (rif && addr_list_empty &&
6162 !netif_is_l3_slave(rif->dev))
6164 /* It is possible we already removed the RIF ourselves
6165 * if it was assigned to a netdev that is now a bridge
6174 static enum mlxsw_sp_rif_type
6175 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6176 const struct net_device *dev)
6178 enum mlxsw_sp_fid_type type;
6180 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6181 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6183 /* Otherwise RIF type is derived from the type of the underlying FID. */
6184 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6185 type = MLXSW_SP_FID_TYPE_8021Q;
6186 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6187 type = MLXSW_SP_FID_TYPE_8021Q;
6188 else if (netif_is_bridge_master(dev))
6189 type = MLXSW_SP_FID_TYPE_8021D;
6191 type = MLXSW_SP_FID_TYPE_RFID;
6193 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6196 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6200 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6201 if (!mlxsw_sp->router->rifs[i]) {
6210 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6212 struct net_device *l3_dev)
6214 struct mlxsw_sp_rif *rif;
6216 rif = kzalloc(rif_size, GFP_KERNEL);
6220 INIT_LIST_HEAD(&rif->nexthop_list);
6221 INIT_LIST_HEAD(&rif->neigh_list);
6222 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6223 rif->mtu = l3_dev->mtu;
6226 rif->rif_index = rif_index;
6231 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6234 return mlxsw_sp->router->rifs[rif_index];
6237 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6239 return rif->rif_index;
6242 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6244 return lb_rif->common.rif_index;
6247 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6249 return lb_rif->ul_vr_id;
6252 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6254 return rif->dev->ifindex;
6257 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6262 struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
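/* Create a router interface for the netdevice in @params: derive the RIF
 * type from the device, take a reference on the virtual router of the
 * device's FIB table, allocate a free RIF index, get the backing FID where
 * applicable, configure the RIF through the type-specific ops and register
 * it with the multicast routing tables.
 */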
6267 static struct mlxsw_sp_rif *
6268 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6269 const struct mlxsw_sp_rif_params *params,
6270 struct netlink_ext_ack *extack)
6272 u32 tb_id = l3mdev_fib_table(params->dev);
6273 const struct mlxsw_sp_rif_ops *ops;
6274 struct mlxsw_sp_fid *fid = NULL;
6275 enum mlxsw_sp_rif_type type;
6276 struct mlxsw_sp_rif *rif;
6277 struct mlxsw_sp_vr *vr;
6281 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6282 ops = mlxsw_sp->router->rif_ops_arr[type];
6284 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6286 return ERR_CAST(vr);
6289 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6291 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6292 goto err_rif_index_alloc;
6295 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6300 rif->mlxsw_sp = mlxsw_sp;
6304 fid = ops->fid_get(rif, extack);
6313 ops->setup(rif, params);
6315 err = ops->configure(rif);
6319 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6320 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6322 goto err_mr_rif_add;
6325 mlxsw_sp_rif_counters_alloc(rif);
6326 mlxsw_sp->router->rifs[rif_index] = rif;
6331 for (i--; i >= 0; i--)
6332 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6333 ops->deconfigure(rif);
6336 mlxsw_sp_fid_put(fid);
6340 err_rif_index_alloc:
6342 mlxsw_sp_vr_put(mlxsw_sp, vr);
6343 return ERR_PTR(err);
6346 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6348 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6349 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6350 struct mlxsw_sp_fid *fid = rif->fid;
6351 struct mlxsw_sp_vr *vr;
6354 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6355 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6357 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6358 mlxsw_sp_rif_counters_free(rif);
6359 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6360 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6361 ops->deconfigure(rif);
6363 /* Loopback RIFs are not associated with a FID. */
6364 mlxsw_sp_fid_put(fid);
6367 mlxsw_sp_vr_put(mlxsw_sp, vr);
6370 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6371 struct net_device *dev)
6373 struct mlxsw_sp_rif *rif;
6375 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6378 mlxsw_sp_rif_destroy(rif);
6382 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6383 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6385 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6387 params->vid = mlxsw_sp_port_vlan->vid;
6388 params->lag = mlxsw_sp_port->lagged;
6390 params->lag_id = mlxsw_sp_port->lag_id;
6392 params->system_port = mlxsw_sp_port->local_port;
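/* Join a {port, VLAN} to a router interface: create a sub-port RIF for the
 * L3 device if one does not exist yet, take a reference on its rFID, map the
 * {port, VID} to that FID, disable learning on the VID and set it to the
 * forwarding STP state.
 */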
6396 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6397 struct net_device *l3_dev,
6398 struct netlink_ext_ack *extack)
6400 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6401 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
6402 u16 vid = mlxsw_sp_port_vlan->vid;
6403 struct mlxsw_sp_rif *rif;
6404 struct mlxsw_sp_fid *fid;
6407 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6409 struct mlxsw_sp_rif_params params = {
6413 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
6414 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6416 return PTR_ERR(rif);
6419 /* FID was already created, just take a reference */
6420 fid = rif->ops->fid_get(rif, extack);
6421 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6423 goto err_fid_port_vid_map;
6425 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
6427 goto err_port_vid_learning_set;
6429 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
6430 BR_STATE_FORWARDING);
6432 goto err_port_vid_stp_set;
6434 mlxsw_sp_port_vlan->fid = fid;
6438 err_port_vid_stp_set:
6439 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6440 err_port_vid_learning_set:
6441 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6442 err_fid_port_vid_map:
6443 mlxsw_sp_fid_put(fid);
6448 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6450 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6451 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
6452 u16 vid = mlxsw_sp_port_vlan->vid;
6454 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6457 mlxsw_sp_port_vlan->fid = NULL;
6458 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6459 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6460 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6461 /* If router port holds the last reference on the rFID, then the
6462 * associated Sub-port RIF will be destroyed.
6464 mlxsw_sp_fid_put(fid);
6467 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6468 struct net_device *port_dev,
6469 unsigned long event, u16 vid,
6470 struct netlink_ext_ack *extack)
6472 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
6473 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
6475 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
6476 if (WARN_ON(!mlxsw_sp_port_vlan))
6481 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
6484 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6491 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
6492 unsigned long event,
6493 struct netlink_ext_ack *extack)
6495 if (netif_is_bridge_port(port_dev) ||
6496 netif_is_lag_port(port_dev) ||
6497 netif_is_ovs_port(port_dev))
6500 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6504 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6505 struct net_device *lag_dev,
6506 unsigned long event, u16 vid,
6507 struct netlink_ext_ack *extack)
6509 struct net_device *port_dev;
6510 struct list_head *iter;
6513 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6514 if (mlxsw_sp_port_dev_check(port_dev)) {
6515 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6527 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
6528 unsigned long event,
6529 struct netlink_ext_ack *extack)
6531 if (netif_is_bridge_port(lag_dev))
6534 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6538 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
6539 unsigned long event,
6540 struct netlink_ext_ack *extack)
6542 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6543 struct mlxsw_sp_rif_params params = {
6546 struct mlxsw_sp_rif *rif;
6550 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6552 return PTR_ERR(rif);
6555 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6556 mlxsw_sp_rif_destroy(rif);
6563 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
6564 unsigned long event,
6565 struct netlink_ext_ack *extack)
6567 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6568 u16 vid = vlan_dev_vlan_id(vlan_dev);
6570 if (netif_is_bridge_port(vlan_dev))
6573 if (mlxsw_sp_port_dev_check(real_dev))
6574 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
6575 event, vid, extack);
6576 else if (netif_is_lag_master(real_dev))
6577 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
6579 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
6580 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
6585 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
6587 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
6588 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6590 return ether_addr_equal_masked(mac, vrrp4, mask);
6593 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
6595 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
6596 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6598 return ether_addr_equal_masked(mac, vrrp6, mask);
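/* If the MAC is a VRRP virtual MAC, program the VRID (its last byte) into
 * the RIF, so that the RIF also serves traffic sent to the virtual router's
 * address. Non-VRRP MACs are left untouched.
 */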
6601 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
6602 const u8 *mac, bool adding)
6604 char ritr_pl[MLXSW_REG_RITR_LEN];
6605 u8 vrrp_id = adding ? mac[5] : 0;
6608 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
6609 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
6612 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
6613 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6617 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
6618 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
6620 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
6622 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6625 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
6626 const struct net_device *macvlan_dev,
6627 struct netlink_ext_ack *extack)
6629 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6630 struct mlxsw_sp_rif *rif;
6633 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6635 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6639 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6640 mlxsw_sp_fid_index(rif->fid), true);
6644 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
6645 macvlan_dev->dev_addr, true);
6647 goto err_rif_vrrp_add;
6649 /* Make sure the bridge driver does not have this MAC pointing at
6652 if (rif->ops->fdb_del)
6653 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
6658 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6659 mlxsw_sp_fid_index(rif->fid), false);
6663 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6664 const struct net_device *macvlan_dev)
6666 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6667 struct mlxsw_sp_rif *rif;
6669 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6670 /* If we do not have a RIF, then we already took care of
6671 * removing the macvlan's MAC during RIF deletion.
6675 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
6677 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6678 mlxsw_sp_fid_index(rif->fid), false);
6681 static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
6682 unsigned long event,
6683 struct netlink_ext_ack *extack)
6685 struct mlxsw_sp *mlxsw_sp;
6687 mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
6693 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
6695 mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6702 static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
6703 unsigned long event,
6704 struct netlink_ext_ack *extack)
6706 if (mlxsw_sp_port_dev_check(dev))
6707 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
6708 else if (netif_is_lag_master(dev))
6709 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
6710 else if (netif_is_bridge_master(dev))
6711 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
6712 else if (is_vlan_dev(dev))
6713 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
6714 else if (netif_is_macvlan(dev))
6715 return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
6720 int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6721 unsigned long event, void *ptr)
6723 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6724 struct net_device *dev = ifa->ifa_dev->dev;
6725 struct mlxsw_sp *mlxsw_sp;
6726 struct mlxsw_sp_rif *rif;
6729 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6730 if (event == NETDEV_UP)
6733 mlxsw_sp = mlxsw_sp_lower_get(dev);
6737 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6738 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6741 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
6743 return notifier_from_errno(err);
6746 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6747 unsigned long event, void *ptr)
6749 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6750 struct net_device *dev = ivi->ivi_dev->dev;
6751 struct mlxsw_sp *mlxsw_sp;
6752 struct mlxsw_sp_rif *rif;
6755 mlxsw_sp = mlxsw_sp_lower_get(dev);
6759 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6760 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6763 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
6765 return notifier_from_errno(err);
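/* The inet6addr notifier is called in an atomic context, so the RIF update
 * is deferred to a work item. The inet6addr validator notifier below runs
 * the update directly and can return an error to veto the address.
 */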
6768 struct mlxsw_sp_inet6addr_event_work {
6769 struct work_struct work;
6770 struct net_device *dev;
6771 unsigned long event;
6774 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6776 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6777 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6778 struct net_device *dev = inet6addr_work->dev;
6779 unsigned long event = inet6addr_work->event;
6780 struct mlxsw_sp *mlxsw_sp;
6781 struct mlxsw_sp_rif *rif;
6784 mlxsw_sp = mlxsw_sp_lower_get(dev);
6788 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6789 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6792 __mlxsw_sp_inetaddr_event(dev, event, NULL);
6796 kfree(inet6addr_work);
6799 /* Called with rcu_read_lock() */
6800 int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6801 unsigned long event, void *ptr)
6803 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6804 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6805 struct net_device *dev = if6->idev->dev;
6807 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6808 if (event == NETDEV_UP)
6811 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6814 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6815 if (!inet6addr_work)
6818 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6819 inet6addr_work->dev = dev;
6820 inet6addr_work->event = event;
6822 mlxsw_core_schedule_work(&inet6addr_work->work);
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr)
{
        struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
        struct net_device *dev = i6vi->i6vi_dev->dev;
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_sp_rif *rif;
        int err = 0;

        mlxsw_sp = mlxsw_sp_lower_get(dev);
        if (!mlxsw_sp)
                goto out;

        rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
        if (!mlxsw_sp_rif_should_config(rif, dev, event))
                goto out;

        err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
out:
        return notifier_from_errno(err);
}

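/* Update the MAC address and MTU of an existing RIF by querying its RITR
 * record, patching it and writing it back.
 */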
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
                             const char *mac, int mtu)
{
        char ritr_pl[MLXSW_REG_RITR_LEN];
        int err;

        mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
        if (err)
                return err;

        mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
        mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
        mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

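/* React to a MAC address or MTU change on a netdevice that already has a
 * RIF: replace the FDB entry for the RIF's MAC, edit the RIF in hardware
 * and roll everything back if any step fails.
 */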
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_sp_rif *rif;
        u16 fid_index;
        int err;

        mlxsw_sp = mlxsw_sp_lower_get(dev);
        if (!mlxsw_sp)
                return 0;

        rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
        if (!rif)
                return 0;
        fid_index = mlxsw_sp_fid_index(rif->fid);

        err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
        if (err)
                return err;

        err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
                                dev->mtu);
        if (err)
                goto err_rif_edit;

        err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
        if (err)
                goto err_rif_fdb_op;

        if (rif->mtu != dev->mtu) {
                struct mlxsw_sp_vr *vr;
                int i;

                /* The RIF is relevant only to its mr_table instance, as unlike
                 * unicast routing, in multicast routing a RIF cannot be shared
                 * between several multicast routing tables.
                 */
                vr = &mlxsw_sp->router->vrs[rif->vr_id];
                for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
                        mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
                                                   rif, dev->mtu);
        }

        ether_addr_copy(rif->addr, dev->dev_addr);
        rif->mtu = dev->mtu;
        netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

        return 0;

err_rif_fdb_op:
        mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
        mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
        return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
                                  struct net_device *l3_dev,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_rif *rif;

        /* If netdev is already associated with a RIF, then we need to
         * destroy it and create a new one with the new virtual router ID.
         */
        rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
        if (rif)
                __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);

        return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
                                    struct net_device *l3_dev)
{
        struct mlxsw_sp_rif *rif;

        rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
        if (!rif)
                return;
        __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
                                 struct netdev_notifier_changeupper_info *info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
        int err = 0;

        /* We do not create a RIF for a macvlan, but only use it to
         * direct more MAC addresses to the router.
         */
        if (!mlxsw_sp || netif_is_macvlan(l3_dev))
                return 0;

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                break;
        case NETDEV_CHANGEUPPER:
                if (info->linking) {
                        struct netlink_ext_ack *extack;

                        extack = netdev_notifier_info_to_extack(&info->info);
                        err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
                } else {
                        mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
                }
                break;
        }

        return err;
}

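/* netdev_walk_all_upper_dev_rcu() callback: remove the FDB entry that was
 * installed for an upper macvlan's MAC address.
 */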
static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
{
        struct mlxsw_sp_rif *rif = data;

        if (!netif_is_macvlan(dev))
                return 0;

        return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
                                   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
        if (!netif_is_macvlan_port(rif->dev))
                return 0;

        netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
        return netdev_walk_all_upper_dev_rcu(rif->dev,
                                             __mlxsw_sp_rif_macvlan_flush, rif);
}

static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
        return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
                                       const struct mlxsw_sp_rif_params *params)
{
        struct mlxsw_sp_rif_subport *rif_subport;

        rif_subport = mlxsw_sp_rif_subport_rif(rif);
        rif_subport->vid = params->vid;
        rif_subport->lag = params->lag;
        if (params->lag)
                rif_subport->lag_id = params->lag_id;
        else
                rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_rif_subport *rif_subport;
        char ritr_pl[MLXSW_REG_RITR_LEN];

        rif_subport = mlxsw_sp_rif_subport_rif(rif);
        mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
                            rif->rif_index, rif->vr_id, rif->dev->mtu);
        mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
        mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
                                  rif_subport->lag ? rif_subport->lag_id :
                                                     rif_subport->system_port,
                                  rif_subport->vid);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
        int err;

        err = mlxsw_sp_rif_subport_op(rif, true);
        if (err)
                return err;

        err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                                  mlxsw_sp_fid_index(rif->fid), true);
        if (err)
                goto err_rif_fdb_op;

        mlxsw_sp_fid_rif_set(rif->fid, rif);
        return 0;

err_rif_fdb_op:
        mlxsw_sp_rif_subport_op(rif, false);
        return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_fid *fid = rif->fid;

        mlxsw_sp_fid_rif_set(fid, NULL);
        mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                            mlxsw_sp_fid_index(fid), false);
        mlxsw_sp_rif_macvlan_flush(rif);
        mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
                             struct netlink_ext_ack *extack)
{
        return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
        .type = MLXSW_SP_RIF_TYPE_SUBPORT,
        .rif_size = sizeof(struct mlxsw_sp_rif_subport),
        .setup = mlxsw_sp_rif_subport_setup,
        .configure = mlxsw_sp_rif_subport_configure,
        .deconfigure = mlxsw_sp_rif_subport_deconfigure,
        .fid_get = mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
                                    enum mlxsw_reg_ritr_if_type type,
                                    u16 vid_fid, bool enable)
{
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        char ritr_pl[MLXSW_REG_RITR_LEN];

        mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
                            rif->dev->mtu);
        mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
        mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

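/* The "router port" is a virtual local port one past the last physical
 * port. It is used below as a flood-table member so that broadcast and
 * multicast traffic in a FID also reaches the router.
 */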
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
        int err;

        err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
        if (err)
                return err;

        err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
                                     mlxsw_sp_router_port(mlxsw_sp), true);
        if (err)
                goto err_fid_mc_flood_set;

        err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                                     mlxsw_sp_router_port(mlxsw_sp), true);
        if (err)
                goto err_fid_bc_flood_set;

        err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                                  mlxsw_sp_fid_index(rif->fid), true);
        if (err)
                goto err_rif_fdb_op;

        mlxsw_sp_fid_rif_set(rif->fid, rif);
        return 0;

err_rif_fdb_op:
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
        mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
        return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
        u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_fid *fid = rif->fid;

        mlxsw_sp_fid_rif_set(fid, NULL);
        mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                            mlxsw_sp_fid_index(fid), false);
        mlxsw_sp_rif_macvlan_flush(rif);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
        mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
                          struct netlink_ext_ack *extack)
{
        u16 vid;
        int err;

        if (is_vlan_dev(rif->dev)) {
                vid = vlan_dev_vlan_id(rif->dev);
        } else {
                err = br_vlan_get_pvid(rif->dev, &vid);
                if (err < 0 || !vid) {
                        NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
                        return ERR_PTR(-EINVAL);
                }
        }

        return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
        u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
        struct switchdev_notifier_fdb_info info;
        struct net_device *br_dev;
        struct net_device *dev;

        br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
        dev = br_fdb_find_port(br_dev, mac, vid);
        if (!dev)
                return;

        info.addr = mac;
        info.vid = vid;
        call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
        .type = MLXSW_SP_RIF_TYPE_VLAN,
        .rif_size = sizeof(struct mlxsw_sp_rif),
        .configure = mlxsw_sp_rif_vlan_configure,
        .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
        .fid_get = mlxsw_sp_rif_vlan_fid_get,
        .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        u16 fid_index = mlxsw_sp_fid_index(rif->fid);
        int err;

        err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
                                       true);
        if (err)
                return err;

        err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
                                     mlxsw_sp_router_port(mlxsw_sp), true);
        if (err)
                goto err_fid_mc_flood_set;

        err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                                     mlxsw_sp_router_port(mlxsw_sp), true);
        if (err)
                goto err_fid_bc_flood_set;

        err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                                  mlxsw_sp_fid_index(rif->fid), true);
        if (err)
                goto err_rif_fdb_op;

        mlxsw_sp_fid_rif_set(rif->fid, rif);
        return 0;

err_rif_fdb_op:
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
        mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
        return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
        u16 fid_index = mlxsw_sp_fid_index(rif->fid);
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_fid *fid = rif->fid;

        mlxsw_sp_fid_rif_set(fid, NULL);
        mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                            mlxsw_sp_fid_index(fid), false);
        mlxsw_sp_rif_macvlan_flush(rif);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
        mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
                         struct netlink_ext_ack *extack)
{
        return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
        struct switchdev_notifier_fdb_info info;
        struct net_device *dev;

        dev = br_fdb_find_port(rif->dev, mac, 0);
        if (!dev)
                return;

        info.addr = mac;
        info.vid = 0;
        call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
        .type = MLXSW_SP_RIF_TYPE_FID,
        .rif_size = sizeof(struct mlxsw_sp_rif),
        .configure = mlxsw_sp_rif_fid_configure,
        .deconfigure = mlxsw_sp_rif_fid_deconfigure,
        .fid_get = mlxsw_sp_rif_fid_fid_get,
        .fdb_del = mlxsw_sp_rif_fid_fdb_del,
};

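/* Emulated VLAN RIF: reuses the FID RIF configure/deconfigure path, but
 * resolves the FID from the 802.1Q VID of the underlying device.
 */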
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
        .type = MLXSW_SP_RIF_TYPE_VLAN,
        .rif_size = sizeof(struct mlxsw_sp_rif),
        .configure = mlxsw_sp_rif_fid_configure,
        .deconfigure = mlxsw_sp_rif_fid_deconfigure,
        .fid_get = mlxsw_sp_rif_vlan_fid_get,
        .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
        return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
                           const struct mlxsw_sp_rif_params *params)
{
        struct mlxsw_sp_rif_params_ipip_lb *params_lb;
        struct mlxsw_sp_rif_ipip_lb *rif_lb;

        params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
                                 common);
        rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
        rif_lb->lb_config = params_lb->lb_config;
}

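/* A loopback RIF is bound to the virtual router of the tunnel's underlay
 * table; a reference on that VR is held for the lifetime of the RIF.
 */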
static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
        u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_vr *ul_vr;
        int err;

        ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
        if (IS_ERR(ul_vr))
                return PTR_ERR(ul_vr);

        err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
        if (err)
                goto err_loopback_op;

        lb_rif->ul_vr_id = ul_vr->id;
        ++ul_vr->rif_count;
        return 0;

err_loopback_op:
        mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
        return err;
}

static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_vr *ul_vr;

        ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
        mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

        --ul_vr->rif_count;
        mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
        .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
        .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
        .setup = mlxsw_sp_rif_ipip_lb_setup,
        .configure = mlxsw_sp_rif_ipip_lb_configure,
        .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
        [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
        [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
        [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
        [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
};

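/* The RIF array is indexed by rif_index and sized according to the
 * MAX_RIFS resource reported by the device.
 */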
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
        u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

        mlxsw_sp->router->rifs = kcalloc(max_rifs,
                                         sizeof(struct mlxsw_sp_rif *),
                                         GFP_KERNEL);
        if (!mlxsw_sp->router->rifs)
                return -ENOMEM;

        mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

        return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
                WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

        kfree(mlxsw_sp->router->rifs);
}

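/* Set the global IP-in-IP tunneling configuration (TIGCR register). */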
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
        char tigcr_pl[MLXSW_REG_TIGCR_LEN];

        mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
        mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
        INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
        return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
        WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
        struct mlxsw_sp_router *router;

        /* Flush pending FIB notifications and then flush the device's
         * table before requesting another dump. The FIB notification
         * block is unregistered, so no need to take RTNL.
         */
        mlxsw_core_flush_owq();
        router = container_of(nb, struct mlxsw_sp_router, fib_nb);
        mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
        mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
        mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

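/* Program the ECMP hash fields (RECR2): L3 source/destination addresses
 * are always hashed; L4 ports are added only when the kernel's multipath
 * hash policy is not L3-only.
 */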
static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
        bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

        mlxsw_sp_mp_hash_header_set(recr2_pl,
                                    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
        mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
        mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
        mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
        if (only_l3)
                return;

        mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
        bool only_l3 = !ip6_multipath_hash_policy(&init_net);

        mlxsw_sp_mp_hash_header_set(recr2_pl,
                                    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
        mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
        mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
        mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
        if (only_l3) {
                mlxsw_sp_mp_hash_field_set(recr2_pl,
                                           MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
        } else {
                mlxsw_sp_mp_hash_header_set(recr2_pl,
                                            MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
                mlxsw_sp_mp_hash_field_set(recr2_pl,
                                           MLXSW_REG_RECR2_TCP_UDP_SPORT);
                mlxsw_sp_mp_hash_field_set(recr2_pl,
                                           MLXSW_REG_RECR2_TCP_UDP_DPORT);
        }
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
        char recr2_pl[MLXSW_REG_RECR2_LEN];
        u32 seed;

        get_random_bytes(&seed, sizeof(seed));
        mlxsw_reg_recr2_pack(recr2_pl, seed);
        mlxsw_sp_mp4_hash_init(recr2_pl);
        mlxsw_sp_mp6_hash_init(recr2_pl);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
        return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
        char rdpm_pl[MLXSW_REG_RDPM_LEN];
        unsigned int i;

        MLXSW_REG_ZERO(rdpm, rdpm_pl);

        /* HW is determining switch priority based on DSCP-bits, but the
         * kernel is still doing that based on the ToS. Since there's a
         * mismatch in bits we need to make sure to translate the right
         * value ToS would observe, skipping the 2 least-significant ECN bits.
         */
        for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
                mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

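/* Enable the router and set its global configuration (RGCR): the maximum
 * number of RIFs and whether forwarded packets should have their switch
 * priority updated.
 */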
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
        bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
        char rgcr_pl[MLXSW_REG_RGCR_LEN];
        u64 max_rifs;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
                return -EIO;
        max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

        mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
        mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
        mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
        if (err)
                return err;
        return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
        char rgcr_pl[MLXSW_REG_RGCR_LEN];

        mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

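/* Initialize all router sub-blocks and register the notifiers. Teardown in
 * mlxsw_sp_router_fini() is done in the reverse order, mirroring the error
 * path below.
 */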
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_router *router;
        int err;

        router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
        if (!router)
                return -ENOMEM;
        mlxsw_sp->router = router;
        router->mlxsw_sp = mlxsw_sp;

        INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
        err = __mlxsw_sp_router_init(mlxsw_sp);
        if (err)
                goto err_router_init;

        err = mlxsw_sp_rifs_init(mlxsw_sp);
        if (err)
                goto err_rifs_init;

        err = mlxsw_sp_ipips_init(mlxsw_sp);
        if (err)
                goto err_ipips_init;

        err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
                              &mlxsw_sp_nexthop_ht_params);
        if (err)
                goto err_nexthop_ht_init;

        err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
                              &mlxsw_sp_nexthop_group_ht_params);
        if (err)
                goto err_nexthop_group_ht_init;

        INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
        err = mlxsw_sp_lpm_init(mlxsw_sp);
        if (err)
                goto err_lpm_init;

        err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
        if (err)
                goto err_mr_init;

        err = mlxsw_sp_vrs_init(mlxsw_sp);
        if (err)
                goto err_vrs_init;

        err = mlxsw_sp_neigh_init(mlxsw_sp);
        if (err)
                goto err_neigh_init;

        mlxsw_sp->router->netevent_nb.notifier_call =
                mlxsw_sp_router_netevent_event;
        err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
        if (err)
                goto err_register_netevent_notifier;

        err = mlxsw_sp_mp_hash_init(mlxsw_sp);
        if (err)
                goto err_mp_hash_init;

        err = mlxsw_sp_dscp_init(mlxsw_sp);
        if (err)
                goto err_dscp_init;

        mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
        err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
                                    mlxsw_sp_router_fib_dump_flush);
        if (err)
                goto err_register_fib_notifier;

        return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
        mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
        mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
        mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
        mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
        rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
        rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
        mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
        mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
        __mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
        kfree(mlxsw_sp->router);
        return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
        unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
        mlxsw_sp_neigh_fini(mlxsw_sp);
        mlxsw_sp_vrs_fini(mlxsw_sp);
        mlxsw_sp_mr_fini(mlxsw_sp);
        mlxsw_sp_lpm_fini(mlxsw_sp);
        rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
        rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
        mlxsw_sp_ipips_fini(mlxsw_sp);
        mlxsw_sp_rifs_fini(mlxsw_sp);
        __mlxsw_sp_router_fini(mlxsw_sp);
        kfree(mlxsw_sp->router);
}