/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

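/* Example: a FIB holding a default route, 10.0.0.0/8 and 10.1.0.0/16
 * yields a prefix usage with bits 0, 8 and 16 set:
 *
 *	mlxsw_sp_prefix_usage_set(&prefix_usage, 0);
 *	mlxsw_sp_prefix_usage_set(&prefix_usage, 8);
 *	mlxsw_sp_prefix_usage_set(&prefix_usage, 16);
 *
 * The LPM trees below are sized and structured from this bitmap alone.
 */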
static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	memset(prefix_usage, 0, sizeof(*prefix_usage));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	struct net_device *dev;
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_entry {
	struct rhash_head ht_node;
	struct list_head list;
	struct mlxsw_sp_fib_key key;
	enum mlxsw_sp_fib_entry_type type;
	unsigned int ref_count;
	u16 rif; /* used for action local */
	struct mlxsw_sp_vr *vr;
	struct fib_info *fi;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head entry_list;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

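/* FIB entries are hashed by the whole mlxsw_sp_fib_key, i.e. the
 * (dev, addr, prefix_len) triplet. The key is compared bytewise over
 * key_len, which is why lookups build a zeroed key first (see
 * mlxsw_sp_fib_entry_lookup()); otherwise uninitialized padding in the
 * struct would break the comparison.
 */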
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned char prefix_len = fib_entry->key.prefix_len;
	int err;

	err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
				     mlxsw_sp_fib_ht_params);
	if (err)
		return err;
	list_add_tail(&fib_entry->list, &fib->entry_list);
	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
	return 0;
}

static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
				      struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned char prefix_len = fib_entry->key.prefix_len;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
	list_del(&fib_entry->list);
	rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
			  size_t addr_len, unsigned char prefix_len,
			  struct net_device *dev)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry)
		return NULL;
	fib_entry->key.dev = dev;
	memcpy(fib_entry->key.addr, addr, addr_len);
	fib_entry->key.prefix_len = prefix_len;
	return fib_entry;
}

static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
{
	kfree(fib_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			  size_t addr_len, unsigned char prefix_len,
			  struct net_device *dev)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	key.dev = dev;
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->entry_list);
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count == 0) {
			if (one_reserved)
				one_reserved = false;
			else
				return lpm_tree;
		}
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

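/* The RALST register describes which prefix length bins an LPM tree
 * uses. The first walk over the usage bitmap below finds the root bin
 * (the longest used prefix length); the second writes one bin per used
 * prefix length, each linked to the previously written, shorter one.
 */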
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto, one_reserved);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (!vr->used)
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto,
			     vr->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

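/* Example: a route in RT_TABLE_LOCAL (255) is handled exactly as one
 * in RT_TABLE_MAIN (254), so both kernel tables share one virtual
 * router; see mlxsw_sp_fix_tb_id() below.
 */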
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local tables into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id,
					    enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      unsigned char prefix_len,
					      u32 tb_id,
					      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib = mlxsw_sp_fib_create();
	if (IS_ERR(vr->fib))
		return ERR_CAST(vr->fib);

	vr->proto = proto;
	vr->tb_id = tb_id;
	mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 proto, true);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_tree_get;
	}
	vr->lpm_tree = lpm_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
	if (err)
		goto err_tree_bind;

	vr->used = true;
	return vr;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
err_tree_get:
	mlxsw_sp_fib_destroy(vr->fib);

	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	mlxsw_sp_fib_destroy(vr->fib);
	vr->used = false;
}

static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
				     &vr->lpm_tree->prefix_usage))
		return 0;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 vr->proto, false);
	if (IS_ERR(lpm_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might still be
		 * good for us if our requirement is a subset of the
		 * prefixes used in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &vr->lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(lpm_tree);
	}

	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	vr->lpm_tree = lpm_tree;
	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
					   unsigned char prefix_len,
					   u32 tb_id,
					   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int err;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
	if (!vr) {
		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
		if (IS_ERR(vr))
			return vr;
	} else {
		struct mlxsw_sp_prefix_usage req_prefix_usage;

		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
					  &vr->fib->prefix_usage);
		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
		/* Need to replace LPM tree in case new prefix is required. */
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
						 &req_prefix_usage);
		if (err)
			return ERR_PTR(err);
	}
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	/* Destroy the virtual router entity in case the associated FIB is
	 * empty and allow it to be used for other tables in the future.
	 * Otherwise, check whether some prefix usage disappeared and switch
	 * to a smaller tree if that is the case. Note that if a new, smaller
	 * tree cannot be allocated, the original one keeps being used.
	 */
	if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
	else
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
					   &vr->fib->prefix_usage);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
				       GFP_KERNEL);
	if (!mlxsw_sp->router.vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router.vrs);
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool offloaded;
	struct delayed_work dw;
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
	if (!neigh_entry)
		return NULL;
	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
	return neigh_entry;
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

int mlxsw_sp_router_neigh_construct(struct net_device *dev,
				    struct neighbour *n)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *r;
	int err;

	if (n->tbl != &arp_tbl)
		return 0;

	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (neigh_entry)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (WARN_ON(!r))
		return -EINVAL;

	neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
	if (!neigh_entry)
		return -ENOMEM;
	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;
	return 0;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_destroy(neigh_entry);
	return err;
}

void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
				   struct neighbour *n)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;

	if (n->tbl != &arp_tbl)
		return;

	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry)
		return;
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_destroy(neigh_entry);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

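/* Dump the active neighbour entries from the device one RAUHTD
 * response at a time. A full response may mean more records are
 * pending, so the query is repeated until the response is no longer
 * full (see mlxsw_sp_router_rauhtd_is_full() above).
 */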
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node) {
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		if (!list_empty(&neigh_entry->nexthop_list))
			neigh_event_send(neigh_entry->key.n, NULL);
	}
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router.neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.neighs_update.dw.work);
	int err;

	err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.nexthop_probe_dw.work);

	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem where
	 * the nexthop wouldn't get offloaded until the neighbour is resolved
	 * but it would never get resolved in case traffic is flowing in HW
	 * using a different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node) {
		if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
		    !list_empty(&neigh_entry->nexthop_list))
			neigh_event_send(neigh_entry->key.n, NULL);
	}
	rtnl_unlock();

	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

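/* Sync a single neighbour into the device RAUHT table:
 * - adding:   not offloaded yet, now connected -> write the entry
 * - updating: offloaded and still connected -> rewrite the entry
 * - removing: offloaded, no longer connected -> delete the entry
 * On failure, the offloaded flag keeps describing the state the device
 * is actually in.
 */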
static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry =
		container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
	struct neighbour *n = neigh_entry->key.n;
	struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	struct net_device *dev;
	bool entry_connected;
	u8 nud_state;
	bool updating;
	bool removing;
	bool adding;
	u32 dip;
	int err;

	read_lock_bh(&n->lock);
	dip = ntohl(*((__be32 *) n->primary_key));
	memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
	nud_state = n->nud_state;
	dev = n->dev;
	read_unlock_bh(&n->lock);

	entry_connected = nud_state & NUD_VALID;
	adding = (!neigh_entry->offloaded) && entry_connected;
	updating = neigh_entry->offloaded && entry_connected;
	removing = neigh_entry->offloaded && !entry_connected;

	if (adding || updating) {
		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
				      neigh_entry->rif,
				      neigh_entry->ha, dip);
		err = mlxsw_reg_write(mlxsw_sp->core,
				      MLXSW_REG(rauht), rauht_pl);
		if (err) {
			netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
			neigh_entry->offloaded = false;
		} else {
			neigh_entry->offloaded = true;
		}
		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
	} else if (removing) {
		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
				      neigh_entry->rif,
				      neigh_entry->ha, dip);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
				      rauht_pl);
		if (err) {
			netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
			neigh_entry->offloaded = true;
		} else {
			neigh_entry->offloaded = false;
		}
		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
	}

	neigh_release(n);
	mlxsw_sp_port_dev_put(mlxsw_sp_port);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct net_device *dev;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router.neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
		dev = n->dev;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
		if (WARN_ON(!neigh_entry)) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_DONE;
		}
		neigh_entry->mlxsw_sp_port = mlxsw_sp_port;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
			neigh_release(n);
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for neighbour activity updates and for
	 * probing of unresolved nexthops.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this nexthop belongs to
						*/
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

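/* A nexthop group backs one block of adjacency entries in the KVD
 * linear area: adj_index is the base entry of the block and ecmp_size
 * the number of offloaded nexthops in it. E.g. a group with four
 * nexthops of which three are resolved occupies three consecutive
 * entries starting at adj_index.
 */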
struct mlxsw_sp_nexthop_group {
	struct list_head list; /* node in mlxsw->router.nexthop_group_list */
	struct list_head fib_list; /* list of fib entries that use this group */
	u8 adj_index_valid:1;
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
};

static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_vr *vr,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
			     adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (vr == fib_entry->vr)
			continue;
		vr = fib_entry->vr;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

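/* Re-balance a nexthop group after neighbour state changed:
 * 1. If the set of offloadable nexthops did not change, only refresh
 *    the MACs in the existing adjacency entries.
 * 2. Otherwise, allocate a new KVD linear block sized to the new ECMP
 *    width, write it, re-point the using fib entries at it and free
 *    the old block.
 * 3. If no nexthop is resolved, or allocation fails, fall back to
 *    trapping the traffic to the kernel.
 */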
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int ret;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
	if (ret < 0) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	adj_index = ret;
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}

static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing && !nh->should_offload)
		nh->should_offload = 1;
	else if (removing && nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
	rtnl_unlock();
}

static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop_group *nh_grp,
				 struct mlxsw_sp_nexthop *nh,
				 struct fib_nh *fib_nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct net_device *dev = fib_nh->nh_dev;
	struct neighbour *n;
	u8 nud_state;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_release(n);
		return -EINVAL;
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router.nexthop_neighs_list);

	nh->nh_grp = nh_grp;
	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));

	return 0;
}

static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;

	list_del(&nh->neigh_list_node);

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&nh->neigh_entry->nexthop_list))
		list_del(&nh->neigh_entry->nexthop_neighs_list_node);

	neigh_release(neigh_entry->key.n);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->count = fi->fib_nhs;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	list_del(&nh_grp->list);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
}

static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
				   struct fib_info *fi)
{
	int i;

	for (i = 0; i < fi->fib_nhs; i++) {
		struct fib_nh *fib_nh = &fi->fib_nh[i];
		struct neighbour *n = nh->neigh_entry->key.n;

		if (memcmp(n->primary_key, &fib_nh->nh_gw,
			   sizeof(fib_nh->nh_gw)) == 0 &&
		    n->dev == fib_nh->nh_dev)
			return true;
	}
	return false;
}

static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
					 struct fib_info *fi)
{
	int i;

	if (nh_grp->count != fi->fib_nhs)
		return false;
	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (!mlxsw_sp_nexthop_match(nh, fi))
			return false;
	}
	return true;
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
			    list) {
		if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
			return nh_grp;
	}
	return NULL;
}

static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}

static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (fib_entry->nh_group->adj_index_valid) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
				       fib_entry->rif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->vr->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_L3_PROTO_IPV6:
		return -EINVAL;
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}

static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
				const struct fib_entry_notifier_info *fen_info,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;
	struct mlxsw_sp_rif *r = NULL;
	int nhsel;
	int err;

	if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	}
	if (fen_info->type != RTN_UNICAST)
		return -EINVAL;

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			continue;
		r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
		if (!r) {
			/* In case router interface is not found for
			 * at least one of the nexthops, that means
			 * the nexthop points to some device unrelated
			 * to us. Set trap and pass the packets for
			 * this prefix to kernel.
			 */
			break;
		}
	}

	if (!r) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	}

	if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		fib_entry->rif = r->rif;
	} else {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
		if (err)
			return err;
	}
	fib_info_offload_inc(fen_info->fi);
	return 0;
}

static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		fib_info_offload_dec(fib_entry->fi);
	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
		mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct fib_info *fi = fen_info->fi;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
			     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
					      sizeof(fen_info->dst),
					      fen_info->dst_len, fi->fib_dev);
	if (fib_entry) {
		/* Already exists, just take a reference */
		fib_entry->ref_count++;
		return fib_entry;
	}
	fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
					      sizeof(fen_info->dst),
					      fen_info->dst_len, fi->fib_dev);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_create;
	}
	fib_entry->vr = vr;
	fib_entry->fi = fi;
	fib_entry->ref_count = 1;

	err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_init;

	return fib_entry;

err_fib4_entry_init:
	mlxsw_sp_fib_entry_destroy(fib_entry);
err_fib_entry_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);

	return ERR_PTR(err);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
			const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
			      MLXSW_SP_L3_PROTO_IPV4);
	if (!vr)
		return NULL;

	return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
					 sizeof(fen_info->dst),
					 fen_info->dst_len,
					 fen_info->fi->fib_dev);
}

static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	if (--fib_entry->ref_count == 0) {
		mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
		mlxsw_sp_fib_entry_destroy(fib_entry);
	}
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

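/* Drop every remaining reference of an entry at once; the loop below
 * ends after the put that observed ref_count == 1, i.e. the one that
 * freed the entry, so fib_entry must not be touched afterwards.
 */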
static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned int last_ref_count;

	do {
		last_ref_count = fib_entry->ref_count;
		mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	} while (last_ref_count != 1);
}

static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
		return PTR_ERR(fib_entry);
	}

	if (fib_entry->ref_count != 1)
		return 0;

	vr = fib_entry->vr;
	err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
		goto err_fib_entry_insert;
	}
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_add;
	return 0;

err_fib_entry_add:
	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	if (mlxsw_sp->router.aborted)
		return;

	fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
	if (!fib_entry)
		return;

	if (fib_entry->ref_count == 1) {
		mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
		mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
	}

	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
}

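/* Abort mode: program a minimal LPM tree, bind virtual router 0 to it
 * and install a default (/0) route whose action is ip2me, so routed
 * packets are trapped to the CPU and forwarded by the kernel instead
 * of the ASIC.
 */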
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	char raltb_pl[MLXSW_REG_RALTB_LEN];
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
	if (err)
		return err;

	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_entry *tmp;
	struct mlxsw_sp_vr *vr;
	int i;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];

		if (!vr->used)
			continue;

		list_for_each_entry_safe(fib_entry, tmp,
					 &vr->fib->entry_list, list) {
			bool do_break = &tmp->list == &vr->fib->entry_list;

			mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
			mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
						  fib_entry);
			mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
			if (do_break)
				break;
		}
	}
}

static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}

static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct fib_entry_notifier_info *fen_info = ptr;
	int err;

	if (!net_eq(fen_info->info.net, &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	}
	return NOTIFY_DONE;
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp_lpm_init(mlxsw_sp);
	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	register_fib_notifier(&mlxsw_sp->fib_nb);
	return 0;

err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
}