/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
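
/* A prefix usage is a bitmap with one bit per prefix length. Each FIB
 * tracks which prefix lengths its entries use, and the resulting bitmap
 * determines the LPM tree structure required by the virtual router.
 * The helpers below implement equality/subset tests and set operations
 * on these bitmaps.
 */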
static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	memset(prefix_usage, 0, sizeof(*prefix_usage));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
struct mlxsw_sp_fib_key {
	struct net_device *dev;
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_entry {
	struct rhash_head ht_node;
	struct list_head list;
	struct mlxsw_sp_fib_key key;
	enum mlxsw_sp_fib_entry_type type;
	unsigned int ref_count;
	u16 rif; /* used for action local */
	struct mlxsw_sp_vr *vr;
	struct fib_info *fi;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head entry_list;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
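
/* The insert/remove helpers below keep prefix_ref_count and the
 * prefix_usage bitmap in sync with the set of entries, so that a
 * suitable LPM tree can be (re)selected whenever a prefix length
 * appears or disappears.
 */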
static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned char prefix_len = fib_entry->key.prefix_len;
	int err;

	err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
				     mlxsw_sp_fib_ht_params);
	if (err)
		return err;
	list_add_tail(&fib_entry->list, &fib->entry_list);
	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
	return 0;
}

static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
				      struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned char prefix_len = fib_entry->key.prefix_len;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
	list_del(&fib_entry->list);
	rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
			  size_t addr_len, unsigned char prefix_len,
			  struct net_device *dev)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry)
		return NULL;
	fib_entry->key.dev = dev;
	memcpy(fib_entry->key.addr, addr, addr_len);
	fib_entry->key.prefix_len = prefix_len;
	return fib_entry;
}

static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
{
	kfree(fib_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			  size_t addr_len, unsigned char prefix_len,
			  struct net_device *dev)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	key.dev = dev;
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->entry_list);
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
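
/* LPM trees are a limited hardware resource, shared between virtual
 * routers with identical prefix usage and reference counted. The
 * one_reserved flag keeps one tree unallocated, so that a router bound
 * to a tree can always migrate to a differently structured one later.
 */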
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count == 0) {
			if (one_reserved)
				one_reserved = false;
			else
				return lpm_tree;
		}
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto, one_reserved);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}
}
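
/* A virtual router (VR) is the hardware counterpart of a kernel routing
 * table. Each used VR owns a FIB and is bound to an LPM tree whose
 * structure matches the FIB's prefix usage.
 */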
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (!vr->used)
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto,
			     vr->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id,
					    enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      unsigned char prefix_len,
					      u32 tb_id,
					      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib = mlxsw_sp_fib_create();
	if (IS_ERR(vr->fib))
		return ERR_CAST(vr->fib);

	vr->proto = proto;
	vr->tb_id = tb_id;
	mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 proto, true);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_tree_get;
	}
	vr->lpm_tree = lpm_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
	if (err)
		goto err_tree_bind;

	vr->used = true;
	return vr;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
err_tree_get:
	mlxsw_sp_fib_destroy(vr->fib);

	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	mlxsw_sp_fib_destroy(vr->fib);
	vr->used = false;
}

static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
				     &vr->lpm_tree->prefix_usage))
		return 0;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 vr->proto, false);
	if (IS_ERR(lpm_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &vr->lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(lpm_tree);
	}

	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	vr->lpm_tree = lpm_tree;
	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
					   unsigned char prefix_len,
					   u32 tb_id,
					   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int err;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
	if (!vr) {
		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
		if (IS_ERR(vr))
			return vr;
	} else {
		struct mlxsw_sp_prefix_usage req_prefix_usage;

		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
					  &vr->fib->prefix_usage);
		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
		/* Need to replace LPM tree in case new prefix is required. */
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
						 &req_prefix_usage);
		if (err)
			return ERR_PTR(err);
	}
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	/* Destroy virtual router entity in case the associated FIB is empty
	 * and allow it to be used for other tables in future. Otherwise,
	 * check if some prefix usage did not disappear and change tree if
	 * that is the case. Note that in case new, smaller tree cannot be
	 * allocated, the original one will be kept being used.
	 */
	if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
	else
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
					   &vr->fib->prefix_usage);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
				       GFP_KERNEL);
	if (!mlxsw_sp->router.vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router.vrs);
}
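
/* Neighbour (ARP) offload: each offloaded neighbour is tracked by a
 * mlxsw_sp_neigh_entry keyed by address and netdev, and is mirrored
 * into the device's host table via the RAUHT register.
 */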
struct mlxsw_sp_neigh_key {
	unsigned char addr[sizeof(struct in6_addr)];
	struct net_device *dev;
};

struct mlxsw_sp_neigh_entry {
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	struct neighbour *n;
	bool offloaded;
	struct delayed_work dw;
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
			    struct net_device *dev, u16 rif,
			    struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
	if (!neigh_entry)
		return NULL;
	memcpy(neigh_entry->key.addr, addr, addr_len);
	neigh_entry->key.dev = dev;
	neigh_entry->rif = rif;
	neigh_entry->n = n;
	INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
	return neigh_entry;
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
			    size_t addr_len, struct net_device *dev)
{
	struct mlxsw_sp_neigh_key key = {{ 0 } };

	memcpy(key.addr, addr, addr_len);
	key.dev = dev;
	return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
				    struct neighbour *n)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *r;
	u32 dip;
	int err;

	if (n->tbl != &arp_tbl)
		return 0;

	dip = ntohl(*((__be32 *) n->primary_key));
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
						  n->dev);
	if (neigh_entry) {
		WARN_ON(neigh_entry->n != n);
		return 0;
	}

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (WARN_ON(!r))
		return -EINVAL;

	neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
						  r->rif, n);
	if (!neigh_entry)
		return -ENOMEM;
	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;
	return 0;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_destroy(neigh_entry);
	return err;
}

void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
				   struct neighbour *n)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	u32 dip;

	if (n->tbl != &arp_tbl)
		return;

	dip = ntohl(*((__be32 *) n->primary_key));
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
						  n->dev);
	if (!neigh_entry)
		return;
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_destroy(neigh_entry);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
}
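
/* The device ages out host table entries on its own, so the driver
 * periodically dumps the set of active entries with the RAUHTD
 * register and kicks the kernel neighbour state machine for each one,
 * preventing ARP entries that carry offloaded traffic from expiring.
 */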
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (num_rec);
	rtnl_unlock();

	kfree(rauhtd_pl);
	return err;
}
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node) {
		/* If this neigh has nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		if (!list_empty(&neigh_entry->nexthop_list))
			neigh_event_send(neigh_entry->n, NULL);
	}
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router.neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.neighs_update.dw.work);
	int err;

	err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.nexthop_probe_dw.work);

	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbour is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node) {
		if (!(neigh_entry->n->nud_state & NUD_VALID) &&
		    !list_empty(&neigh_entry->nexthop_list))
			neigh_event_send(neigh_entry->n, NULL);
	}
	rtnl_unlock();

	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
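
/* mlxsw_sp_router_neigh_update_hw() below reflects a neighbour's NUD
 * state into the device: it adds or refreshes the RAUHT entry while
 * the neighbour is valid, removes it once it is not, and notifies the
 * nexthops using this neighbour in either case.
 */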
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry =
		container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
	struct neighbour *n = neigh_entry->n;
	struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	struct net_device *dev;
	bool entry_connected;
	u8 nud_state;
	bool updating;
	bool removing;
	bool adding;
	u32 dip;
	int err;

	read_lock_bh(&n->lock);
	dip = ntohl(*((__be32 *) n->primary_key));
	memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
	nud_state = n->nud_state;
	dev = n->dev;
	read_unlock_bh(&n->lock);

	entry_connected = nud_state & NUD_VALID;
	adding = (!neigh_entry->offloaded) && entry_connected;
	updating = neigh_entry->offloaded && entry_connected;
	removing = neigh_entry->offloaded && !entry_connected;

	if (adding || updating) {
		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
				      neigh_entry->rif,
				      neigh_entry->ha, dip);
		err = mlxsw_reg_write(mlxsw_sp->core,
				      MLXSW_REG(rauht), rauht_pl);
		if (err) {
			netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
			neigh_entry->offloaded = false;
		} else {
			neigh_entry->offloaded = true;
		}
		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
	} else if (removing) {
		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
				      neigh_entry->rif,
				      neigh_entry->ha, dip);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
				      rauht_pl);
		if (err) {
			netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
			neigh_entry->offloaded = true;
		} else {
			neigh_entry->offloaded = false;
		}
		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
	}

	neigh_release(n);
	mlxsw_sp_port_dev_put(mlxsw_sp_port);
}
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct net_device *dev;
	struct neigh_parms *p;
	struct neighbour *n;
	u32 dip;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router.neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
		dev = n->dev;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		dip = ntohl(*((__be32 *) n->primary_key));
		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
							  &dip,
							  sizeof(__be32),
							  dev);
		if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_DONE;
		}
		neigh_entry->mlxsw_sp_port = mlxsw_sp_port;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
			neigh_release(n);
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
		}
		break;
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
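
/* Nexthop groups implement ECMP. Each group owns a block of adjacency
 * entries in the KVD linear area; remote FIB entries reference the
 * block by its base index and ECMP size.
 */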
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this nexthop belongs to
						*/
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group {
	struct list_head list; /* node in mlxsw->router.nexthop_group_list */
	struct list_head fib_list; /* list of fib entries that use this group */
	u8 adj_index_valid:1;
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
};
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_vr *vr,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
			     adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (vr == fib_entry->vr)
			continue;
		vr = fib_entry->vr;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}
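
/* The group refresh logic below recomputes the set of offloadable
 * nexthops, allocates a new adjacency block of the matching ECMP size,
 * writes the nexthop MACs into it and then either updates the FIB
 * entries directly or mass-updates them via RALEU before freeing the
 * old block. On any failure it falls back to trapping to the CPU.
 */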
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int ret;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
	if (ret < 0) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	adj_index = ret;
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing && !nh->should_offload)
		nh->should_offload = 1;
	else if (removing && nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
	rtnl_unlock();
}
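
/* Bind a kernel fib_nh to its gateway neighbour, creating the
 * neighbour entry and kicking resolution if it does not exist yet.
 */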
static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop_group *nh_grp,
				 struct mlxsw_sp_nexthop *nh,
				 struct fib_nh *fib_nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	u32 gwip = ntohl(fib_nh->nh_gw);
	struct net_device *dev = fib_nh->nh_dev;
	struct neighbour *n;
	u8 nud_state;

	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
						  sizeof(gwip), dev);
	if (!neigh_entry) {
		__be32 gwipn = htonl(gwip);

		n = neigh_create(&arp_tbl, &gwipn, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
							  sizeof(gwip), dev);
		if (!neigh_entry) {
			neigh_release(n);
			return -EINVAL;
		}
	} else {
		/* Take a reference of neigh here ensuring that neigh would
		 * not be destructed before the nexthop entry is finished.
		 * The second branch takes the reference in neigh_create()
		 */
		n = neigh_entry->n;
		neigh_clone(n);
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router.nexthop_neighs_list);

	nh->nh_grp = nh_grp;
	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));

	return 0;
}

static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;

	list_del(&nh->neigh_list_node);

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&nh->neigh_entry->nexthop_list))
		list_del(&nh->neigh_entry->nexthop_neighs_list_node);

	neigh_release(neigh_entry->n);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->count = fi->fib_nhs;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_init:
	for (i--; i >= 0; i--)
		mlxsw_sp_nexthop_fini(mlxsw_sp, &nh_grp->nexthops[i]);
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	list_del(&nh_grp->list);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
}
static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
				   struct fib_info *fi)
{
	int i;

	for (i = 0; i < fi->fib_nhs; i++) {
		struct fib_nh *fib_nh = &fi->fib_nh[i];
		u32 gwip = ntohl(fib_nh->nh_gw);

		if (memcmp(nh->neigh_entry->key.addr,
			   &gwip, sizeof(u32)) == 0 &&
		    nh->neigh_entry->key.dev == fib_nh->nh_dev)
			return true;
	}
	return false;
}

static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
					 struct fib_info *fi)
{
	int i;

	if (nh_grp->count != fi->fib_nhs)
		return false;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (!mlxsw_sp_nexthop_match(nh, fi))
			return false;
	}
	return true;
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
			    list) {
		if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
			return nh_grp;
	}
	return NULL;
}

static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}
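
/* FIB entries are programmed with the RALUE register. Remote entries
 * forward through an adjacency block, local entries point at a router
 * interface and trap entries hand the packet to the CPU.
 */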
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (fib_entry->nh_group->adj_index_valid) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
				       fib_entry->rif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->vr->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_L3_PROTO_IPV6:
		return -EINVAL;
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
				const struct fib_entry_notifier_info *fen_info,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;
	struct mlxsw_sp_rif *r = NULL;
	int nhsel;
	int err;

	if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	}
	if (fen_info->type != RTN_UNICAST)
		return -EINVAL;

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			continue;
		r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
		if (!r) {
			/* In case router interface is not found for
			 * at least one of the nexthops, that means
			 * the nexthop points to some device unrelated
			 * to us. Set trap and pass the packets for
			 * this prefix to kernel.
			 */
			break;
		}
	}

	if (!r) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	}

	if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		fib_entry->rif = r->rif;
	} else {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
		if (err)
			return err;
	}
	fib_info_offload_inc(fen_info->fi);
	return 0;
}

static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		fib_info_offload_dec(fib_entry->fi);
	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
		mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct fib_info *fi = fen_info->fi;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
			     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
					      sizeof(fen_info->dst),
					      fen_info->dst_len, fi->fib_dev);
	if (fib_entry) {
		/* Already exists, just take a reference */
		fib_entry->ref_count++;
		return fib_entry;
	}
	fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
					      sizeof(fen_info->dst),
					      fen_info->dst_len, fi->fib_dev);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_create;
	}
	fib_entry->vr = vr;
	fib_entry->fi = fi;
	fib_entry->ref_count = 1;

	err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_init;

	return fib_entry;

err_fib4_entry_init:
	mlxsw_sp_fib_entry_destroy(fib_entry);
err_fib_entry_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);

	return ERR_PTR(err);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
			const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
			      MLXSW_SP_L3_PROTO_IPV4);
	if (!vr)
		return NULL;

	return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
					 sizeof(fen_info->dst),
					 fen_info->dst_len,
					 fen_info->fi->fib_dev);
}

static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	if (--fib_entry->ref_count == 0) {
		mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
		mlxsw_sp_fib_entry_destroy(fib_entry);
	}
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned int last_ref_count;

	do {
		last_ref_count = fib_entry->ref_count;
		mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	} while (last_ref_count != 1);
}
static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
		return PTR_ERR(fib_entry);
	}

	if (fib_entry->ref_count != 1)
		return 0;

	vr = fib_entry->vr;
	err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
		goto err_fib_entry_insert;
	}
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_add;
	return 0;

err_fib_entry_add:
	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	if (mlxsw_sp->router.aborted)
		return;

	fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
	if (!fib_entry)
		return;

	if (fib_entry->ref_count == 1) {
		mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
		mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
	}

	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
}
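
/* Abort mechanism: if the device FIB can no longer be kept in sync
 * with the kernel (e.g. out of resources, unsupported FIB rules), all
 * offloaded entries are flushed and a default trap route is installed,
 * returning the system to full software forwarding.
 */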
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	char raltb_pl[MLXSW_REG_RALTB_LEN];
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
	if (err)
		return err;

	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_entry *tmp;
	struct mlxsw_sp_vr *vr;
	int i;
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (!vr->used)
			continue;

		list_for_each_entry_safe(fib_entry, tmp,
					 &vr->fib->entry_list, list) {
			bool do_break = &tmp->list == &vr->fib->entry_list;

			mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
			mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
						  fib_entry);
			mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
			if (do_break)
				break;
		}
	}
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}
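
/* FIB updates arrive through the FIB notifier chain registered in
 * mlxsw_sp_router_init() below. FIB rules cannot be offloaded, so
 * adding or removing one triggers the abort mechanism.
 */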
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct fib_entry_notifier_info *fen_info = ptr;
	int err;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	}
	return NOTIFY_DONE;
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp_lpm_init(mlxsw_sp);
	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	register_fib_notifier(&mlxsw_sp->fib_nb);
	return 0;

err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
}