1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/netlink.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/slab.h>
11 #include <net/inet_ecn.h>
16 #include "spectrum_nve.h"
/* Per-ASIC-generation NVE operation tables, indexed by enum
 * mlxsw_sp_nve_type. Only the VXLAN slot is populated in the visible
 * source; closing braces appear truncated in this dump.
 */
18 const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = {
19 [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp1_nve_vxlan_ops,
22 const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = {
23 [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp2_nve_vxlan_ops,
/* Forward declarations so the ops struct below can reference these
 * types before their full definitions.
 */
26 struct mlxsw_sp_nve_mc_entry;
27 struct mlxsw_sp_nve_mc_record;
28 struct mlxsw_sp_nve_mc_list;
/* Protocol-specific (IPv4/IPv6) operations on a multicast record:
 * - type:          TNUMT register record type programmed for this proto.
 * - entry_add:     store @addr into @mc_entry (may allocate, e.g. KVDL).
 * - entry_del:     release per-entry resources.
 * - entry_set:     encode the entry at @entry_index into the TNUMT payload.
 * - entry_compare: test whether @mc_entry holds @addr.
 */
30 struct mlxsw_sp_nve_mc_record_ops {
31 enum mlxsw_reg_tnumt_record_type type;
32 int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record,
33 struct mlxsw_sp_nve_mc_entry *mc_entry,
34 const union mlxsw_sp_l3addr *addr);
35 void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record,
36 const struct mlxsw_sp_nve_mc_entry *mc_entry);
37 void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record,
38 const struct mlxsw_sp_nve_mc_entry *mc_entry,
39 char *tnumt_pl, unsigned int entry_index);
40 bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record,
41 const struct mlxsw_sp_nve_mc_entry *mc_entry,
42 const union mlxsw_sp_l3addr *addr);
/* Hash-table key for a multicast list; fields (e.g. fid_index, set by
 * callers below) are partly missing from this truncated dump.
 */
45 struct mlxsw_sp_nve_mc_list_key {
/* IPv6 entry state: the address plus (per entry_set below) a KVDL
 * index referencing it. kvdl index field truncated here.
 */
49 struct mlxsw_sp_nve_mc_ipv6_entry {
50 struct in6_addr addr6;
/* One remote-VTEP entry inside a record; holds either an IPv4 address
 * or an IPv6 entry plus a 'valid' flag (flag not visible in this dump).
 */
54 struct mlxsw_sp_nve_mc_entry {
57 struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry;
/* A TNUMT record: a fixed-capacity chunk of multicast entries linked on
 * the per-FID records list. Lives at 'kvdl_index' in KVDL (field not
 * visible in this dump, but used by the functions below).
 */
62 struct mlxsw_sp_nve_mc_record {
63 struct list_head list;
64 enum mlxsw_sp_l3proto proto;
65 unsigned int num_entries;
66 struct mlxsw_sp *mlxsw_sp;
67 struct mlxsw_sp_nve_mc_list *mc_list;
68 const struct mlxsw_sp_nve_mc_record_ops *ops;
/* NOTE(review): zero-length array; kernel policy prefers a C99
 * flexible array member 'entries[]' — candidate cleanup.
 */
70 struct mlxsw_sp_nve_mc_entry entries[0];
/* Per-FID list of multicast records, hashed by 'key' in nve->mc_list_ht. */
73 struct mlxsw_sp_nve_mc_list {
74 struct list_head records_list;
75 struct rhash_head ht_node;
76 struct mlxsw_sp_nve_mc_list_key key;
/* rhashtable layout for mc_list lookup: whole key struct is the hash key. */
79 static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = {
80 .key_len = sizeof(struct mlxsw_sp_nve_mc_list_key),
81 .key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key),
82 .head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node),
/* IPv4 entry_add: simply copy the address into the entry; no per-entry
 * resource allocation is needed for IPv4.
 */
86 mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
87 struct mlxsw_sp_nve_mc_entry *mc_entry,
88 const union mlxsw_sp_l3addr *addr)
90 mc_entry->addr4 = addr->addr4;
/* IPv4 entry_del: nothing to release for an IPv4 entry (empty body). */
96 mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
97 const struct mlxsw_sp_nve_mc_entry *mc_entry)
/* IPv4 entry_set: write the destination IP (host byte order, as the
 * register API expects a u32) into TNUMT slot @entry_index.
 */
102 mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
103 const struct mlxsw_sp_nve_mc_entry *mc_entry,
104 char *tnumt_pl, unsigned int entry_index)
106 u32 udip = be32_to_cpu(mc_entry->addr4);
108 mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip);
/* IPv4 entry_compare: big-endian equality compare of the two addresses. */
112 mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
113 const struct mlxsw_sp_nve_mc_entry *mc_entry,
114 const union mlxsw_sp_l3addr *addr)
116 return mc_entry->addr4 == addr->addr4;
/* IPv4 variant of the record ops vtable. */
119 static const struct mlxsw_sp_nve_mc_record_ops
120 mlxsw_sp_nve_mc_record_ipv4_ops = {
121 .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
122 .entry_add = &mlxsw_sp_nve_mc_record_ipv4_entry_add,
123 .entry_del = &mlxsw_sp_nve_mc_record_ipv4_entry_del,
124 .entry_set = &mlxsw_sp_nve_mc_record_ipv4_entry_set,
125 .entry_compare = &mlxsw_sp_nve_mc_record_ipv4_entry_compare,
/* IPv6 entry_add — body truncated in this dump; presumably stores the
 * address and allocates the KVDL index used by entry_set below.
 * TODO(review): confirm against full source.
 */
129 mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
130 struct mlxsw_sp_nve_mc_entry *mc_entry,
131 const union mlxsw_sp_l3addr *addr)
/* IPv6 entry_del — body truncated; presumably releases the per-entry
 * KVDL resource acquired by entry_add. TODO(review): confirm.
 */
139 mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
140 const struct mlxsw_sp_nve_mc_entry *mc_entry)
/* IPv6 entry_set: unlike IPv4, the TNUMT slot carries a KVDL pointer to
 * the IPv6 address rather than the address itself.
 */
145 mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
146 const struct mlxsw_sp_nve_mc_entry *mc_entry,
147 char *tnumt_pl, unsigned int entry_index)
149 u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index;
151 mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr);
/* IPv6 entry_compare: full 128-bit address equality. */
155 mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
156 const struct mlxsw_sp_nve_mc_entry *mc_entry,
157 const union mlxsw_sp_l3addr *addr)
159 return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6);
/* IPv6 variant of the record ops vtable. */
162 static const struct mlxsw_sp_nve_mc_record_ops
163 mlxsw_sp_nve_mc_record_ipv6_ops = {
164 .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
165 .entry_add = &mlxsw_sp_nve_mc_record_ipv6_entry_add,
166 .entry_del = &mlxsw_sp_nve_mc_record_ipv6_entry_del,
167 .entry_set = &mlxsw_sp_nve_mc_record_ipv6_entry_set,
168 .entry_compare = &mlxsw_sp_nve_mc_record_ipv6_entry_compare,
/* Dispatch table: record ops selected by L3 protocol. */
171 static const struct mlxsw_sp_nve_mc_record_ops *
172 mlxsw_sp_nve_mc_record_ops_arr[] = {
173 [MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops,
174 [MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
/* Translate a learned underlay IP (@uip, host order) into @addr per
 * @proto. Only the IPv4 arm is visible in this dump; the IPv6/default
 * arms are truncated. TODO(review): confirm full switch against source.
 */
177 int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
178 enum mlxsw_sp_l3proto proto,
179 union mlxsw_sp_l3addr *addr)
182 case MLXSW_SP_L3_PROTO_IPV4:
183 addr->addr4 = cpu_to_be32(uip);
/* Look up the multicast list for @key in the NVE hash table; returns
 * NULL when absent.
 */
191 static struct mlxsw_sp_nve_mc_list *
192 mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
193 const struct mlxsw_sp_nve_mc_list_key *key)
195 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
197 return rhashtable_lookup_fast(&nve->mc_list_ht, key,
198 mlxsw_sp_nve_mc_list_ht_params);
/* Allocate a new mc_list for @key, init its records list and insert it
 * into the hash table. Returns ERR_PTR on failure; the error unwind
 * (kfree under err_rhashtable_insert) is truncated in this dump.
 */
201 static struct mlxsw_sp_nve_mc_list *
202 mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp,
203 const struct mlxsw_sp_nve_mc_list_key *key)
205 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
206 struct mlxsw_sp_nve_mc_list *mc_list;
209 mc_list = kmalloc(sizeof(*mc_list), GFP_KERNEL);
211 return ERR_PTR(-ENOMEM);
213 INIT_LIST_HEAD(&mc_list->records_list);
216 err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node,
217 mlxsw_sp_nve_mc_list_ht_params);
219 goto err_rhashtable_insert;
223 err_rhashtable_insert:
/* Remove @mc_list from the hash table and free it (kfree truncated).
 * The records list must already be empty — warn otherwise.
 */
228 static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp,
229 struct mlxsw_sp_nve_mc_list *mc_list)
231 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
233 rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node,
234 mlxsw_sp_nve_mc_list_ht_params);
235 WARN_ON(!list_empty(&mc_list->records_list));
/* Find-or-create: return the existing list for @key, else create one. */
239 static struct mlxsw_sp_nve_mc_list *
240 mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp,
241 const struct mlxsw_sp_nve_mc_list_key *key)
243 struct mlxsw_sp_nve_mc_list *mc_list;
245 mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key);
249 return mlxsw_sp_nve_mc_list_create(mlxsw_sp, key);
/* Drop a reference: destroy the list only once no records remain. */
253 mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp,
254 struct mlxsw_sp_nve_mc_list *mc_list)
256 if (!list_empty(&mc_list->records_list))
258 mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list);
/* Allocate a record sized for the maximum entry count of @proto,
 * reserve one TNUMT KVDL entry for it and append it to the list tail.
 * Error unwind (KVDL-free/kfree) is truncated in this dump.
 */
261 static struct mlxsw_sp_nve_mc_record *
262 mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
263 struct mlxsw_sp_nve_mc_list *mc_list,
264 enum mlxsw_sp_l3proto proto)
266 unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto];
267 struct mlxsw_sp_nve_mc_record *mc_record;
/* NOTE(review): open-coded size math; struct_size() would add overflow
 * checking if the surrounding kernel version provides it.
 */
270 mc_record = kzalloc(sizeof(*mc_record) + num_max_entries *
271 sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL);
273 return ERR_PTR(-ENOMEM);
275 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
276 &mc_record->kvdl_index);
280 mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto];
281 mc_record->mlxsw_sp = mlxsw_sp;
282 mc_record->mc_list = mc_list;
283 mc_record->proto = proto;
284 list_add_tail(&mc_record->list, &mc_list->records_list);
/* Unlink the record, release its TNUMT KVDL entry and free it (kfree
 * truncated). All entries must already be gone — warn otherwise.
 */
294 mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record)
296 struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
298 list_del(&mc_record->list);
299 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
300 mc_record->kvdl_index);
301 WARN_ON(mc_record->num_entries);
/* Return a record of @proto with free capacity, scanning from the tail
 * (newest first); create a fresh one if none has room.
 */
305 static struct mlxsw_sp_nve_mc_record *
306 mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp,
307 struct mlxsw_sp_nve_mc_list *mc_list,
308 enum mlxsw_sp_l3proto proto)
310 struct mlxsw_sp_nve_mc_record *mc_record;
312 list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) {
313 unsigned int num_entries = mc_record->num_entries;
314 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
316 if (mc_record->proto == proto &&
317 num_entries < nve->num_max_mc_entries[proto])
321 return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto);
/* Drop a reference: destroy the record only when it holds no entries. */
325 mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record)
327 if (mc_record->num_entries != 0)
330 mlxsw_sp_nve_mc_record_destroy(mc_record);
/* Linear scan for the first invalid (free) entry slot in the record;
 * the NULL-return path for a full record is truncated in this dump.
 */
333 static struct mlxsw_sp_nve_mc_entry *
334 mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record)
336 struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
337 unsigned int num_max_entries;
340 num_max_entries = nve->num_max_mc_entries[mc_record->proto];
341 for (i = 0; i < num_max_entries; i++) {
342 if (mc_record->entries[i].valid)
344 return &mc_record->entries[i];
/* Re-program the record's TNUMT register: chain to the next record (if
 * any) via next_kvdl_index/next_valid, then emit every valid entry.
 * The final entry count must match the cached num_entries — warn if not.
 */
351 mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
353 enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type;
354 struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
355 struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
356 char tnumt_pl[MLXSW_REG_TNUMT_LEN];
357 unsigned int num_max_entries;
358 unsigned int num_entries = 0;
359 u32 next_kvdl_index = 0;
360 bool next_valid = false;
/* Records form a singly linked hardware chain; point at the successor
 * unless we are the list tail (next_valid assignment truncated here).
 */
363 if (!list_is_last(&mc_record->list, &mc_list->records_list)) {
364 struct mlxsw_sp_nve_mc_record *next_record;
366 next_record = list_next_entry(mc_record, list);
367 next_kvdl_index = next_record->kvdl_index;
371 mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
372 mc_record->kvdl_index, next_valid,
373 next_kvdl_index, mc_record->num_entries);
375 num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto];
376 for (i = 0; i < num_max_entries; i++) {
377 struct mlxsw_sp_nve_mc_entry *mc_entry;
379 mc_entry = &mc_record->entries[i];
380 if (!mc_entry->valid)
382 mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl,
386 WARN_ON(num_entries != mc_record->num_entries);
388 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
/* True when @mc_record heads its list — the head record's KVDL address
 * is the one stored on the FID (see fid_flood_index_set below).
 */
392 mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record)
394 struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
395 struct mlxsw_sp_nve_mc_record *first_record;
397 first_record = list_first_entry(&mc_list->records_list,
398 struct mlxsw_sp_nve_mc_record, list);
400 return mc_record == first_record;
/* Scan the record's valid entries for one matching @addr via the
 * protocol's compare op; NULL-return path truncated in this dump.
 */
403 static struct mlxsw_sp_nve_mc_entry *
404 mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record,
405 union mlxsw_sp_l3addr *addr)
407 struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
408 unsigned int num_max_entries;
411 num_max_entries = nve->num_max_mc_entries[mc_record->proto];
412 for (i = 0; i < num_max_entries; i++) {
413 struct mlxsw_sp_nve_mc_entry *mc_entry;
415 mc_entry = &mc_record->entries[i];
416 if (!mc_entry->valid)
418 if (mc_record->ops->entry_compare(mc_record, mc_entry, addr))
/* Add @addr to @mc_record: claim a free slot, fill it via entry_add,
 * mark it valid and re-program the record. If this record just became
 * non-empty and has a predecessor, refresh the predecessor too so its
 * next-pointer picks up this record. Unwind restores entry state on
 * failure (some unwind lines truncated in this dump).
 */
426 mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record,
427 union mlxsw_sp_l3addr *addr)
429 struct mlxsw_sp_nve_mc_entry *mc_entry = NULL;
432 mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record);
433 if (WARN_ON(!mc_entry))
436 err = mc_record->ops->entry_add(mc_record, mc_entry, addr);
439 mc_record->num_entries++;
440 mc_entry->valid = true;
442 err = mlxsw_sp_nve_mc_record_refresh(mc_record);
444 goto err_record_refresh;
446 /* If this is a new record and not the first one, then we need to
447 * update the next pointer of the previous entry
449 if (mc_record->num_entries != 1 ||
450 mlxsw_sp_nve_mc_record_is_first(mc_record))
453 err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list));
455 goto err_prev_record_refresh;
459 err_prev_record_refresh:
461 mc_entry->valid = false;
462 mc_record->num_entries--;
463 mc_record->ops->entry_del(mc_record, mc_entry);
/* Invalidate @mc_entry and handle the four record cases in order:
 * (1) record still has entries — just refresh it;
 * (2) record becomes empty and is not the list head — detach it so the
 *     predecessor's refresh drops its next-pointer, then re-append it
 *     at the tail for later destruction;
 * (3) record becomes empty, is the head, and other records exist —
 *     swap KVDL indices with the successor so the FID-stored address
 *     (the head's KVDL index) stays valid;
 * (4) last remaining record — only the entry itself needs deleting.
 * Early 'return's between cases are truncated in this dump.
 */
468 mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record,
469 struct mlxsw_sp_nve_mc_entry *mc_entry)
471 struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
473 mc_entry->valid = false;
474 mc_record->num_entries--;
476 /* When the record continues to exist we only need to invalidate
477 * the requested entry
479 if (mc_record->num_entries != 0) {
480 mlxsw_sp_nve_mc_record_refresh(mc_record);
481 mc_record->ops->entry_del(mc_record, mc_entry);
485 /* If the record needs to be deleted, but it is not the first,
486 * then we need to make sure that the previous record no longer
487 * points to it. Remove deleted record from the list to reflect
488 * that and then re-add it at the end, so that it could be
489 * properly removed by the record destruction code
491 if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) {
492 struct mlxsw_sp_nve_mc_record *prev_record;
494 prev_record = list_prev_entry(mc_record, list);
495 list_del(&mc_record->list);
496 mlxsw_sp_nve_mc_record_refresh(prev_record);
497 list_add_tail(&mc_record->list, &mc_list->records_list);
498 mc_record->ops->entry_del(mc_record, mc_entry);
502 /* If the first record needs to be deleted, but the list is not
503 * singular, then the second record needs to be written in the
504 * first record's address, as this address is stored as a property
507 if (mlxsw_sp_nve_mc_record_is_first(mc_record) &&
508 !list_is_singular(&mc_list->records_list)) {
509 struct mlxsw_sp_nve_mc_record *next_record;
511 next_record = list_next_entry(mc_record, list);
512 swap(mc_record->kvdl_index, next_record->kvdl_index);
513 mlxsw_sp_nve_mc_record_refresh(next_record);
514 mc_record->ops->entry_del(mc_record, mc_entry);
518 /* This is the last case where the last remaining record needs to
519 * be deleted. Simply delete the entry
521 mc_record->ops->entry_del(mc_record, mc_entry);
/* Find the record of @proto containing @addr; on success *mc_entry is
 * set to the matching entry. Return path truncated in this dump.
 */
524 static struct mlxsw_sp_nve_mc_record *
525 mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list,
526 enum mlxsw_sp_l3proto proto,
527 union mlxsw_sp_l3addr *addr,
528 struct mlxsw_sp_nve_mc_entry **mc_entry)
530 struct mlxsw_sp_nve_mc_record *mc_record;
532 list_for_each_entry(mc_record, &mc_list->records_list, list) {
533 if (mc_record->proto != proto)
536 *mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr);
/* Add @addr to @mc_list: take a record with capacity and insert the IP;
 * on failure drop the record reference (error label truncated).
 */
544 static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp,
545 struct mlxsw_sp_nve_mc_list *mc_list,
546 enum mlxsw_sp_l3proto proto,
547 union mlxsw_sp_l3addr *addr)
549 struct mlxsw_sp_nve_mc_record *mc_record;
552 mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto);
553 if (IS_ERR(mc_record))
554 return PTR_ERR(mc_record);
556 err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr);
563 mlxsw_sp_nve_mc_record_put(mc_record);
/* Remove @addr from @mc_list: locate its record/entry, delete the entry
 * and drop the record reference (not-found early return truncated).
 */
567 static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
568 struct mlxsw_sp_nve_mc_list *mc_list,
569 enum mlxsw_sp_l3proto proto,
570 union mlxsw_sp_l3addr *addr)
572 struct mlxsw_sp_nve_mc_record *mc_record;
573 struct mlxsw_sp_nve_mc_entry *mc_entry;
575 mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
580 mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
581 mlxsw_sp_nve_mc_record_put(mc_record);
/* Store the head record's KVDL index on the FID as its NVE flood
 * index, but only once — the head address is a fixed FID property.
 */
585 mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid,
586 struct mlxsw_sp_nve_mc_list *mc_list)
588 struct mlxsw_sp_nve_mc_record *mc_record;
590 /* The address of the first record in the list is a property of
591 * the FID and we never change it. It only needs to be set when
592 * a new list is created
594 if (mlxsw_sp_fid_nve_flood_index_is_set(fid))
597 mc_record = list_first_entry(&mc_list->records_list,
598 struct mlxsw_sp_nve_mc_record, list);
600 return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index);
/* Clear the FID's NVE flood index, but only when the single remaining
 * record is down to its last entry — i.e. the list is about to empty.
 */
604 mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid,
605 struct mlxsw_sp_nve_mc_list *mc_list)
607 struct mlxsw_sp_nve_mc_record *mc_record;
609 /* The address of the first record needs to be invalidated only when
610 * the last record is about to be removed
612 if (!list_is_singular(&mc_list->records_list))
615 mc_record = list_first_entry(&mc_list->records_list,
616 struct mlxsw_sp_nve_mc_record, list);
617 if (mc_record->num_entries != 1)
620 return mlxsw_sp_fid_nve_flood_index_clear(fid);
/* Public entry: add remote-VTEP @addr to @fid's flood list. Gets (or
 * creates) the per-FID mc_list keyed by FID index, inserts the IP, then
 * publishes the flood index on the FID; unwinds in reverse on failure.
 */
623 int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
624 struct mlxsw_sp_fid *fid,
625 enum mlxsw_sp_l3proto proto,
626 union mlxsw_sp_l3addr *addr)
628 struct mlxsw_sp_nve_mc_list_key key = { 0 };
629 struct mlxsw_sp_nve_mc_list *mc_list;
632 key.fid_index = mlxsw_sp_fid_index(fid);
633 mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key);
635 return PTR_ERR(mc_list);
637 err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr);
641 err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list);
643 goto err_fid_flood_index_set;
647 err_fid_flood_index_set:
648 mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
650 mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
/* Public entry: remove remote-VTEP @addr from @fid's flood list, clear
 * the FID flood index if this empties the list, and drop the list ref
 * (not-found early return truncated in this dump).
 */
654 void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
655 struct mlxsw_sp_fid *fid,
656 enum mlxsw_sp_l3proto proto,
657 union mlxsw_sp_l3addr *addr)
659 struct mlxsw_sp_nve_mc_list_key key = { 0 };
660 struct mlxsw_sp_nve_mc_list *mc_list;
662 key.fid_index = mlxsw_sp_fid_index(fid);
663 mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
667 mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
668 mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
669 mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
/* Delete every valid entry of @mc_record and then drop it; num_entries
 * must reach zero by then — warn otherwise.
 */
673 mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record)
675 struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
676 unsigned int num_max_entries;
679 num_max_entries = nve->num_max_mc_entries[mc_record->proto];
680 for (i = 0; i < num_max_entries; i++) {
681 struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i];
683 if (!mc_entry->valid)
685 mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
688 WARN_ON(mc_record->num_entries);
689 mlxsw_sp_nve_mc_record_put(mc_record);
/* Flush @fid's entire flood list: clear the FID flood index first, then
 * delete every record (safe iteration — records unlink themselves) and
 * drop the list. No-op when the FID never had a flood index.
 */
692 static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
693 struct mlxsw_sp_fid *fid)
695 struct mlxsw_sp_nve_mc_record *mc_record, *tmp;
696 struct mlxsw_sp_nve_mc_list_key key = { 0 };
697 struct mlxsw_sp_nve_mc_list *mc_list;
699 if (!mlxsw_sp_fid_nve_flood_index_is_set(fid))
702 mlxsw_sp_fid_nve_flood_index_clear(fid);
704 key.fid_index = mlxsw_sp_fid_index(fid);
705 mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
706 if (WARN_ON(!mc_list))
709 list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list)
710 mlxsw_sp_nve_mc_record_delete(mc_record);
712 WARN_ON(!list_empty(&mc_list->records_list));
713 mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
/* Return the shared decap tunnel (adjacency) index; warns if called
 * with no tunnels configured, when the index is stale.
 */
716 u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp)
718 WARN_ON(mlxsw_sp->nve->num_nve_tunnels == 0);
720 return mlxsw_sp->nve->tunnel_index;
/* True when an IPv4 route (@tb_id/@addr) matches the active tunnel's
 * underlay source IP and table — i.e. it is the decap route. The
 * 'return true/false' lines are truncated in this dump.
 */
723 bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
724 u32 tb_id, __be32 addr)
726 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
727 struct mlxsw_sp_nve_config *config = &nve->config;
729 if (nve->num_nve_tunnels &&
730 config->ul_proto == MLXSW_SP_L3_PROTO_IPV4 &&
731 config->ul_sip.addr4 == addr && config->ul_tb_id == tb_id)
/* Reference-counted tunnel bring-up: only the first user (post-increment
 * from 0) allocates the ADJ KVDL entry and runs the type-specific init;
 * later callers return early (early-return line truncated). On failure
 * the KVDL entry is freed and the refcount rolled back.
 */
737 static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
738 struct mlxsw_sp_nve_config *config)
740 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
741 const struct mlxsw_sp_nve_ops *ops;
744 if (nve->num_nve_tunnels++ != 0)
747 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
752 ops = nve->nve_ops_arr[config->type];
753 err = ops->init(nve, config);
760 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
763 nve->num_nve_tunnels--;
/* Reference-counted tunnel tear-down: the last user (refcount == 1)
 * runs the type-specific fini (truncated) and frees the ADJ KVDL entry;
 * the refcount is decremented unconditionally.
 */
767 static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
769 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
770 const struct mlxsw_sp_nve_ops *ops;
772 ops = nve->nve_ops_arr[nve->config.type];
774 if (mlxsw_sp->nve->num_nve_tunnels == 1) {
776 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
779 nve->num_nve_tunnels--;
/* Flush all NVE FDB entries of @fid_index via the SFDF register.
 * NOTE(review): the mlxsw_reg_write() return value is ignored here —
 * presumably best-effort on teardown; confirm against full source.
 */
782 static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
785 char sfdf_pl[MLXSW_REG_SFDF_LEN];
787 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID);
788 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
789 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
/* Ask the NVE type's driver to clear offload marks from the software
 * FDB of @nve_dev for @vni; warns (and bails) if the FID's NVE type
 * cannot be resolved.
 */
792 static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
793 const struct mlxsw_sp_fid *fid,
794 const struct net_device *nve_dev,
797 const struct mlxsw_sp_nve_ops *ops;
798 enum mlxsw_sp_nve_type type;
800 if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
803 ops = mlxsw_sp->nve->nve_ops_arr[type];
804 ops->fdb_clear_offload(nve_dev, vni);
/* Enable NVE on @fid: validate the device can be offloaded, derive its
 * config and reject it if it conflicts with an already-active tunnel
 * (all tunnels must share one underlay config), then init the tunnel,
 * bind the VNI to the FID and replay existing FDB entries. Error labels
 * unwind in reverse order (some labels truncated in this dump).
 */
807 int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
808 struct mlxsw_sp_nve_params *params,
809 struct netlink_ext_ack *extack)
811 struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
812 const struct mlxsw_sp_nve_ops *ops;
813 struct mlxsw_sp_nve_config config;
816 ops = nve->nve_ops_arr[params->type];
818 if (!ops->can_offload(nve, params->dev, extack))
821 memset(&config, 0, sizeof(config));
822 ops->nve_config(nve, params->dev, &config);
823 if (nve->num_nve_tunnels &&
824 memcmp(&config, &nve->config, sizeof(config))) {
825 NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
829 err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
831 NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel");
835 err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni,
836 params->dev->ifindex);
838 NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
839 goto err_fid_vni_set;
842 nve->config = config;
844 err = ops->fdb_replay(params->dev, params->vni);
846 NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB");
853 mlxsw_sp_fid_vni_clear(fid);
855 mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
/* Disable NVE on @fid: flush its flood list and hardware FDB, then —
 * if the backing netdev can still be resolved — clear offload marks
 * from the software FDB (the dev_put() pairing dev_get_by_index() is
 * truncated in this dump). Finally unbind the VNI and release the
 * tunnel reference.
 */
859 void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
860 struct mlxsw_sp_fid *fid)
862 u16 fid_index = mlxsw_sp_fid_index(fid);
863 struct net_device *nve_dev;
867 mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
868 mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
870 if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
871 mlxsw_sp_fid_vni(fid, &vni)))
874 nve_dev = dev_get_by_index(&init_net, nve_ifindex);
878 mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
879 mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);
884 mlxsw_sp_fid_vni_clear(fid);
885 mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
/* Per-port NVE init: program the port's tunneling decap settings via
 * the TNQDR register.
 */
888 int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port)
890 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
891 char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
893 mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
894 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
/* Per-port NVE fini; body truncated in this dump (appears empty). */
897 void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port)
/* Program default tunnel QoS behavior via the TNQCR register. */
901 static int mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp)
903 char tnqcr_pl[MLXSW_REG_TNQCR_LEN];
905 mlxsw_reg_tnqcr_pack(tnqcr_pl);
906 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl);
/* Program the encap ECN mapping (TNEEM): for each inner ECN value,
 * set the outer ECN per RFC 6040 via INET_ECN_encapsulate(). Error
 * check/return lines are truncated in this dump.
 */
909 static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
913 /* Iterate over inner ECN values */
914 for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
915 u8 outer_ecn = INET_ECN_encapsulate(0, i);
916 char tneem_pl[MLXSW_REG_TNEEM_LEN];
919 mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
920 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
/* Program one decap ECN mapping (TNDEM) for the (outer, inner) pair:
 * __INET_ECN_decapsulate() decides whether to rewrite inner to CE and
 * whether the combination must trap (DECAP_ECN0) instead of forwarding.
 */
929 static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
930 u8 inner_ecn, u8 outer_ecn)
932 char tndem_pl[MLXSW_REG_TNDEM_LEN];
933 bool trap_en, set_ce = false;
936 trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
937 new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
939 mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
940 trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
941 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
/* Program decap ECN handling for all inner x outer ECN combinations;
 * error propagation lines are truncated in this dump.
 */
944 static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
948 /* Iterate over inner ECN values */
949 for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
952 /* Iterate over outer ECN values */
953 for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
956 err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j);
/* ECN init: encap mapping first, then decap mapping. */
965 static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp)
969 err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp);
973 return mlxsw_sp_nve_ecn_decap_init(mlxsw_sp);
/* Cache the firmware-reported maximum multicast entries per record for
 * IPv4 and IPv6; fails (error return truncated) when either resource
 * is not exposed by the device.
 */
976 static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp)
980 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) ||
981 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6))
983 max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4);
984 mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max;
985 max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6);
986 mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max;
/* Module init: allocate the nve context, init the mc_list hash table,
 * then QoS, ECN and resource-query setup in order. Error labels unwind
 * in reverse. NOTE(review): the kfree(nve) expected after the
 * err_rhashtable_init label is not visible in this truncated dump —
 * verify against the full source that the allocation is freed on error.
 */
991 int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
993 struct mlxsw_sp_nve *nve;
996 nve = kzalloc(sizeof(*mlxsw_sp->nve), GFP_KERNEL);
1000 nve->mlxsw_sp = mlxsw_sp;
1001 nve->nve_ops_arr = mlxsw_sp->nve_ops_arr;
1003 err = rhashtable_init(&nve->mc_list_ht,
1004 &mlxsw_sp_nve_mc_list_ht_params);
1006 goto err_rhashtable_init;
1008 err = mlxsw_sp_nve_qos_init(mlxsw_sp);
1010 goto err_nve_qos_init;
1012 err = mlxsw_sp_nve_ecn_init(mlxsw_sp);
1014 goto err_nve_ecn_init;
1016 err = mlxsw_sp_nve_resources_query(mlxsw_sp);
1018 goto err_nve_resources_query;
1022 err_nve_resources_query:
1025 rhashtable_destroy(&nve->mc_list_ht);
1026 err_rhashtable_init:
1027 mlxsw_sp->nve = NULL;
1032 void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
1034 WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
1035 rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
1036 mlxsw_sp->nve = NULL;
1037 kfree(mlxsw_sp->nve);