1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
26 struct mlxsw_sp_bridge_ops;
28 struct mlxsw_sp_bridge {
29 struct mlxsw_sp *mlxsw_sp;
31 struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 unsigned int interval; /* ms */
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
39 bool vlan_enabled_exists;
40 struct list_head bridges_list;
41 DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
46 struct mlxsw_sp_bridge_device {
47 struct net_device *dev;
48 struct list_head list;
49 struct list_head ports_list;
50 struct list_head mids_list;
54 const struct mlxsw_sp_bridge_ops *ops;
57 struct mlxsw_sp_bridge_port {
58 struct net_device *dev;
59 struct mlxsw_sp_bridge_device *bridge_device;
60 struct list_head list;
61 struct list_head vlans_list;
62 unsigned int ref_count;
73 struct mlxsw_sp_bridge_vlan {
74 struct list_head list;
75 struct list_head port_vlan_list;
79 struct mlxsw_sp_bridge_ops {
80 int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 struct mlxsw_sp_bridge_port *bridge_port,
82 struct mlxsw_sp_port *mlxsw_sp_port,
83 struct netlink_ext_ack *extack);
84 void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 struct mlxsw_sp_bridge_port *bridge_port,
86 struct mlxsw_sp_port *mlxsw_sp_port);
87 int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
88 const struct net_device *vxlan_dev, u16 vid,
89 struct netlink_ext_ack *extack);
91 (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
94 (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
96 u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
97 const struct mlxsw_sp_fid *fid);
101 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
102 struct mlxsw_sp_bridge_port *bridge_port,
106 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
107 struct mlxsw_sp_bridge_port *bridge_port);
110 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
111 struct mlxsw_sp_bridge_device
115 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
116 struct mlxsw_sp_bridge_port *bridge_port,
119 static struct mlxsw_sp_bridge_device *
120 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
121 const struct net_device *br_dev)
123 struct mlxsw_sp_bridge_device *bridge_device;
125 list_for_each_entry(bridge_device, &bridge->bridges_list, list)
126 if (bridge_device->dev == br_dev)
127 return bridge_device;
132 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
133 const struct net_device *br_dev)
135 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF (router
 * interface) associated with each upper device. Always returns 0 so
 * the walk continues.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);

	return 0;
}
147 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
148 struct net_device *dev)
150 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
151 netdev_walk_all_upper_dev_rcu(dev,
152 mlxsw_sp_bridge_device_upper_rif_destroy,
156 static struct mlxsw_sp_bridge_device *
157 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
158 struct net_device *br_dev)
160 struct device *dev = bridge->mlxsw_sp->bus_info->dev;
161 struct mlxsw_sp_bridge_device *bridge_device;
162 bool vlan_enabled = br_vlan_enabled(br_dev);
164 if (vlan_enabled && bridge->vlan_enabled_exists) {
165 dev_err(dev, "Only one VLAN-aware bridge is supported\n");
166 return ERR_PTR(-EINVAL);
169 bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
171 return ERR_PTR(-ENOMEM);
173 bridge_device->dev = br_dev;
174 bridge_device->vlan_enabled = vlan_enabled;
175 bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
176 bridge_device->mrouter = br_multicast_router(br_dev);
177 INIT_LIST_HEAD(&bridge_device->ports_list);
179 bridge->vlan_enabled_exists = true;
180 bridge_device->ops = bridge->bridge_8021q_ops;
182 bridge_device->ops = bridge->bridge_8021d_ops;
184 INIT_LIST_HEAD(&bridge_device->mids_list);
185 list_add(&bridge_device->list, &bridge->bridges_list);
187 return bridge_device;
191 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
192 struct mlxsw_sp_bridge_device *bridge_device)
194 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
196 list_del(&bridge_device->list);
197 if (bridge_device->vlan_enabled)
198 bridge->vlan_enabled_exists = false;
199 WARN_ON(!list_empty(&bridge_device->ports_list));
200 WARN_ON(!list_empty(&bridge_device->mids_list));
201 kfree(bridge_device);
/* Return the existing offload record for @br_dev, or create one. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev);
}
218 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
219 struct mlxsw_sp_bridge_device *bridge_device)
221 if (list_empty(&bridge_device->ports_list))
222 mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
225 static struct mlxsw_sp_bridge_port *
226 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
227 const struct net_device *brport_dev)
229 struct mlxsw_sp_bridge_port *bridge_port;
231 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
232 if (bridge_port->dev == brport_dev)
239 struct mlxsw_sp_bridge_port *
240 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
241 struct net_device *brport_dev)
243 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
244 struct mlxsw_sp_bridge_device *bridge_device;
249 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
253 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
256 static struct mlxsw_sp_bridge_port *
257 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
258 struct net_device *brport_dev)
260 struct mlxsw_sp_bridge_port *bridge_port;
261 struct mlxsw_sp_port *mlxsw_sp_port;
263 bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
267 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
268 bridge_port->lagged = mlxsw_sp_port->lagged;
269 if (bridge_port->lagged)
270 bridge_port->lag_id = mlxsw_sp_port->lag_id;
272 bridge_port->system_port = mlxsw_sp_port->local_port;
273 bridge_port->dev = brport_dev;
274 bridge_port->bridge_device = bridge_device;
275 bridge_port->stp_state = BR_STATE_DISABLED;
276 bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
278 INIT_LIST_HEAD(&bridge_port->vlans_list);
279 list_add(&bridge_port->list, &bridge_device->ports_list);
280 bridge_port->ref_count = 1;
286 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
288 list_del(&bridge_port->list);
289 WARN_ON(!list_empty(&bridge_port->vlans_list));
294 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
297 struct net_device *dev = bridge_port->dev;
298 struct mlxsw_sp *mlxsw_sp;
300 if (is_vlan_dev(dev))
301 mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
303 mlxsw_sp = mlxsw_sp_lower_get(dev);
305 /* In case ports were pulled from out of a bridged LAG, then
306 * it's possible the reference count isn't zero, yet the bridge
307 * port should be destroyed, as it's no longer an upper of ours.
309 if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
311 else if (bridge_port->ref_count == 0)
317 static struct mlxsw_sp_bridge_port *
318 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
319 struct net_device *brport_dev)
321 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
322 struct mlxsw_sp_bridge_device *bridge_device;
323 struct mlxsw_sp_bridge_port *bridge_port;
326 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
328 bridge_port->ref_count++;
332 bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
333 if (IS_ERR(bridge_device))
334 return ERR_CAST(bridge_device);
336 bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
339 goto err_bridge_port_create;
344 err_bridge_port_create:
345 mlxsw_sp_bridge_device_put(bridge, bridge_device);
349 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
350 struct mlxsw_sp_bridge_port *bridge_port)
352 struct mlxsw_sp_bridge_device *bridge_device;
354 bridge_port->ref_count--;
355 if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
357 bridge_device = bridge_port->bridge_device;
358 mlxsw_sp_bridge_port_destroy(bridge_port);
359 mlxsw_sp_bridge_device_put(bridge, bridge_device);
362 static struct mlxsw_sp_port_vlan *
363 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
364 const struct mlxsw_sp_bridge_device *
368 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
370 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
372 if (!mlxsw_sp_port_vlan->bridge_port)
374 if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
377 if (bridge_device->vlan_enabled &&
378 mlxsw_sp_port_vlan->vid != vid)
380 return mlxsw_sp_port_vlan;
386 static struct mlxsw_sp_port_vlan*
387 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
390 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
392 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
394 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
396 if (fid && mlxsw_sp_fid_index(fid) == fid_index)
397 return mlxsw_sp_port_vlan;
403 static struct mlxsw_sp_bridge_vlan *
404 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
407 struct mlxsw_sp_bridge_vlan *bridge_vlan;
409 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
410 if (bridge_vlan->vid == vid)
417 static struct mlxsw_sp_bridge_vlan *
418 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
420 struct mlxsw_sp_bridge_vlan *bridge_vlan;
422 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
426 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
427 bridge_vlan->vid = vid;
428 list_add(&bridge_vlan->list, &bridge_port->vlans_list);
434 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
436 list_del(&bridge_vlan->list);
437 WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
441 static struct mlxsw_sp_bridge_vlan *
442 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
444 struct mlxsw_sp_bridge_vlan *bridge_vlan;
446 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
450 return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
453 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
455 if (list_empty(&bridge_vlan->port_vlan_list))
456 mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
459 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
460 struct net_device *dev,
461 unsigned long *brport_flags)
463 struct mlxsw_sp_bridge_port *bridge_port;
465 bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
466 if (WARN_ON(!bridge_port))
469 memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
472 static int mlxsw_sp_port_attr_get(struct net_device *dev,
473 struct switchdev_attr *attr)
475 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
476 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
479 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
480 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
481 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
482 attr->u.ppid.id_len);
484 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
485 mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
486 &attr->u.brport_flags);
488 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
489 attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
500 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
501 struct mlxsw_sp_bridge_vlan *bridge_vlan,
504 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
506 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
508 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
510 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
511 bridge_vlan->vid, state);
517 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
518 struct switchdev_trans *trans,
519 struct net_device *orig_dev,
522 struct mlxsw_sp_bridge_port *bridge_port;
523 struct mlxsw_sp_bridge_vlan *bridge_vlan;
526 if (switchdev_trans_ph_prepare(trans))
529 /* It's possible we failed to enslave the port, yet this
530 * operation is executed due to it being deferred.
532 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
537 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
538 err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
541 goto err_port_bridge_vlan_stp_set;
544 bridge_port->stp_state = state;
548 err_port_bridge_vlan_stp_set:
549 list_for_each_entry_continue_reverse(bridge_vlan,
550 &bridge_port->vlans_list, list)
551 mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
552 bridge_port->stp_state);
557 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
558 struct mlxsw_sp_bridge_vlan *bridge_vlan,
559 enum mlxsw_sp_flood_type packet_type,
562 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
564 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
566 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
568 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
570 mlxsw_sp_port->local_port,
578 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
579 struct mlxsw_sp_bridge_port *bridge_port,
580 enum mlxsw_sp_flood_type packet_type,
583 struct mlxsw_sp_bridge_vlan *bridge_vlan;
586 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
587 err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
592 goto err_port_bridge_vlan_flood_set;
597 err_port_bridge_vlan_flood_set:
598 list_for_each_entry_continue_reverse(bridge_vlan,
599 &bridge_port->vlans_list, list)
600 mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
601 packet_type, !member);
606 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
607 struct mlxsw_sp_bridge_vlan *bridge_vlan,
610 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
611 u16 vid = bridge_vlan->vid;
613 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
615 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
617 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
624 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
625 struct mlxsw_sp_bridge_port *bridge_port,
628 struct mlxsw_sp_bridge_vlan *bridge_vlan;
631 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
632 err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
635 goto err_port_bridge_vlan_learning_set;
640 err_port_bridge_vlan_learning_set:
641 list_for_each_entry_continue_reverse(bridge_vlan,
642 &bridge_port->vlans_list, list)
643 mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
648 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
649 struct switchdev_trans *trans,
650 struct net_device *orig_dev,
651 unsigned long brport_flags)
653 struct mlxsw_sp_bridge_port *bridge_port;
656 if (switchdev_trans_ph_prepare(trans))
659 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
664 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
665 MLXSW_SP_FLOOD_TYPE_UC,
666 brport_flags & BR_FLOOD);
670 err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
671 brport_flags & BR_LEARNING);
675 if (bridge_port->bridge_device->multicast_enabled)
678 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
679 MLXSW_SP_FLOOD_TYPE_MC,
686 memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
690 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
692 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
695 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
696 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
699 mlxsw_sp->bridge->ageing_time = ageing_time;
703 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
704 struct switchdev_trans *trans,
705 unsigned long ageing_clock_t)
707 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
708 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
709 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
711 if (switchdev_trans_ph_prepare(trans)) {
712 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
713 ageing_time > MLXSW_SP_MAX_AGEING_TIME)
719 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
722 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
723 struct switchdev_trans *trans,
724 struct net_device *orig_dev,
727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
728 struct mlxsw_sp_bridge_device *bridge_device;
730 if (!switchdev_trans_ph_prepare(trans))
733 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
734 if (WARN_ON(!bridge_device))
737 if (bridge_device->vlan_enabled == vlan_enabled)
740 netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
744 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
745 struct switchdev_trans *trans,
746 struct net_device *orig_dev,
747 bool is_port_mrouter)
749 struct mlxsw_sp_bridge_port *bridge_port;
752 if (switchdev_trans_ph_prepare(trans))
755 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
760 if (!bridge_port->bridge_device->multicast_enabled)
763 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
764 MLXSW_SP_FLOOD_TYPE_MC,
769 mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
772 bridge_port->mrouter = is_port_mrouter;
776 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
778 const struct mlxsw_sp_bridge_device *bridge_device;
780 bridge_device = bridge_port->bridge_device;
781 return bridge_device->multicast_enabled ? bridge_port->mrouter :
782 bridge_port->flags & BR_MCAST_FLOOD;
785 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
786 struct switchdev_trans *trans,
787 struct net_device *orig_dev,
790 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
791 struct mlxsw_sp_bridge_device *bridge_device;
792 struct mlxsw_sp_bridge_port *bridge_port;
795 if (switchdev_trans_ph_prepare(trans))
798 /* It's possible we failed to enslave the port, yet this
799 * operation is executed due to it being deferred.
801 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
805 if (bridge_device->multicast_enabled != !mc_disabled) {
806 bridge_device->multicast_enabled = !mc_disabled;
807 mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
811 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
812 enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
813 bool member = mlxsw_sp_mc_flood(bridge_port);
815 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
817 packet_type, member);
822 bridge_device->multicast_enabled = !mc_disabled;
827 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
828 u16 mid_idx, bool add)
833 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
837 mlxsw_reg_smid_pack(smid_pl, mid_idx,
838 mlxsw_sp_router_port(mlxsw_sp), add);
839 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
845 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
846 struct mlxsw_sp_bridge_device *bridge_device,
849 struct mlxsw_sp_mid *mid;
851 list_for_each_entry(mid, &bridge_device->mids_list, list)
852 mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
856 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
857 struct switchdev_trans *trans,
858 struct net_device *orig_dev,
861 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
862 struct mlxsw_sp_bridge_device *bridge_device;
864 if (switchdev_trans_ph_prepare(trans))
867 /* It's possible we failed to enslave the port, yet this
868 * operation is executed due to it being deferred.
870 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
874 if (bridge_device->mrouter != is_mrouter)
875 mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
877 bridge_device->mrouter = is_mrouter;
881 static int mlxsw_sp_port_attr_set(struct net_device *dev,
882 const struct switchdev_attr *attr,
883 struct switchdev_trans *trans)
885 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
889 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
890 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
894 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
895 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
897 attr->u.brport_flags);
899 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
900 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
901 attr->u.ageing_time);
903 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
904 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
906 attr->u.vlan_filtering);
908 case SWITCHDEV_ATTR_ID_PORT_MROUTER:
909 err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
913 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
914 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
916 attr->u.mc_disabled);
918 case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
919 err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
928 if (switchdev_trans_ph_commit(trans))
929 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
935 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
936 struct mlxsw_sp_bridge_port *bridge_port)
938 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
939 struct mlxsw_sp_bridge_device *bridge_device;
940 u8 local_port = mlxsw_sp_port->local_port;
941 u16 vid = mlxsw_sp_port_vlan->vid;
942 struct mlxsw_sp_fid *fid;
945 bridge_device = bridge_port->bridge_device;
946 fid = bridge_device->ops->fid_get(bridge_device, vid);
950 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
951 bridge_port->flags & BR_FLOOD);
953 goto err_fid_uc_flood_set;
955 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
956 mlxsw_sp_mc_flood(bridge_port));
958 goto err_fid_mc_flood_set;
960 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
963 goto err_fid_bc_flood_set;
965 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
967 goto err_fid_port_vid_map;
969 mlxsw_sp_port_vlan->fid = fid;
973 err_fid_port_vid_map:
974 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
975 err_fid_bc_flood_set:
976 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
977 err_fid_mc_flood_set:
978 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
979 err_fid_uc_flood_set:
980 mlxsw_sp_fid_put(fid);
985 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
987 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
988 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
989 u8 local_port = mlxsw_sp_port->local_port;
990 u16 vid = mlxsw_sp_port_vlan->vid;
992 mlxsw_sp_port_vlan->fid = NULL;
993 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
994 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
995 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
996 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
997 mlxsw_sp_fid_put(fid);
1001 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1002 u16 vid, bool is_pvid)
1006 else if (mlxsw_sp_port->pvid == vid)
1007 return 0; /* Dis-allow untagged packets */
1009 return mlxsw_sp_port->pvid;
1013 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1014 struct mlxsw_sp_bridge_port *bridge_port)
1016 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1017 struct mlxsw_sp_bridge_vlan *bridge_vlan;
1018 u16 vid = mlxsw_sp_port_vlan->vid;
1021 /* No need to continue if only VLAN flags were changed */
1022 if (mlxsw_sp_port_vlan->bridge_port) {
1023 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1027 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
1031 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
1032 bridge_port->flags & BR_LEARNING);
1034 goto err_port_vid_learning_set;
1036 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1037 bridge_port->stp_state);
1039 goto err_port_vid_stp_set;
1041 bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1044 goto err_bridge_vlan_get;
1047 list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1048 &bridge_vlan->port_vlan_list);
1050 mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1052 mlxsw_sp_port_vlan->bridge_port = bridge_port;
1056 err_bridge_vlan_get:
1057 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1058 err_port_vid_stp_set:
1059 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1060 err_port_vid_learning_set:
1061 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1066 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1068 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1069 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1070 struct mlxsw_sp_bridge_vlan *bridge_vlan;
1071 struct mlxsw_sp_bridge_port *bridge_port;
1072 u16 vid = mlxsw_sp_port_vlan->vid;
1073 bool last_port, last_vlan;
1075 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1076 mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1079 bridge_port = mlxsw_sp_port_vlan->bridge_port;
1080 last_vlan = list_is_singular(&bridge_port->vlans_list);
1081 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1082 last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1084 list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1085 mlxsw_sp_bridge_vlan_put(bridge_vlan);
1086 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1087 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1089 mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1091 mlxsw_sp_fid_index(fid));
1093 mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1095 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1097 mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1098 mlxsw_sp_port_vlan->bridge_port = NULL;
1102 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1103 struct mlxsw_sp_bridge_port *bridge_port,
1104 u16 vid, bool is_untagged, bool is_pvid)
1106 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1107 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1108 u16 old_pvid = mlxsw_sp_port->pvid;
1111 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
1112 if (IS_ERR(mlxsw_sp_port_vlan))
1113 return PTR_ERR(mlxsw_sp_port_vlan);
1115 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1118 goto err_port_vlan_set;
1120 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1122 goto err_port_pvid_set;
1124 err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1126 goto err_port_vlan_bridge_join;
1130 err_port_vlan_bridge_join:
1131 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1133 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1135 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1140 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1141 const struct net_device *br_dev,
1142 const struct switchdev_obj_port_vlan *vlan)
1144 struct mlxsw_sp_rif *rif;
1145 struct mlxsw_sp_fid *fid;
1149 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1152 fid = mlxsw_sp_rif_fid(rif);
1153 pvid = mlxsw_sp_fid_8021q_vid(fid);
1155 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1156 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1158 netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1163 netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1172 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1173 const struct switchdev_obj_port_vlan *vlan,
1174 struct switchdev_trans *trans)
1176 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1177 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1178 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1179 struct net_device *orig_dev = vlan->obj.orig_dev;
1180 struct mlxsw_sp_bridge_port *bridge_port;
1183 if (netif_is_bridge_master(orig_dev)) {
1186 if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1187 br_vlan_enabled(orig_dev) &&
1188 switchdev_trans_ph_prepare(trans))
1189 err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1196 if (switchdev_trans_ph_prepare(trans))
1199 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1200 if (WARN_ON(!bridge_port))
1203 if (!bridge_port->bridge_device->vlan_enabled)
1206 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1209 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1219 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1221 return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1222 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1226 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1227 struct mlxsw_sp_bridge_port *bridge_port,
1230 bool lagged = bridge_port->lagged;
1231 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1234 system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1235 mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1236 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1237 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1239 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1242 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1244 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1245 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1248 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1250 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1251 MLXSW_REG_SFD_OP_WRITE_REMOVE;
/* Add/remove a unicast FDB record that points at an NVE (VXLAN) tunnel:
 * the MAC resolves to an underlay IP ('uip') rather than a local port.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	/* Only an IPv4 underlay is handled here; IPv6 falls through below. */
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
	/* num_rec is sampled before the write and compared after it; a
	 * mismatch indicates the device did not commit the record.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))

/* Core helper: write one unicast FDB record bound to a local port, with
 * caller-chosen record action (forward, trap to router, ...).
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	/* Same commit check as above: record count must be unchanged. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))

/* Convenience wrapper: plain (NOP action) unicast entry on a local port. */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);

/* FDB entry whose action forwards to the IP router — used for router
 * interface (RIF) MAC addresses; local_port 0 is passed for these.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,

/* Write one unicast FDB record that points at a LAG instead of a single
 * local port. 'lag_vid' qualifies the entry within the LAG where needed.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Program a unicast FDB entry in response to a bridge FDB notification.
 * The bridge port and FID are resolved from the notifier's originating
 * device; entries are written as non-dynamic (last arg false) since the
 * bridge owns their lifetime.
 */
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
	if (!mlxsw_sp_port_vlan)
	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;
	/* LAG member ports need the dedicated LAG record type. */
	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
	return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
					   bridge_port->lag_id,
					   fdb_info->addr, fid_index,
					   vid, adding, false);
/* Add/remove a multicast FDB record binding (addr, fid) to the MID
 * (multicast distribution) index 'mid_idx'.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	/* Commit check: device must report the same record count back. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))

/* Write the complete SMID entry for a MID: build the valid-port mask over
 * all existing ports (and the router port), then mark as members the
 * ports set in 'ports_bitmap' plus optionally the router port.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 bool set_router_port)
	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);
	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);

/* Add or remove a single local port in an existing SMID entry. */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 mid_idx, bool add)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Look up an existing MC group (MID) on the bridge by (addr, fid). */
mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
				const unsigned char *addr,
	struct mlxsw_sp_mid *mid;
	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)

/* Collect the local ports backing a bridge port into 'ports_bitmap':
 * either the single system port, or every current member of the LAG.
 */
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_bridge_port *bridge_port,
				      unsigned long *ports_bitmap)
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members, i;
	if (!bridge_port->lagged) {
		set_bit(bridge_port->system_port, ports_bitmap);
		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
		lag_id = bridge_port->lag_id;
		for (i = 0; i < max_lag_members; i++) {
			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
				set_bit(mlxsw_sp_port->local_port,

/* OR the ports of every mrouter bridge port into 'flood_bitmap', so MC
 * traffic is always flooded towards multicast routers.
 */
mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
				struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp *mlxsw_sp)
	struct mlxsw_sp_bridge_port *bridge_port;
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->mrouter) {
			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,

/* Allocate a free MID index and program both its SMID flood set (member
 * ports + mrouter ports + optionally the router port) and the MC FDB
 * record. Marks the index used in mids_bitmap on success.
 */
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
	if (mid_idx == MLXSW_SP_MID_MAX)
	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);

/* Release a MID index and remove its MC FDB record from the device. */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    

/* Allocate a new software MID object for (addr, fid); the hardware entry
 * is only written if multicast is enabled on the bridge.
 */
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
	struct mlxsw_sp_mid *mid;
	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;
	ether_addr_copy(mid->addr, addr);
	/* With MC disabled, defer the hardware write until it is enabled. */
	if (!bridge_device->multicast_enabled)
	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;
	list_add_tail(&mid->list, &bridge_device->mids_list);
err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:

/* Drop a port from a MID; when the last port leaves, tear the MID down
 * (hardware entry, list membership, and port bitmap).
 */
static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_mid *mid)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	if (bitmap_empty(mid->ports_in_mid,
			 mlxsw_core_max_ports(mlxsw_sp->core))) {
		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
		list_del(&mid->list);
		kfree(mid->ports_in_mid);
/* switchdev MDB add handler: find-or-create the MID for (group addr, fid)
 * and add this port to it. Runs only in the commit phase.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	if (switchdev_trans_ph_prepare(trans))
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
	if (!mlxsw_sp_port_vlan)
	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
			netdev_err(dev, "Unable to allocate MC group\n");
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	/* With MC disabled or an mrouter port, flooding already covers the
	 * port, so no SMID update is needed.
	 */
	if (!bridge_device->multicast_enabled)
	if (bridge_port->mrouter)
	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
		netdev_err(dev, "Unable to set SMID\n");
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);

/* Sync all MIDs of a bridge to hardware after the bridge's multicast
 * enable state flipped: write entries when enabling, remove when
 * disabling.
 */
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_mid *mid;
	mc_enabled = bridge_device->multicast_enabled;
	list_for_each_entry(mid, &bridge_device->mids_list, list) {
			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);

/* When a port turns mrouter on/off, add/remove it in every MID it is not
 * already an explicit member of.
 */
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid;
	bridge_device = bridge_port->bridge_device;
	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
/* Deferred-work wrapper used to re-evaluate SPAN (mirroring) state
 * outside of the (possibly atomic) notifier context.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;

/* Workqueue handler: perform the actual SPAN respin. */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
	struct mlxsw_sp_span_respin_work *respin_work =
		container_of(work, struct mlxsw_sp_span_respin_work, work);
	mlxsw_sp_span_respin(respin_work->mlxsw_sp);

/* Schedule a SPAN respin. GFP_ATOMIC because this may be called from
 * atomic notifier context.
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
	struct mlxsw_sp_span_respin_work *respin_work;
	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;
	mlxsw_core_schedule_work(&respin_work->work);
/* switchdev object-add entry point: dispatch VLAN and MDB additions. */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),

/* Remove one VLAN from a bridge port: leave the bridge on that VLAN,
 * clear the PVID if it matched, and drop VLAN membership/refcount.
 */
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

/* switchdev VLAN-del handler: iterate the VLAN range on a VLAN-aware
 * bridge port; VLAN-unaware bridges are skipped.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	if (netif_is_bridge_master(orig_dev))
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
	if (!bridge_port->bridge_device->vlan_enabled)
	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
/* Remove this port from a MID: update the SMID flood set (unless flooding
 * already covers the port via MC-disabled or mrouter state), then drop
 * MID membership.
 */
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
	struct net_device *dev = mlxsw_sp_port->dev;
	if (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
			netdev_err(dev, "Unable to remove port from SMID\n");
	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
		netdev_err(dev, "Unable to remove MC SFD\n");

/* switchdev MDB del handler: resolve the MID for (group addr, fid) and
 * remove this port from it.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
	if (!mlxsw_sp_port_vlan)
	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
		netdev_err(dev, "Unable to remove port from MC DB\n");
	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);

/* Flush a bridge port out of every MID it participates in; also clear
 * flood membership gained via mrouter status.
 */
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;
	bridge_device = bridge_port->bridge_device;
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);

/* switchdev object-del entry point: dispatch VLAN and MDB removals, then
 * schedule a SPAN respin to account for the changed bridge state.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
/* Return the first existing member port of a LAG, used as the LAG's
 * representor when processing LAG-scoped FDB notifications.
 */
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
			return mlxsw_sp_port;

/* switchdev ops exposed on every mlxsw_sp port netdev. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
/* 802.1Q (VLAN-aware) bridge: port join. VLAN uppers cannot be enslaved;
 * the port's default VLAN 1 is released so the bridge manages VLANs.
 */
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

/* 802.1Q bridge: port leave — restore VLAN 1 and the default PVID. */
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

/* 802.1Q bridge: attach a VxLAN device by enabling NVE on the FID of the
 * mapped VLAN. The tunnel itself takes no FID reference.
 */
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
	struct mlxsw_sp_fid *fid;
	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
	/* If no other port is member in the VLAN, then the FID does not exist.
	 * NVE will be enabled on the FID once a port joins the VLAN
	fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		goto err_vni_exists;
	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
		goto err_nve_fid_enable;
	/* The tunnel port does not hold a reference on the FID. Only
	 * local ports and the router port
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);

/* Find the (at most one) VxLAN lower of the bridge mapped to 'vid'. */
static struct net_device *
mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
	struct net_device *dev;
	struct list_head *iter;
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (!netif_is_vxlan(dev))
		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || pvid != vid)

/* 802.1Q bridge: get (create) the FID for a VLAN; if a running VxLAN
 * device maps this VLAN, join it to the FID as well.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (mlxsw_sp_fid_vni_is_set(fid))
	/* Find the VxLAN device that has the specified VLAN configured as
	 * PVID and egress untagged. There can be at most one such device
	vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
	if (!netif_running(vxlan_dev))
	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
		goto err_vxlan_join;
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);

/* 802.1Q bridge: non-creating FID lookup by VLAN. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);

/* 802.1Q bridge: the VID of a FID is its 802.1Q VLAN. */
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
	return mlxsw_sp_fid_8021q_vid(fid);

/* Ops vtable for VLAN-aware bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
/* True if any VLAN of this port is already a member of bridge 'br_dev'. */
mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
			   const struct net_device *br_dev)
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
		if (mlxsw_sp_port_vlan->bridge_port &&
		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==

/* 802.1D (VLAN-unaware) bridge: port join. The port-VLAN (from the VLAN
 * upper, or VLAN 1) is moved from router use into the bridge.
 */
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);

/* 802.1D bridge: port leave — detach the port-VLAN from the bridge. */
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);

/* 802.1D bridge: attach a VxLAN device by enabling NVE on the bridge's
 * per-ifindex FID.
 */
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
	struct mlxsw_sp_fid *fid;
	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		goto err_vni_exists;
	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
		goto err_nve_fid_enable;
	/* The tunnel port does not hold a reference on the FID. Only
	 * local ports and the router port
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);

/* 802.1D bridge: get (create) the per-bridge FID; if a running VxLAN
 * lower exists, join it to the FID.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
	if (mlxsw_sp_fid_vni_is_set(fid))
	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
	if (!netif_running(vxlan_dev))
	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
		goto err_vxlan_join;
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);

/* 802.1D bridge: non-creating FID lookup (ignores VLAN). */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);

/* 802.1D bridge: FIDs of VLAN-unaware bridges have no VID. */
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)

/* Ops vtable for VLAN-unaware bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
/* Public API: enslave a port (or its representor 'brport_dev') to a
 * bridge. Takes a bridge-port reference and delegates to the bridge
 * type's (8021q/8021d) port_join op.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;
	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);

/* Public API: remove a port from a bridge; mirror of bridge_join. */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	bridge_device->ops->port_leave(bridge_device, bridge_port,
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);

/* Public API: attach a VxLAN device to a bridge, via the bridge type's
 * vxlan_join op.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
	struct mlxsw_sp_bridge_device *bridge_device;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,

/* Public API: detach a VxLAN device — disable NVE on the FID carrying
 * its VNI, if any.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;
	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	mlxsw_sp_fid_put(fid);
/* Convert a VxLAN sockaddr-style address into mlxsw's L3 proto + addr. */
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
				      enum mlxsw_sp_l3proto *proto,
				      union mlxsw_sp_l3addr *addr)
	if (vxlan_addr->sa.sa_family == AF_INET) {
		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
		*proto = MLXSW_SP_L3_PROTO_IPV4;
		addr->addr6 = vxlan_addr->sin6.sin6_addr;
		*proto = MLXSW_SP_L3_PROTO_IPV6;

/* Inverse conversion: mlxsw L3 proto + addr back into a vxlan_addr. */
mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
				      const union mlxsw_sp_l3addr *addr,
				      union vxlan_addr *vxlan_addr)
	case MLXSW_SP_L3_PROTO_IPV4:
		vxlan_addr->sa.sa_family = AF_INET;
		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
	case MLXSW_SP_L3_PROTO_IPV6:
		vxlan_addr->sa.sa_family = AF_INET6;
		vxlan_addr->sin6.sin6_addr = addr->addr6;

/* Notify the VxLAN driver (and through it the bridge) of a learned or
 * aged-out remote MAC behind a tunnel endpoint.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;
	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info);

/* Dispatch NVE FDB notifications per tunnel type (VxLAN only today). */
static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
					    enum mlxsw_sp_l3proto proto,
					    union mlxsw_sp_l3addr *addr,
	if (netif_is_vxlan(dev))
		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,

/* Emit a plain switchdev FDB notification towards the bridge. */
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
			    const char *mac, u16 vid,
			    struct net_device *dev, bool offloaded)
	struct switchdev_notifier_fdb_info info;
	info.offloaded = offloaded;
	call_switchdev_notifiers(type, dev, &info.info);
/* Handle one learned/aged-out MAC record from an SFN query: mirror it
 * into the device FDB and notify the bridge. On failure the notification
 * is suppressed (do_notification) and/or the entry removed so the device
 * stops re-reporting it.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	bool do_notification = true;
	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
	bridge_port = mlxsw_sp_port_vlan->bridge_port;
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 towards the bridge layer. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
	if (!do_notification)
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
	do_notification = false;

/* LAG variant of the above: resolve a representor port for the LAG and
 * program the LAG-scoped FDB record.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	bool do_notification = true;
	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
	bridge_port = mlxsw_sp_port_vlan->bridge_port;
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
		  mlxsw_sp_port_vlan->vid : 0;
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
	if (!do_notification)
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
	do_notification = false;

/* Resolve the NVE device, bridge VID and VNI for a unicast-tunnel FDB
 * record; also filters out records the bridge would not learn (device
 * down, BR_LEARNING off, VXLAN_F_LEARN off).
 */
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	err = mlxsw_sp_fid_vni(fid, p_vni);
	dev = __dev_get_by_index(&init_net, nve_ifindex);
	if (!netif_running(dev))
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);
		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
	br_dev = netdev_master_upper_dev_get(dev);
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

/* Handle one learned/aged-out unicast-tunnel record: resolve the remote
 * IP, program the tunnel FDB entry and notify both the NVE driver and
 * the bridge. On error the entry is removed from the device so it is not
 * endlessly re-reported.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
		goto err_fid_lookup;
	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
		goto err_ip_resolve;
	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
		goto err_fdb_process;
	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
/* Dispatch one SFN record to the handler matching its type: plain MAC,
 * MAC-over-LAG, or unicast-tunnel, each in learned and aged-out flavors.
 */
2707 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2708 char *sfn_pl, int rec_index)
2710 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2711 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2712 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2715 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2716 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2719 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2720 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2723 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2724 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2727 case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2728 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2731 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2732 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
/* (Re)arm the periodic FDB-notification poll after 'interval' ms. */
2738 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2740 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2742 mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2743 msecs_to_jiffies(bridge->fdb_notify.interval));
/* Delayed-work body: query the SFN register for pending FDB notification
 * records, process each one, then reschedule itself.
 * NOTE(review): listing is partial — kfree(sfn_pl) and locking around the
 * query loop are elided here.
 */
2746 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2748 struct mlxsw_sp_bridge *bridge;
2749 struct mlxsw_sp *mlxsw_sp;
2755 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2759 bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2760 mlxsw_sp = bridge->mlxsw_sp;
2763 mlxsw_reg_sfn_pack(sfn_pl);
2764 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2766 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2769 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2770 for (i = 0; i < num_rec; i++)
2771 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
/* Always rearm, even on query failure, to keep polling alive. */
2776 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
/* Context carried from the atomic switchdev notifier into process
 * context; holds a copy of either the bridge or the VXLAN FDB info,
 * depending on 'event'.
 */
2779 struct mlxsw_sp_switchdev_event_work {
2780 struct work_struct work;
/* Only one of the two infos below is valid, selected by 'event'. */
2782 struct switchdev_notifier_fdb_info fdb_info;
2783 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2785 struct net_device *dev;
2786 unsigned long event;
/* A bridge FDB add/del pointed at a VXLAN device: look up the matching
 * unicast entry in the VXLAN driver's own FDB and mirror it to / remove
 * it from the device, then mark the VXLAN entry (un)offloaded.
 */
2790 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2791 struct mlxsw_sp_switchdev_event_work *
2793 struct mlxsw_sp_fid *fid, __be32 vni)
2795 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2796 struct switchdev_notifier_fdb_info *fdb_info;
2797 struct net_device *dev = switchdev_work->dev;
2798 enum mlxsw_sp_l3proto proto;
2799 union mlxsw_sp_l3addr addr;
2802 fdb_info = &switchdev_work->fdb_info;
/* Find the unicast remote for this MAC/VNI in the VXLAN driver. */
2803 err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2807 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2810 switch (switchdev_work->event) {
2811 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2812 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2813 vxlan_fdb_info.eth_addr,
2814 mlxsw_sp_fid_index(fid),
2815 proto, &addr, true, false);
/* Report offload to both the VXLAN and the bridge drivers. */
2818 vxlan_fdb_info.offloaded = true;
2819 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2820 &vxlan_fdb_info.info);
2821 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2822 vxlan_fdb_info.eth_addr,
2823 fdb_info->vid, dev, true);
2825 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2826 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2827 vxlan_fdb_info.eth_addr,
2828 mlxsw_sp_fid_index(fid),
2829 proto, &addr, false,
2831 vxlan_fdb_info.offloaded = false;
2832 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2833 &vxlan_fdb_info.info);
/* Handle a bridge FDB event whose target netdev is an NVE (VXLAN)
 * device: filter irrelevant events, resolve bridge -> mlxsw -> FID,
 * confirm the FID has a VNI, then hand off to the VXLAN FDB handler.
 */
2839 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2842 struct mlxsw_sp_bridge_device *bridge_device;
2843 struct net_device *dev = switchdev_work->dev;
2844 struct net_device *br_dev;
2845 struct mlxsw_sp *mlxsw_sp;
2846 struct mlxsw_sp_fid *fid;
/* Only ADD/DEL-to-device events are relevant here. */
2850 if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2851 switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
/* Ignore adds the user did not create (e.g. learned entries). */
2854 if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2855 !switchdev_work->fdb_info.added_by_user)
2858 if (!netif_running(dev))
2860 br_dev = netdev_master_upper_dev_get(dev);
2863 if (!netif_is_bridge_master(br_dev))
2865 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2868 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2872 fid = bridge_device->ops->fid_lookup(bridge_device,
2873 switchdev_work->fdb_info.vid);
2877 err = mlxsw_sp_fid_vni(fid, &vni);
2881 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
/* Drop the reference taken by fid_lookup(). */
2885 mlxsw_sp_fid_put(fid);
/* Process-context worker for bridge FDB notifier events: route VXLAN
 * devices to the NVE handler, otherwise program/remove the entry on the
 * corresponding port, then free the copied event data.
 * NOTE(review): listing is partial — rtnl locking and dev_put() are
 * elided here.
 */
2888 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2890 struct mlxsw_sp_switchdev_event_work *switchdev_work =
2891 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2892 struct net_device *dev = switchdev_work->dev;
2893 struct switchdev_notifier_fdb_info *fdb_info;
2894 struct mlxsw_sp_port *mlxsw_sp_port;
2898 if (netif_is_vxlan(dev)) {
2899 mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2903 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2907 switch (switchdev_work->event) {
2908 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2909 fdb_info = &switchdev_work->fdb_info;
/* Only offload entries explicitly added by the user. */
2910 if (!fdb_info->added_by_user)
2912 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2915 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2917 fdb_info->vid, dev, true);
2919 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2920 fdb_info = &switchdev_work->fdb_info;
2921 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2923 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2924 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2925 /* These events are only used to potentially update an existing
/* Mirror sessions may depend on FDB state; refresh them. */
2931 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
/* Free the MAC copy and the work item allocated in the notifier. */
2935 kfree(switchdev_work->fdb_info.addr);
2936 kfree(switchdev_work);
/* Offload a VXLAN FDB add. The all-zeros MAC represents the flood
 * ("default destination") entry and maps to an NVE flood IP; any other
 * MAC is programmed as a unicast tunnel entry, but only if the bridge's
 * FDB also points that MAC at this VXLAN device.
 */
2941 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2942 struct mlxsw_sp_switchdev_event_work *
2945 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2946 struct mlxsw_sp_bridge_device *bridge_device;
2947 struct net_device *dev = switchdev_work->dev;
2948 u8 all_zeros_mac[ETH_ALEN] = { 0 };
2949 enum mlxsw_sp_l3proto proto;
2950 union mlxsw_sp_l3addr addr;
2951 struct net_device *br_dev;
2952 struct mlxsw_sp_fid *fid;
2956 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2957 br_dev = netdev_master_upper_dev_get(dev);
2959 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2963 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2967 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
/* All-zeros MAC == flood entry: add the remote as a flood IP. */
2970 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2971 err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2973 mlxsw_sp_fid_put(fid);
2976 vxlan_fdb_info->offloaded = true;
2977 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2978 &vxlan_fdb_info->info);
2979 mlxsw_sp_fid_put(fid);
2983 /* The device has a single FDB table, whereas Linux has two - one
2984 * in the bridge driver and another in the VxLAN driver. We only
2985 * program an entry to the device if the MAC points to the VxLAN
2986 * device in the bridge's FDB table
2988 vid = bridge_device->ops->fid_vid(bridge_device, fid);
2989 if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
2990 goto err_br_fdb_find;
2992 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2993 mlxsw_sp_fid_index(fid), proto,
2994 &addr, true, false);
2996 goto err_fdb_tunnel_uc_op;
/* Success: report offload to both VXLAN and bridge drivers. */
2997 vxlan_fdb_info->offloaded = true;
2998 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2999 &vxlan_fdb_info->info);
3000 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3001 vxlan_fdb_info->eth_addr, vid, dev, true);
3003 mlxsw_sp_fid_put(fid);
/* Error paths: drop the FID reference taken by the VNI lookup. */
3007 err_fdb_tunnel_uc_op:
3009 mlxsw_sp_fid_put(fid);
/* Un-offload a VXLAN FDB delete: remove the flood IP for the all-zeros
 * MAC, or the unicast tunnel entry otherwise, and notify the bridge that
 * the entry is no longer offloaded.
 */
3013 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3014 struct mlxsw_sp_switchdev_event_work *
3017 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3018 struct mlxsw_sp_bridge_device *bridge_device;
3019 struct net_device *dev = switchdev_work->dev;
3020 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3021 u8 all_zeros_mac[ETH_ALEN] = { 0 };
3022 enum mlxsw_sp_l3proto proto;
3023 union mlxsw_sp_l3addr addr;
3024 struct mlxsw_sp_fid *fid;
3027 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3029 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3033 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3037 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
/* All-zeros MAC == flood entry: remove the remote flood IP. */
3040 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3041 mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3042 mlxsw_sp_fid_put(fid);
3046 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3047 mlxsw_sp_fid_index(fid), proto, &addr,
3049 vid = bridge_device->ops->fid_vid(bridge_device, fid);
3050 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3051 vxlan_fdb_info->eth_addr, vid, dev, false);
3053 mlxsw_sp_fid_put(fid);
/* Process-context worker for VXLAN FDB notifier events: validate that
 * the device is running and enslaved to an mlxsw-backed bridge, then
 * dispatch to the add/del handler and free the work item.
 * NOTE(review): listing is partial — rtnl locking / dev_put() elided.
 */
3056 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3058 struct mlxsw_sp_switchdev_event_work *switchdev_work =
3059 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3060 struct net_device *dev = switchdev_work->dev;
3061 struct mlxsw_sp *mlxsw_sp;
3062 struct net_device *br_dev;
3066 if (!netif_running(dev))
3068 br_dev = netdev_master_upper_dev_get(dev);
3071 if (!netif_is_bridge_master(br_dev))
3073 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3077 switch (switchdev_work->event) {
3078 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3079 mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3081 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3082 mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3088 kfree(switchdev_work);
/* Atomic-context filter for VXLAN FDB events: reject entries the device
 * cannot offload (non-default port/VNI, explicit egress ifindex,
 * multicast MAC or multicast remote), then stash a copy of the info for
 * the worker.
 */
3093 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3095 struct switchdev_notifier_info *info)
3097 struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3098 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3099 struct vxlan_config *cfg = &vxlan->cfg;
3101 vxlan_fdb_info = container_of(info,
3102 struct switchdev_notifier_vxlan_fdb_info,
/* Only entries matching the device's own configuration are offloadable. */
3105 if (vxlan_fdb_info->remote_port != cfg->dst_port)
3107 if (vxlan_fdb_info->remote_vni != cfg->vni)
3109 if (vxlan_fdb_info->vni != cfg->vni)
3111 if (vxlan_fdb_info->remote_ifindex)
3113 if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
3115 if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
/* Copy by value — the notifier's info is not valid after we return. */
3118 switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier: copy the event payload and defer the real
 * work to process context. Returns through notifier_from_errno()-style
 * paths elided from this partial listing.
 */
3124 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3125 unsigned long event, void *ptr)
3127 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3128 struct mlxsw_sp_switchdev_event_work *switchdev_work;
3129 struct switchdev_notifier_fdb_info *fdb_info;
3130 struct switchdev_notifier_info *info = ptr;
3131 struct net_device *br_dev;
3134 /* Tunnel devices are not our uppers, so check their master instead */
3135 br_dev = netdev_master_upper_dev_get_rcu(dev);
3138 if (!netif_is_bridge_master(br_dev))
3140 if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
/* GFP_ATOMIC: we are in notifier (RCU) context and cannot sleep. */
3143 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3144 if (!switchdev_work)
3147 switchdev_work->dev = dev;
3148 switchdev_work->event = event;
3151 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
3152 case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
3153 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
3154 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3155 fdb_info = container_of(info,
3156 struct switchdev_notifier_fdb_info,
3158 INIT_WORK(&switchdev_work->work,
3159 mlxsw_sp_switchdev_bridge_fdb_event_work);
/* Deep-copy: the notifier payload (and its MAC pointer) is only
 * valid for the duration of this call.
 */
3160 memcpy(&switchdev_work->fdb_info, ptr,
3161 sizeof(switchdev_work->fdb_info));
3162 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3163 if (!switchdev_work->fdb_info.addr)
3164 goto err_addr_alloc;
3165 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3167 /* Take a reference on the device. This can be either
3168 * upper device containing mlxsw_sp_port or just a
3173 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
3174 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3175 INIT_WORK(&switchdev_work->work,
3176 mlxsw_sp_switchdev_vxlan_fdb_event_work);
3177 err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3180 goto err_vxlan_work_prepare;
/* Unhandled event types: free the work item and bail out. */
3184 kfree(switchdev_work);
3188 mlxsw_core_schedule_work(&switchdev_work->work);
3192 err_vxlan_work_prepare:
3194 kfree(switchdev_work);
/* Atomic switchdev notifier block (registered in mlxsw_sp_fdb_init()). */
3198 struct notifier_block mlxsw_sp_switchdev_notifier = {
3199 .notifier_call = mlxsw_sp_switchdev_event,
/* Handle a VLAN being added on a VXLAN bridge port of a VLAN-aware
 * bridge. Maps/unmaps the VLAN to the device's VNI depending on the
 * PVID/untagged flags; the numbered comments below walk the five cases.
 * Runs under the two-phase switchdev transaction ('trans').
 */
3203 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3204 struct mlxsw_sp_bridge_device *bridge_device,
3205 const struct net_device *vxlan_dev, u16 vid,
3206 bool flag_untagged, bool flag_pvid,
3207 struct switchdev_trans *trans)
3209 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3210 __be32 vni = vxlan->cfg.vni;
3211 struct mlxsw_sp_fid *fid;
3215 /* We cannot have the same VLAN as PVID and egress untagged on multiple
3216 * VxLAN devices. Note that we get this notification before the VLAN is
3217 * actually added to the bridge's database, so it is not possible for
3218 * the lookup function to return 'vxlan_dev'
3220 if (flag_untagged && flag_pvid &&
3221 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
/* Prepare phase: only the validation above is needed. */
3224 if (switchdev_trans_ph_prepare(trans))
3227 if (!netif_running(vxlan_dev))
3230 /* First case: FID is not associated with this VNI, but the new VLAN
3231 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3234 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3236 if (!flag_untagged || !flag_pvid)
3238 return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
3239 vxlan_dev, vid, NULL);
3242 /* Second case: FID is associated with the VNI and the VLAN associated
3243 * with the FID is the same as the notified VLAN. This means the flags
3244 * (PVID / egress untagged) were toggled and that NVE should be
3245 * disabled on the FID
3247 old_vid = mlxsw_sp_fid_8021q_vid(fid);
3248 if (vid == old_vid) {
3249 if (WARN_ON(flag_untagged && flag_pvid)) {
3250 mlxsw_sp_fid_put(fid);
3253 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3254 mlxsw_sp_fid_put(fid);
3258 /* Third case: A new VLAN was configured on the VxLAN device, but this
3259 * VLAN is not PVID, so there is nothing to do.
3262 mlxsw_sp_fid_put(fid);
3266 /* Fourth case: Thew new VLAN is PVID, which means the VLAN currently
3267 * mapped to the VNI should be unmapped
3269 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3270 mlxsw_sp_fid_put(fid);
3272 /* Fifth case: The new VLAN is also egress untagged, which means the
3273 * VLAN needs to be mapped to the VNI
3278 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
3281 goto err_vxlan_join;
/* Rollback: restore the previous VLAN-to-VNI mapping on failure. */
3286 mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
/* Handle a VLAN being deleted from a VXLAN bridge port: if the deleted
 * VLAN is the one mapped to the device's VNI, disable NVE on the FID.
 */
3292 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3293 struct mlxsw_sp_bridge_device *bridge_device,
3294 const struct net_device *vxlan_dev, u16 vid)
3296 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3297 __be32 vni = vxlan->cfg.vni;
3298 struct mlxsw_sp_fid *fid;
3300 if (!netif_running(vxlan_dev))
3303 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3307 /* A different VLAN than the one mapped to the VNI is deleted */
3308 if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3311 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
/* Drop the reference taken by the VNI lookup. */
3314 mlxsw_sp_fid_put(fid);
/* Apply a PORT_VLAN object add on a VXLAN device: iterate the VLAN range
 * and process each VID, but only on VLAN-aware mlxsw-backed bridges.
 * Marks the notification handled once an mlxsw instance is identified.
 */
3318 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3319 struct switchdev_notifier_port_obj_info *
3322 struct switchdev_obj_port_vlan *vlan =
3323 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3324 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3325 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3326 struct switchdev_trans *trans = port_obj_info->trans;
3327 struct mlxsw_sp_bridge_device *bridge_device;
3328 struct mlxsw_sp *mlxsw_sp;
3329 struct net_device *br_dev;
3332 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3336 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3340 port_obj_info->handled = true;
3342 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* VLAN objects are only meaningful on VLAN-aware bridges. */
3346 if (!bridge_device->vlan_enabled)
3349 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
3352 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
/* Apply a PORT_VLAN object delete on a VXLAN device: iterate the VLAN
 * range and process each VID on VLAN-aware mlxsw-backed bridges.
 */
3364 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3365 struct switchdev_notifier_port_obj_info *
3368 struct switchdev_obj_port_vlan *vlan =
3369 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3370 struct mlxsw_sp_bridge_device *bridge_device;
3371 struct mlxsw_sp *mlxsw_sp;
3372 struct net_device *br_dev;
3375 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3379 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3383 port_obj_info->handled = true;
3385 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* VLAN objects are only meaningful on VLAN-aware bridges. */
3389 if (!bridge_device->vlan_enabled)
3392 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3393 mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
/* Dispatch a switchdev object add on a VXLAN device by object id;
 * currently only PORT_VLAN objects are handled.
 */
3398 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3399 struct switchdev_notifier_port_obj_info *
3404 switch (port_obj_info->obj->id) {
3405 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3406 err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
/* Dispatch a switchdev object delete on a VXLAN device by object id;
 * currently only PORT_VLAN objects are handled.
 */
3417 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3418 struct switchdev_notifier_port_obj_info *
3421 switch (port_obj_info->obj->id) {
3422 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3423 mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
/* Blocking switchdev notifier: route object add/del either to the VXLAN
 * handlers (for VXLAN netdevs) or to the generic switchdev helpers that
 * walk down to the mlxsw port.
 */
3430 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3431 unsigned long event, void *ptr)
3433 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3437 case SWITCHDEV_PORT_OBJ_ADD:
3438 if (netif_is_vxlan(dev))
3439 err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3441 err = switchdev_handle_port_obj_add(dev, ptr,
3442 mlxsw_sp_port_dev_check,
3443 mlxsw_sp_port_obj_add);
3444 return notifier_from_errno(err);
3445 case SWITCHDEV_PORT_OBJ_DEL:
3446 if (netif_is_vxlan(dev))
3447 mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3449 err = switchdev_handle_port_obj_del(dev, ptr,
3450 mlxsw_sp_port_dev_check,
3451 mlxsw_sp_port_obj_del);
3452 return notifier_from_errno(err);
/* Blocking switchdev notifier block (registered in mlxsw_sp_fdb_init()). */
3458 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3459 .notifier_call = mlxsw_sp_switchdev_blocking_event,
/* Accessor for a bridge port's cached STP state. */
3463 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3465 return bridge_port->stp_state;
/* FDB subsystem init: set the default ageing time, register the atomic
 * and blocking switchdev notifiers, and start the periodic notification
 * poll. Unwinds the atomic notifier registration on failure.
 */
3468 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3470 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3471 struct notifier_block *nb;
3474 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3476 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3480 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3482 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3486 nb = &mlxsw_sp_switchdev_blocking_notifier;
3487 err = register_switchdev_blocking_notifier(nb);
3489 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3490 goto err_register_switchdev_blocking_notifier;
/* Kick off periodic polling of FDB notification records. */
3493 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3494 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3495 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
3498 err_register_switchdev_blocking_notifier:
3499 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
/* FDB subsystem teardown: stop the poll (synchronously, so the work
 * cannot rearm after this) and unregister both notifiers in reverse
 * order of registration.
 */
3503 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3505 struct notifier_block *nb;
3507 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3509 nb = &mlxsw_sp_switchdev_blocking_notifier;
3510 unregister_switchdev_blocking_notifier(nb);
3512 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
/* Allocate and wire up the per-ASIC bridge state, install the 802.1Q /
 * 802.1D bridge ops, and bring up the FDB subsystem.
 */
3515 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3517 struct mlxsw_sp_bridge *bridge;
3519 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3522 mlxsw_sp->bridge = bridge;
3523 bridge->mlxsw_sp = mlxsw_sp;
3525 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3527 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3528 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3530 return mlxsw_sp_fdb_init(mlxsw_sp);
/* Tear down the FDB subsystem and free the bridge state; all offloaded
 * bridges must already be gone (hence the WARN_ON).
 */
3533 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3535 mlxsw_sp_fdb_fini(mlxsw_sp);
3536 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3537 kfree(mlxsw_sp->bridge);
/* Attach the per-port switchdev ops to the port's netdev. */
3540 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
3542 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
3545 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)