/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
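
/* Resolve the FID to use for FDB/MDB configuration: a vPort uses the FID
 * mapped from its vFID, otherwise fall back to the port's PVID when no VID
 * was resolved.
 */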
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		fid = mlxsw_sp_vfid_to_fid(vfid);
	fid = mlxsw_sp_port->pvid;
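
/* switchdev hands us the originally targeted netdevice in orig_dev. When it
 * is a VLAN device on top of this port, operate on the matching vPort
 * instead of the physical port itself.
 */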
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
	struct mlxsw_sp_port *mlxsw_sp_vport;
	if (!is_vlan_dev(dev))
	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);
	return mlxsw_sp_vport;

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
	if (switchdev_trans_ph_prepare(trans))
	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
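
/* Write the MLXSW_SP_FLOOD_TABLE_UC and MLXSW_SP_FLOOD_TABLE_BM entries via
 * the SFTR register for a range of (v)FIDs. vPorts address the table by FID,
 * while VLAN-aware ports use the FID-offset table type.
 */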
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
			local_port = MLXSW_PORT_CPU_PORT;
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
			last_visited_vid = vid;
			goto err_port_flood_set;
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long brport_flags)
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	if (!mlxsw_sp_port->bridged)
	if (switchdev_trans_ph_prepare(trans))
	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	mlxsw_sp->ageing_time = ageing_time;

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					     struct switchdev_trans *trans,
					     unsigned long ageing_clock_t)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
	if (switchdev_trans_ph_prepare(trans))
	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->u.vlan_filtering);

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];
	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
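
/* A PVID of zero means untagged traffic is no longer admitted, so the
 * untagged-frame admittance (SPAFT) is toggled together with the SPVID
 * write.
 */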
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
	struct net_device *dev = mlxsw_sp_port->dev;
	err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		netdev_err(dev, "Failed to disallow untagged traffic\n");
	err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		netdev_err(dev, "Failed to set PVID\n");
	/* Only allow if not already allowed. */
	if (!mlxsw_sp_port->pvid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
			netdev_err(dev, "Failed to allow untagged traffic\n");
			goto err_port_allow_untagged_set;
	mlxsw_sp_port->pvid = vid;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	set_bit(fid, mlxsw_sp->active_fids);

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	clear_bit(fid, mlxsw_sp->active_fids);
	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
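
/* A port with vPorts on top operates in virtual mode and uses {Port, VID} to
 * FID mappings; otherwise the global VID to FID mapping is used.
 */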
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
	enum mlxsw_reg_svfa_mt mt;
	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
	enum mlxsw_reg_svfa_mt mt;
	if (list_empty(&mlxsw_sp_port->vports_list))
	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
			goto err_port_add_vid;

	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
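
/* Add a range of VLANs to a bridged port: create and map the corresponding
 * FIDs, enable flooding for them, add the port to the VLANs, update the PVID
 * if requested and finally re-apply the port's STP state to the new VLANs.
 */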
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
				netdev_err(dev, "Failed to create FID=%d\n",
			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
				goto err_port_vid_to_fid_set;

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
		goto err_port_vlans_set;

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
	last_visited_vid = vid_end;
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	if (switchdev_trans_ph_prepare(trans))
	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
					const char *mac, u16 fid, u16 lag_vid,
					bool adding, bool dynamic)
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);

mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	if (switchdev_trans_ph_prepare(trans))
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
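
/* Multicast groups (MIDs) are reference counted and tracked on the br_mids
 * list, so the SFD multicast record is only written when the first port
 * joins a group and removed again when the last port leaves it.
 */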
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
	struct mlxsw_sp_mid *mid;
	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
	struct mlxsw_sp_mid *mid;
	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
	if (mid_idx == MLXSW_SP_MID_MAX)
	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	if (switchdev_trans_ph_prepare(trans))
	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
			netdev_err(dev, "Unable to allocate MC group\n");

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
		netdev_err(dev, "Unable to set SMID\n");

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
			netdev_err(dev, "Unable to set MC SFD\n");

	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
	struct net_device *dev = mlxsw_sp_port->dev;
	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
			netdev_err(dev, "Unable to del PVID %d\n", pvid);

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
		netdev_err(dev, "Failed to clear flooding\n");

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
			netdev_err(dev, "Failed to unmap FID=%d", vid);
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);

mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
		netdev_err(dev, "Unable to remove port from MC DB\n");

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
		netdev_err(dev, "Unable to remove port from SMID\n");

	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
			netdev_err(dev, "Unable to remove MC SFD\n");

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_port *mlxsw_sp_port;
	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
			return mlxsw_sp_port;
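
/* Dump the hardware FDB by repeatedly querying the SFD register, invoking
 * the switchdev callback for records that belong to this port (or its LAG)
 * and FID. The query loop is run to completion even on error so that the
 * dump session in the firmware is properly finished.
 */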
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
					if (vport_fid && vport_fid == fid)
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

	return stored_err ? stored_err : err;

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add = mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del = mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					struct net_device *dev)
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;
	if (learning_sync) {
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
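
/* Process a learned / aged-out MAC notification: reflect the event in the
 * device FDB (learning disabled on the port turns an add into a removal) and,
 * when learning_sync is enabled, notify the bridge through the switchdev FDB
 * notifiers.
 */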
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
	struct mlxsw_sp_port *mlxsw_sp_port;
	bool do_notification = true;
	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;
		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");

		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;

	adding = adding && mlxsw_sp_port->learning;
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");

	if (!do_notification)
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);

	do_notification = false;

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	bool do_notification = true;
	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;
		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;

	adding = adding && mlxsw_sp_port->learning;
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");

	if (!do_notification)
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,

	do_notification = false;

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
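
/* Delayed work that polls the SFN register for FDB notifications, processes
 * the returned records and re-arms itself according to the configured
 * learning interval.
 */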
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
	struct mlxsw_sp *mlxsw_sp;
	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
	return mlxsw_sp_fdb_init(mlxsw_sp);

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
	struct net_device *dev = mlxsw_sp_port->dev;
	/* Allow only untagged packets to ingress and tag them internally
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
		netdev_err(dev, "Unable to init VLANs\n");

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
		netdev_err(dev, "Failed to configure default vFID\n");

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)