2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <net/switchdev.h>
53 #include <generated/utsrelease.h>
62 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
63 static const char mlxsw_sp_driver_version[] = "1.0";
69 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
72 * Packet control type.
73 * 0 - Ethernet control (e.g. EMADs, LACP)
76 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
79 * Packet protocol type. Must be set to 1 (Ethernet).
81 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
83 /* tx_hdr_rx_is_router
84 * Packet is sent from the router. Valid for data packets only.
86 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
89 * Indicates if the 'fid' field is valid and should be used for
90 * forwarding lookup. Valid for data packets only.
92 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
95 * Switch partition ID. Must be set to 0.
97 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
99 /* tx_hdr_control_tclass
100 * Indicates if the packet should use the control TClass and not one
101 * of the data TClasses.
103 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
106 * Egress TClass to be used on the egress device on the egress port.
108 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
111 * Destination local port for unicast packets.
112 * Destination multicast ID for multicast packets.
114 * Control packets are directed to a specific egress port, while data
115 * packets are transmitted through the CPU port (0) into the switch partition,
116 * where forwarding rules are applied.
118 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
121 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
122 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
123 * Valid for data packets only.
125 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
129 * 6 - Control packets
131 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
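/* Each MLXSW_ITEM32(tx, hdr, <field>, ...) above generates the
 * mlxsw_tx_hdr_<field>_set()/_get() accessors for that offset/width;
 * mlxsw_sp_txhdr_construct() below uses them to build the Tx header that is
 * pushed in front of every transmitted frame.
 */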
133 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
134 const struct mlxsw_tx_info *tx_info)
136 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
138 memset(txhdr, 0, MLXSW_TXHDR_LEN);
140 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
141 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
142 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
143 mlxsw_tx_hdr_swid_set(txhdr, 0);
144 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
145 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
146 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
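/* Read the switch base MAC address from the SPAD register; per-port MAC
 * addresses are derived from it in mlxsw_sp_port_dev_addr_init().
 */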
149 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
151 char spad_pl[MLXSW_REG_SPAD_LEN];
154 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
157 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
161 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
164 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
165 char paos_pl[MLXSW_REG_PAOS_LEN];
167 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
168 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
169 MLXSW_PORT_ADMIN_STATUS_DOWN);
170 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
173 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
176 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
177 char paos_pl[MLXSW_REG_PAOS_LEN];
181 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
182 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
185 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
186 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
190 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
194 char ppad_pl[MLXSW_REG_PPAD_LEN];
196 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
197 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
198 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
201 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
203 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
204 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
206 ether_addr_copy(addr, mlxsw_sp->base_mac);
207 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
208 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
211 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
212 u16 vid, enum mlxsw_reg_spms_state state)
214 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
218 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
221 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
222 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
223 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
228 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
230 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
231 char pmtu_pl[MLXSW_REG_PMTU_LEN];
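/* The MTU handed down by the stack excludes the Ethernet header and the Tx
 * header the driver pushes, while PMTU works on the full frame size, so the
 * value is adjusted before being validated and programmed.
 */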
235 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
236 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
237 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
240 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
245 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
246 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
249 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
251 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
252 char pspa_pl[MLXSW_REG_PSPA_LEN];
254 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
255 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
258 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
262 char svpe_pl[MLXSW_REG_SVPE_LEN];
264 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
265 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
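/* Program (or invalidate) a {Port, VID} to FID mapping via the SVFA register.
 * Such explicit mappings are only consulted when the port is in Virtual mode
 * (see the SVPE helper above).
 */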
268 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
269 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
272 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
273 char svfa_pl[MLXSW_REG_SVFA_LEN];
275 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
277 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
280 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
281 u16 vid, bool learn_enable)
283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
287 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
290 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
292 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
298 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
300 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
301 char sspr_pl[MLXSW_REG_SSPR_LEN];
303 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
304 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
307 static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
310 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
311 char pmlp_pl[MLXSW_REG_PMLP_LEN];
314 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
315 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
318 *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
322 static int mlxsw_sp_port_open(struct net_device *dev)
324 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
327 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
330 netif_start_queue(dev);
334 static int mlxsw_sp_port_stop(struct net_device *dev)
336 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
338 netif_stop_queue(dev);
339 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
342 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
343 struct net_device *dev)
345 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
346 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
347 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
348 const struct mlxsw_tx_info tx_info = {
349 .local_port = mlxsw_sp_port->local_port,
355 if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
356 return NETDEV_TX_BUSY;
358 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
359 struct sk_buff *skb_orig = skb;
361 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
363 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
364 dev_kfree_skb_any(skb_orig);
369 if (eth_skb_pad(skb)) {
370 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
374 mlxsw_sp_txhdr_construct(skb, &tx_info);
376 /* Due to a race we might fail here because of a full queue. In that
377 * unlikely case we simply drop the packet.
379 err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
382 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
383 u64_stats_update_begin(&pcpu_stats->syncp);
384 pcpu_stats->tx_packets++;
385 pcpu_stats->tx_bytes += len;
386 u64_stats_update_end(&pcpu_stats->syncp);
388 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
389 dev_kfree_skb_any(skb);
394 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
398 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
400 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
401 struct sockaddr *addr = p;
404 if (!is_valid_ether_addr(addr->sa_data))
405 return -EADDRNOTAVAIL;
407 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
410 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
414 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
416 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
419 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
426 static struct rtnl_link_stats64 *
427 mlxsw_sp_port_get_stats64(struct net_device *dev,
428 struct rtnl_link_stats64 *stats)
430 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
431 struct mlxsw_sp_port_pcpu_stats *p;
432 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
437 for_each_possible_cpu(i) {
438 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
440 start = u64_stats_fetch_begin_irq(&p->syncp);
441 rx_packets = p->rx_packets;
442 rx_bytes = p->rx_bytes;
443 tx_packets = p->tx_packets;
444 tx_bytes = p->tx_bytes;
445 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
447 stats->rx_packets += rx_packets;
448 stats->rx_bytes += rx_bytes;
449 stats->tx_packets += tx_packets;
450 stats->tx_bytes += tx_bytes;
451 /* tx_dropped is u32, updated without syncp protection. */
452 tx_dropped += p->tx_dropped;
454 stats->tx_dropped = tx_dropped;
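/* Set VLAN membership (and untagged state) for a range of VIDs on a port
 * through the SPVM register.
 */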
458 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
459 u16 vid_end, bool is_member, bool untagged)
461 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
465 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
469 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
470 vid_end, is_member, untagged);
471 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
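/* Transition a port from VLAN mode to Virtual mode: install explicit
 * {Port, VID} to FID mappings for every active VLAN and only then enable
 * Virtual mode on the port.
 */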
476 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
478 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
479 u16 vid, last_visited_vid;
482 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
483 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
486 last_visited_vid = vid;
487 goto err_port_vid_to_fid_set;
491 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
493 last_visited_vid = VLAN_N_VID;
494 goto err_port_vid_to_fid_set;
499 err_port_vid_to_fid_set:
500 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
501 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
506 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
508 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
512 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
516 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
517 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
526 static struct mlxsw_sp_vfid *
527 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
529 struct mlxsw_sp_vfid *vfid;
531 list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
532 if (vfid->vid == vid)
539 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
541 return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
542 MLXSW_SP_VFID_PORT_MAX);
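/* vFIDs back vPorts (VLAN uppers of switch ports). They are mapped to FIDs
 * above the 802.1Q FID range (see mlxsw_sp_vfid_to_fid()) and are created and
 * destroyed in hardware through the SFMR register below.
 */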
545 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
547 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
548 char sfmr_pl[MLXSW_REG_SFMR_LEN];
550 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
551 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
554 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
556 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
557 char sfmr_pl[MLXSW_REG_SFMR_LEN];
559 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
560 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
563 static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
566 struct device *dev = mlxsw_sp->bus_info->dev;
567 struct mlxsw_sp_vfid *vfid;
571 n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
572 if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
573 dev_err(dev, "No available vFIDs\n");
574 return ERR_PTR(-ERANGE);
577 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
579 dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
583 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
585 goto err_allocate_vfid;
590 list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
591 set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
596 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
597 return ERR_PTR(-ENOMEM);
600 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
601 struct mlxsw_sp_vfid *vfid)
603 clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
604 list_del(&vfid->list);
606 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
611 static struct mlxsw_sp_port *
612 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
613 struct mlxsw_sp_vfid *vfid)
615 struct mlxsw_sp_port *mlxsw_sp_vport;
617 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
621 /* dev will be set correctly after the VLAN device is linked
622 * with the real device. In case of bridge SELF invocation, dev will remain as is, so that the calling function can use it.
625 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
626 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
627 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
628 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
629 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
630 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
631 mlxsw_sp_vport->vport.vfid = vfid;
632 mlxsw_sp_vport->vport.vid = vfid->vid;
634 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
636 return mlxsw_sp_vport;
639 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
641 list_del(&mlxsw_sp_vport->vport.list);
642 kfree(mlxsw_sp_vport);
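/* .ndo_vlan_rx_add_vid handler: create (or reuse) a vFID for the VID, create a
 * vPort on top of the port, map {Port, VID} to the vFID, disable learning and
 * open up VLAN membership, so traffic tagged with this VID is handled
 * separately from the 802.1Q bridge.
 */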
645 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
648 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
649 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
650 struct mlxsw_sp_port *mlxsw_sp_vport;
651 struct mlxsw_sp_vfid *vfid;
654 /* VLAN 0 is added to HW filter when device goes up, but it is
655 * reserved in our case, so simply return.
660 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
661 netdev_warn(dev, "VID=%d already configured\n", vid);
665 vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
667 vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
669 netdev_err(dev, "Failed to create vFID for VID=%d\n",
671 return PTR_ERR(vfid);
675 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
676 if (!mlxsw_sp_vport) {
677 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
679 goto err_port_vport_create;
682 if (!vfid->nr_vports) {
683 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
686 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
688 goto err_vport_flood_set;
692 /* When adding the first VLAN interface on a bridged port we need to
693 * transition all the active 802.1Q bridge VLANs to use explicit
694 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
696 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
697 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
699 netdev_err(dev, "Failed to set to Virtual mode\n");
700 goto err_port_vp_mode_trans;
704 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
705 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
707 mlxsw_sp_vfid_to_fid(vfid->vfid),
710 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
712 goto err_port_vid_to_fid_set;
715 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
717 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
718 goto err_port_vid_learning_set;
721 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
723 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
725 goto err_port_add_vid;
728 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
729 MLXSW_REG_SPMS_STATE_FORWARDING);
731 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
732 goto err_port_stp_state_set;
739 err_port_stp_state_set:
740 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
742 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
743 err_port_vid_learning_set:
744 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
745 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
746 mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
747 err_port_vid_to_fid_set:
748 if (list_is_singular(&mlxsw_sp_port->vports_list))
749 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
750 err_port_vp_mode_trans:
751 if (!vfid->nr_vports)
752 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
755 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
756 err_port_vport_create:
757 if (!vfid->nr_vports)
758 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
762 int mlxsw_sp_port_kill_vid(struct net_device *dev,
763 __be16 __always_unused proto, u16 vid)
765 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
766 struct mlxsw_sp_port *mlxsw_sp_vport;
767 struct mlxsw_sp_vfid *vfid;
770 /* VLAN 0 is removed from HW filter when device goes down, but
771 * it is reserved in our case, so simply return.
776 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
777 if (!mlxsw_sp_vport) {
778 netdev_warn(dev, "VID=%d does not exist\n", vid);
782 vfid = mlxsw_sp_vport->vport.vfid;
784 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
785 MLXSW_REG_SPMS_STATE_DISCARDING);
787 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
791 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
793 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
798 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
800 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
804 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
805 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
807 mlxsw_sp_vfid_to_fid(vfid->vfid),
810 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
815 /* When removing the last VLAN interface on a bridged port we need to
816 * transition all active 802.1Q bridge VLANs to use VID to FID
817 * mappings and set port's mode to VLAN mode.
819 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
820 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
822 netdev_err(dev, "Failed to set to VLAN mode\n");
828 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
830 /* Destroy the vFID if no vPorts are assigned to it anymore. */
831 if (!vfid->nr_vports)
832 mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
837 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
838 .ndo_open = mlxsw_sp_port_open,
839 .ndo_stop = mlxsw_sp_port_stop,
840 .ndo_start_xmit = mlxsw_sp_port_xmit,
841 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
842 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
843 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
844 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
845 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
846 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
847 .ndo_fdb_add = switchdev_port_fdb_add,
848 .ndo_fdb_del = switchdev_port_fdb_del,
849 .ndo_fdb_dump = switchdev_port_fdb_dump,
850 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
851 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
852 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
855 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
856 struct ethtool_drvinfo *drvinfo)
858 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
859 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
861 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
862 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
863 sizeof(drvinfo->version));
864 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
866 mlxsw_sp->bus_info->fw_rev.major,
867 mlxsw_sp->bus_info->fw_rev.minor,
868 mlxsw_sp->bus_info->fw_rev.subminor);
869 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
870 sizeof(drvinfo->bus_info));
873 struct mlxsw_sp_port_hw_stats {
874 char str[ETH_GSTRING_LEN];
875 u64 (*getter)(char *payload);
878 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
880 .str = "a_frames_transmitted_ok",
881 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
884 .str = "a_frames_received_ok",
885 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
888 .str = "a_frame_check_sequence_errors",
889 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
892 .str = "a_alignment_errors",
893 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
896 .str = "a_octets_transmitted_ok",
897 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
900 .str = "a_octets_received_ok",
901 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
904 .str = "a_multicast_frames_xmitted_ok",
905 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
908 .str = "a_broadcast_frames_xmitted_ok",
909 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
912 .str = "a_multicast_frames_received_ok",
913 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
916 .str = "a_broadcast_frames_received_ok",
917 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
920 .str = "a_in_range_length_errors",
921 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
924 .str = "a_out_of_range_length_field",
925 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
928 .str = "a_frame_too_long_errors",
929 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
932 .str = "a_symbol_error_during_carrier",
933 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
936 .str = "a_mac_control_frames_transmitted",
937 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
940 .str = "a_mac_control_frames_received",
941 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
944 .str = "a_unsupported_opcodes_received",
945 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
948 .str = "a_pause_mac_ctrl_frames_received",
949 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
952 .str = "a_pause_mac_ctrl_frames_xmitted",
953 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
957 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
959 static void mlxsw_sp_port_get_strings(struct net_device *dev,
960 u32 stringset, u8 *data)
967 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
968 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
970 p += ETH_GSTRING_LEN;
976 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
977 enum ethtool_phys_id_state state)
979 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
980 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
981 char mlcr_pl[MLXSW_REG_MLCR_LEN];
985 case ETHTOOL_ID_ACTIVE:
988 case ETHTOOL_ID_INACTIVE:
995 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
996 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
999 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1000 struct ethtool_stats *stats, u64 *data)
1002 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1003 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1004 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1008 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
1009 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1010 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1011 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1014 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1018 return MLXSW_SP_PORT_HW_STATS_LEN;
1024 struct mlxsw_sp_port_link_mode {
1031 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1033 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1034 .supported = SUPPORTED_100baseT_Full,
1035 .advertised = ADVERTISED_100baseT_Full,
1039 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
1043 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1044 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1045 .supported = SUPPORTED_1000baseKX_Full,
1046 .advertised = ADVERTISED_1000baseKX_Full,
1050 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1051 .supported = SUPPORTED_10000baseT_Full,
1052 .advertised = ADVERTISED_10000baseT_Full,
1056 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1057 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1058 .supported = SUPPORTED_10000baseKX4_Full,
1059 .advertised = ADVERTISED_10000baseKX4_Full,
1063 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1064 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1065 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1066 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1067 .supported = SUPPORTED_10000baseKR_Full,
1068 .advertised = ADVERTISED_10000baseKR_Full,
1072 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1073 .supported = SUPPORTED_20000baseKR2_Full,
1074 .advertised = ADVERTISED_20000baseKR2_Full,
1078 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1079 .supported = SUPPORTED_40000baseCR4_Full,
1080 .advertised = ADVERTISED_40000baseCR4_Full,
1084 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1085 .supported = SUPPORTED_40000baseKR4_Full,
1086 .advertised = ADVERTISED_40000baseKR4_Full,
1090 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1091 .supported = SUPPORTED_40000baseSR4_Full,
1092 .advertised = ADVERTISED_40000baseSR4_Full,
1096 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1097 .supported = SUPPORTED_40000baseLR4_Full,
1098 .advertised = ADVERTISED_40000baseLR4_Full,
1102 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
1103 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
1104 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1108 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
1109 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
1110 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1114 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1115 .supported = SUPPORTED_56000baseKR4_Full,
1116 .advertised = ADVERTISED_56000baseKR4_Full,
1120 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
1121 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1122 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1123 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1128 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
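/* Helpers below translate between the PTYS Ethernet protocol bitmask used by
 * the hardware and the ethtool SUPPORTED_ / ADVERTISED_ link mode flags.
 */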
1130 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1132 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1133 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1134 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1135 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1136 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1137 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1138 return SUPPORTED_FIBRE;
1140 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1141 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1142 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1143 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1144 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1145 return SUPPORTED_Backplane;
1149 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1154 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1155 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1156 modes |= mlxsw_sp_port_link_mode[i].supported;
1161 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1166 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1167 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1168 modes |= mlxsw_sp_port_link_mode[i].advertised;
1173 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1174 struct ethtool_cmd *cmd)
1176 u32 speed = SPEED_UNKNOWN;
1177 u8 duplex = DUPLEX_UNKNOWN;
1183 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1184 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1185 speed = mlxsw_sp_port_link_mode[i].speed;
1186 duplex = DUPLEX_FULL;
1191 ethtool_cmd_speed_set(cmd, speed);
1192 cmd->duplex = duplex;
1195 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1197 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1198 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1199 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1200 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1203 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1204 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1205 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1208 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1209 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1210 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1211 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1217 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1218 struct ethtool_cmd *cmd)
1220 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1221 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1222 char ptys_pl[MLXSW_REG_PTYS_LEN];
1224 u32 eth_proto_admin;
1228 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1229 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1231 netdev_err(dev, "Failed to get proto");
1234 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1235 &eth_proto_admin, &eth_proto_oper);
1237 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1238 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1239 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1240 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1241 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1242 eth_proto_oper, cmd);
1244 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1245 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1246 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1248 cmd->transceiver = XCVR_INTERNAL;
1252 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1257 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1258 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1259 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1264 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1269 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1270 if (speed == mlxsw_sp_port_link_mode[i].speed)
1271 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1276 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1277 struct ethtool_cmd *cmd)
1279 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1280 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1281 char ptys_pl[MLXSW_REG_PTYS_LEN];
1285 u32 eth_proto_admin;
1289 speed = ethtool_cmd_speed(cmd);
1291 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1292 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1293 mlxsw_sp_to_ptys_speed(speed);
1295 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1296 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1298 netdev_err(dev, "Failed to get proto");
1301 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1303 eth_proto_new = eth_proto_new & eth_proto_cap;
1304 if (!eth_proto_new) {
1305 netdev_err(dev, "Not supported proto admin requested");
1308 if (eth_proto_new == eth_proto_admin)
1311 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1312 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1314 netdev_err(dev, "Failed to set proto admin");
1318 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1320 netdev_err(dev, "Failed to get oper status");
1326 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1328 netdev_err(dev, "Failed to set admin status");
1332 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1334 netdev_err(dev, "Failed to set admin status");
1341 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1342 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
1343 .get_link = ethtool_op_get_link,
1344 .get_strings = mlxsw_sp_port_get_strings,
1345 .set_phys_id = mlxsw_sp_port_set_phys_id,
1346 .get_ethtool_stats = mlxsw_sp_port_get_stats,
1347 .get_sset_count = mlxsw_sp_port_get_sset_count,
1348 .get_settings = mlxsw_sp_port_get_settings,
1349 .set_settings = mlxsw_sp_port_set_settings,
1352 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1354 struct mlxsw_sp_port *mlxsw_sp_port;
1355 struct net_device *dev;
1360 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1363 mlxsw_sp_port = netdev_priv(dev);
1364 mlxsw_sp_port->dev = dev;
1365 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1366 mlxsw_sp_port->local_port = local_port;
1367 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1368 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1369 if (!mlxsw_sp_port->active_vlans) {
1371 goto err_port_active_vlans_alloc;
1373 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1374 if (!mlxsw_sp_port->untagged_vlans) {
1376 goto err_port_untagged_vlans_alloc;
1378 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1380 mlxsw_sp_port->pcpu_stats =
1381 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1382 if (!mlxsw_sp_port->pcpu_stats) {
1384 goto err_alloc_stats;
1387 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1388 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1390 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1392 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1393 mlxsw_sp_port->local_port);
1394 goto err_dev_addr_init;
1397 netif_carrier_off(dev);
1399 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1400 NETIF_F_HW_VLAN_CTAG_FILTER;
1402 /* Each packet needs to have a Tx header (metadata) on top of all other headers.
1405 dev->hard_header_len += MLXSW_TXHDR_LEN;
1407 err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
1409 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
1410 mlxsw_sp_port->local_port);
1411 goto err_port_module_check;
1415 dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
1416 mlxsw_sp_port->local_port);
1417 goto port_not_usable;
1420 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1422 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1423 mlxsw_sp_port->local_port);
1424 goto err_port_system_port_mapping_set;
1427 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1429 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1430 mlxsw_sp_port->local_port);
1431 goto err_port_swid_set;
1434 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1436 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1437 mlxsw_sp_port->local_port);
1438 goto err_port_mtu_set;
1441 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1443 goto err_port_admin_status_set;
1445 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1447 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1448 mlxsw_sp_port->local_port);
1449 goto err_port_buffers_init;
1452 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1453 err = register_netdev(dev);
1455 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1456 mlxsw_sp_port->local_port);
1457 goto err_register_netdev;
1460 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1462 goto err_port_vlan_init;
1464 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1468 unregister_netdev(dev);
1469 err_register_netdev:
1470 err_port_buffers_init:
1471 err_port_admin_status_set:
1474 err_port_system_port_mapping_set:
1476 err_port_module_check:
1478 free_percpu(mlxsw_sp_port->pcpu_stats);
1480 kfree(mlxsw_sp_port->untagged_vlans);
1481 err_port_untagged_vlans_alloc:
1482 kfree(mlxsw_sp_port->active_vlans);
1483 err_port_active_vlans_alloc:
1488 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1490 struct net_device *dev = mlxsw_sp_port->dev;
1491 struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1493 list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1494 &mlxsw_sp_port->vports_list, vport.list) {
1495 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1497 /* vPorts created for VLAN devices should already be gone
1498 * by now, since we unregistered the port netdev.
1500 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1501 mlxsw_sp_port_kill_vid(dev, 0, vid);
1505 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1507 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1511 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1512 mlxsw_sp_port_vports_fini(mlxsw_sp_port);
1513 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1514 free_percpu(mlxsw_sp_port->pcpu_stats);
1515 kfree(mlxsw_sp_port->untagged_vlans);
1516 kfree(mlxsw_sp_port->active_vlans);
1517 free_netdev(mlxsw_sp_port->dev);
1520 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1524 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1525 mlxsw_sp_port_remove(mlxsw_sp, i);
1526 kfree(mlxsw_sp->ports);
1529 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1535 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1536 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1537 if (!mlxsw_sp->ports)
1540 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1541 err = mlxsw_sp_port_create(mlxsw_sp, i);
1543 goto err_port_create;
1548 for (i--; i >= 1; i--)
1549 mlxsw_sp_port_remove(mlxsw_sp, i);
1550 kfree(mlxsw_sp->ports);
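/* PUDE (port up/down event) handler: reflect the operational state reported by
 * the hardware into the netdev carrier state.
 */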
1554 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1555 char *pude_pl, void *priv)
1557 struct mlxsw_sp *mlxsw_sp = priv;
1558 struct mlxsw_sp_port *mlxsw_sp_port;
1559 enum mlxsw_reg_pude_oper_status status;
1562 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1563 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1564 if (!mlxsw_sp_port) {
1565 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1570 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1571 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1572 netdev_info(mlxsw_sp_port->dev, "link up\n");
1573 netif_carrier_on(mlxsw_sp_port->dev);
1575 netdev_info(mlxsw_sp_port->dev, "link down\n");
1576 netif_carrier_off(mlxsw_sp_port->dev);
1580 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
1581 .func = mlxsw_sp_pude_event_func,
1582 .trap_id = MLXSW_TRAP_ID_PUDE,
1585 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1586 enum mlxsw_event_trap_id trap_id)
1588 struct mlxsw_event_listener *el;
1589 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1593 case MLXSW_TRAP_ID_PUDE:
1594 el = &mlxsw_sp_pude_event;
1597 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1601 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1602 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1604 goto err_event_trap_set;
1609 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1613 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1614 enum mlxsw_event_trap_id trap_id)
1616 struct mlxsw_event_listener *el;
1619 case MLXSW_TRAP_ID_PUDE:
1620 el = &mlxsw_sp_pude_event;
1623 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1626 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
1629 struct mlxsw_sp *mlxsw_sp = priv;
1630 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1631 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1633 if (unlikely(!mlxsw_sp_port)) {
1634 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
1639 skb->dev = mlxsw_sp_port->dev;
1641 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1642 u64_stats_update_begin(&pcpu_stats->syncp);
1643 pcpu_stats->rx_packets++;
1644 pcpu_stats->rx_bytes += skb->len;
1645 u64_stats_update_end(&pcpu_stats->syncp);
1647 skb->protocol = eth_type_trans(skb, skb->dev);
1648 netif_receive_skb(skb);
1651 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1653 .func = mlxsw_sp_rx_listener_func,
1654 .local_port = MLXSW_PORT_DONT_CARE,
1655 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1657 /* Traps for specific L2 packet types, not trapped as FDB MC */
1659 .func = mlxsw_sp_rx_listener_func,
1660 .local_port = MLXSW_PORT_DONT_CARE,
1661 .trap_id = MLXSW_TRAP_ID_STP,
1664 .func = mlxsw_sp_rx_listener_func,
1665 .local_port = MLXSW_PORT_DONT_CARE,
1666 .trap_id = MLXSW_TRAP_ID_LACP,
1669 .func = mlxsw_sp_rx_listener_func,
1670 .local_port = MLXSW_PORT_DONT_CARE,
1671 .trap_id = MLXSW_TRAP_ID_EAPOL,
1674 .func = mlxsw_sp_rx_listener_func,
1675 .local_port = MLXSW_PORT_DONT_CARE,
1676 .trap_id = MLXSW_TRAP_ID_LLDP,
1679 .func = mlxsw_sp_rx_listener_func,
1680 .local_port = MLXSW_PORT_DONT_CARE,
1681 .trap_id = MLXSW_TRAP_ID_MMRP,
1684 .func = mlxsw_sp_rx_listener_func,
1685 .local_port = MLXSW_PORT_DONT_CARE,
1686 .trap_id = MLXSW_TRAP_ID_MVRP,
1689 .func = mlxsw_sp_rx_listener_func,
1690 .local_port = MLXSW_PORT_DONT_CARE,
1691 .trap_id = MLXSW_TRAP_ID_RPVST,
1694 .func = mlxsw_sp_rx_listener_func,
1695 .local_port = MLXSW_PORT_DONT_CARE,
1696 .trap_id = MLXSW_TRAP_ID_DHCP,
1699 .func = mlxsw_sp_rx_listener_func,
1700 .local_port = MLXSW_PORT_DONT_CARE,
1701 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1704 .func = mlxsw_sp_rx_listener_func,
1705 .local_port = MLXSW_PORT_DONT_CARE,
1706 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1709 .func = mlxsw_sp_rx_listener_func,
1710 .local_port = MLXSW_PORT_DONT_CARE,
1711 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1714 .func = mlxsw_sp_rx_listener_func,
1715 .local_port = MLXSW_PORT_DONT_CARE,
1716 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1719 .func = mlxsw_sp_rx_listener_func,
1720 .local_port = MLXSW_PORT_DONT_CARE,
1721 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1725 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
1727 char htgt_pl[MLXSW_REG_HTGT_LEN];
1728 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1732 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
1733 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
1737 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
1738 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
1742 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1743 err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
1744 &mlxsw_sp_rx_listener[i],
1747 goto err_rx_listener_register;
1749 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
1750 mlxsw_sp_rx_listener[i].trap_id);
1751 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1753 goto err_rx_trap_set;
1758 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1759 &mlxsw_sp_rx_listener[i],
1761 err_rx_listener_register:
1762 for (i--; i >= 0; i--) {
1763 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1764 mlxsw_sp_rx_listener[i].trap_id);
1765 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1767 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1768 &mlxsw_sp_rx_listener[i],
1774 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
1776 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1779 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1780 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1781 mlxsw_sp_rx_listener[i].trap_id);
1782 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1784 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1785 &mlxsw_sp_rx_listener[i],
1790 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1791 enum mlxsw_reg_sfgc_type type,
1792 enum mlxsw_reg_sfgc_bridge_type bridge_type)
1794 enum mlxsw_flood_table_type table_type;
1795 enum mlxsw_sp_flood_table flood_table;
1796 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1798 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
1799 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
1801 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
1803 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1804 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1806 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
1808 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1810 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
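/* Program flooding for every SFGC packet type: unknown unicast uses the UC
 * flood table and all other types the BM table, once for vFIDs (FID based
 * tables) and once for 802.1Q bridges (FID-offset based tables).
 */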
1813 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1817 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1818 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1821 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1822 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1826 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1827 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
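/* Configure which packet fields (L2/L3/L4) feed the LAG hash used to spread
 * traffic across aggregated ports (SLCR register).
 */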
1835 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
1837 char slcr_pl[MLXSW_REG_SLCR_LEN];
1839 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
1840 MLXSW_REG_SLCR_LAG_HASH_DMAC |
1841 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
1842 MLXSW_REG_SLCR_LAG_HASH_VLANID |
1843 MLXSW_REG_SLCR_LAG_HASH_SIP |
1844 MLXSW_REG_SLCR_LAG_HASH_DIP |
1845 MLXSW_REG_SLCR_LAG_HASH_SPORT |
1846 MLXSW_REG_SLCR_LAG_HASH_DPORT |
1847 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
1848 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
1851 static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
1852 const struct mlxsw_bus_info *mlxsw_bus_info)
1854 struct mlxsw_sp *mlxsw_sp = priv;
1857 mlxsw_sp->core = mlxsw_core;
1858 mlxsw_sp->bus_info = mlxsw_bus_info;
1859 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
1860 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
1861 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
1863 err = mlxsw_sp_base_mac_get(mlxsw_sp);
1865 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
1869 err = mlxsw_sp_ports_create(mlxsw_sp);
1871 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
1875 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1877 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
1878 goto err_event_register;
1881 err = mlxsw_sp_traps_init(mlxsw_sp);
1883 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
1884 goto err_rx_listener_register;
1887 err = mlxsw_sp_flood_init(mlxsw_sp);
1889 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
1890 goto err_flood_init;
1893 err = mlxsw_sp_buffers_init(mlxsw_sp);
1895 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
1896 goto err_buffers_init;
1899 err = mlxsw_sp_lag_init(mlxsw_sp);
1901 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
1905 err = mlxsw_sp_switchdev_init(mlxsw_sp);
1907 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
1908 goto err_switchdev_init;
1917 mlxsw_sp_traps_fini(mlxsw_sp);
1918 err_rx_listener_register:
1919 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1921 mlxsw_sp_ports_remove(mlxsw_sp);
1925 static void mlxsw_sp_fini(void *priv)
1927 struct mlxsw_sp *mlxsw_sp = priv;
1929 mlxsw_sp_switchdev_fini(mlxsw_sp);
1930 mlxsw_sp_traps_fini(mlxsw_sp);
1931 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1932 mlxsw_sp_ports_remove(mlxsw_sp);
1935 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
1936 .used_max_vepa_channels = 1,
1937 .max_vepa_channels = 0,
1939 .max_lag = MLXSW_SP_LAG_MAX,
1940 .used_max_port_per_lag = 1,
1941 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
1943 .max_mid = MLXSW_SP_MID_MAX,
1946 .used_max_system_port = 1,
1947 .max_system_port = 64,
1948 .used_max_vlan_groups = 1,
1949 .max_vlan_groups = 127,
1950 .used_max_regions = 1,
1952 .used_flood_tables = 1,
1953 .used_flood_mode = 1,
1955 .max_fid_offset_flood_tables = 2,
1956 .fid_offset_flood_table_size = VLAN_N_VID - 1,
1957 .max_fid_flood_tables = 2,
1958 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
1959 .used_max_ib_mc = 1,
1966 .type = MLXSW_PORT_SWID_TYPE_ETH,
1971 static struct mlxsw_driver mlxsw_sp_driver = {
1972 .kind = MLXSW_DEVICE_KIND_SPECTRUM,
1973 .owner = THIS_MODULE,
1974 .priv_size = sizeof(struct mlxsw_sp),
1975 .init = mlxsw_sp_init,
1976 .fini = mlxsw_sp_fini,
1977 .txhdr_construct = mlxsw_sp_txhdr_construct,
1978 .txhdr_len = MLXSW_TXHDR_LEN,
1979 .profile = &mlxsw_sp_config_profile,
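/* FDB flush helpers (SFDF register): flush learned entries by port, by
 * {port, FID}, by LAG or by {LAG, FID}, used when ports leave a bridge or LAG.
 */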
1983 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
1985 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1986 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1988 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
1989 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
1991 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1995 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
1998 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1999 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2001 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2002 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2003 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2004 mlxsw_sp_port->local_port);
2006 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2010 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2012 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2013 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2015 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2016 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2018 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2022 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2025 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2026 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2028 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2029 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2030 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2032 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
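/* When a port (or LAG) has VLAN upper devices there is no single FID covering
 * it, so flush learned FDB entries per {port/LAG, FID} for every possible VID.
 */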
2036 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2038 int err, last_err = 0;
2041 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2042 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2051 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2053 int err, last_err = 0;
2056 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2057 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2065 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2067 if (!list_empty(&mlxsw_sp_port->vports_list))
2068 if (mlxsw_sp_port->lagged)
2069 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2071 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2073 if (mlxsw_sp_port->lagged)
2074 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2076 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2079 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2081 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2082 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2084 if (mlxsw_sp_vport->lagged)
2085 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2088 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2091 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2093 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2096 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2098 struct net_device *dev = mlxsw_sp_port->dev;
2101 /* When port is not bridged untagged packets are tagged with
2102 * PVID=VID=1, thereby creating an implicit VLAN interface in
2103 * the device. Remove it and let bridge code take care of its own VLANs.
2106 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2110 mlxsw_sp_port->learning = 1;
2111 mlxsw_sp_port->learning_sync = 1;
2112 mlxsw_sp_port->uc_flood = 1;
2113 mlxsw_sp_port->bridged = 1;
2118 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2121 struct net_device *dev = mlxsw_sp_port->dev;
2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2128 mlxsw_sp_port->learning = 0;
2129 mlxsw_sp_port->learning_sync = 0;
2130 mlxsw_sp_port->uc_flood = 0;
2131 mlxsw_sp_port->bridged = 0;
2133 /* Add implicit VLAN interface in the device, so that untagged
2134 * packets will be classified to the default vFID.
2136 return mlxsw_sp_port_add_vid(dev, 0, 1);
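/* Only a single 802.1Q bridge can be offloaded per switch. The helpers below
 * track which bridge device that is and how many ports are enslaved to it.
 */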
2139 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2140 struct net_device *br_dev)
2142 return !mlxsw_sp->master_bridge.dev ||
2143 mlxsw_sp->master_bridge.dev == br_dev;
2146 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2147 struct net_device *br_dev)
2149 mlxsw_sp->master_bridge.dev = br_dev;
2150 mlxsw_sp->master_bridge.ref_count++;
2153 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2154 struct net_device *br_dev)
2156 if (--mlxsw_sp->master_bridge.ref_count == 0)
2157 mlxsw_sp->master_bridge.dev = NULL;
2160 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2162 char sldr_pl[MLXSW_REG_SLDR_LEN];
2164 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2165 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2168 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2170 char sldr_pl[MLXSW_REG_SLDR_LEN];
2172 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2173 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
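
/* Return the LAG ID already associated with 'lag_dev', or the first free
 * LAG ID if the device is not yet known to the driver.
 */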
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
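
/* Bind the port to 'lag_dev': reserve a LAG ID, create the LAG in hardware
 * when the first member joins, add and enable the port in the collector and
 * record the port to LAG mapping in the core.
 */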
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);
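
/* Unbind the port from its LAG: disable and remove the collector port, tear
 * down any bridge state that was configured on top of the LAG, and destroy
 * the LAG in hardware once the last member has left.
 */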
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}
static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}
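
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a physical port:
 * veto unsupported topologies and reflect VLAN, bridge and LAG enslavement
 * into the device. Triggered, for example, by userspace commands such as
 * "ip link set dev swp1 master br0" (device names here are illustrative).
 */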
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}
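
/* Bridge vFIDs are used for VLAN uppers that are themselves enslaved to a
 * bridge. They are allocated from the range above the per-port vFIDs, i.e.
 * starting at MLXSW_SP_VFID_PORT_MAX.
 */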
static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
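
/* Move a VLAN upper (vPort) out of its bridge: remap the {Port, VID} pair
 * from the bridge vFID back to a per-port vFID, disable learning, clear
 * flooding, return the VID to forwarding state and optionally flush the FDB
 * entries learned on it.
 */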
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		WARN_ON(!vfid);
		return -EINVAL;
	}

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
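
/* Forbid a port from having more than one of its VLAN uppers enslaved to
 * the same bridge.
 */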
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master)
			break;
		if (info->linking) {
			if (!mlxsw_sp_vport) {
				WARN_ON(!mlxsw_sp_vport);
				return NOTIFY_BAD;
			}
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}
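
/* Top-level netdevice notifier: dispatch events on physical ports, LAG
 * devices and VLAN devices to their respective handlers.
 */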
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
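
/* The netdevice notifier is registered before the core driver so that
 * netdev events generated while ports are being created can already be
 * handled; teardown happens in the reverse order.
 */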
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);