1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
49 #define MLXSW_SP_FWREV_MINOR 2010
50 #define MLXSW_SP_FWREV_SUBMINOR 1006
52 #define MLXSW_SP1_FWREV_MAJOR 13
53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
56 .major = MLXSW_SP1_FWREV_MAJOR,
57 .minor = MLXSW_SP_FWREV_MINOR,
58 .subminor = MLXSW_SP_FWREV_SUBMINOR,
59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
60 };
62 #define MLXSW_SP1_FW_FILENAME \
63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
64 "." __stringify(MLXSW_SP_FWREV_MINOR) \
65 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
67 #define MLXSW_SP2_FWREV_MAJOR 29
69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
70 .major = MLXSW_SP2_FWREV_MAJOR,
71 .minor = MLXSW_SP_FWREV_MINOR,
72 .subminor = MLXSW_SP_FWREV_SUBMINOR,
73 };
75 #define MLXSW_SP2_FW_FILENAME \
76 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
77 "." __stringify(MLXSW_SP_FWREV_MINOR) \
78 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
80 #define MLXSW_SP3_FWREV_MAJOR 30
82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
83 .major = MLXSW_SP3_FWREV_MAJOR,
84 .minor = MLXSW_SP_FWREV_MINOR,
85 .subminor = MLXSW_SP_FWREV_SUBMINOR,
86 };
88 #define MLXSW_SP3_FW_FILENAME \
89 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
90 "." __stringify(MLXSW_SP_FWREV_MINOR) \
91 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
93 #define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
94 "mellanox/lc_ini_bundle_" \
95 __stringify(MLXSW_SP_FWREV_MINOR) "_" \
96 __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
98 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
99 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
100 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
101 static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
103 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
104 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
105 };
106 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
107 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
108 };
110 /* tx_hdr_version
111 * Tx header version.
112 * Must be set to 1.
113 */
114 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
116 /* tx_hdr_ctl
117 * Packet control type.
118 * 0 - Ethernet control (e.g. EMADs, LACP)
119 * 1 - Packet control
120 */
121 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
123 /* tx_hdr_proto
124 * Packet protocol type. Must be set to 1 (Ethernet).
125 */
126 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
128 /* tx_hdr_rx_is_router
129 * Packet is sent from the router. Valid for data packets only.
130 */
131 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
133 /* tx_hdr_fid_valid
134 * Indicates if the 'fid' field is valid and should be used for
135 * forwarding lookup. Valid for data packets only.
136 */
137 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
139 /* tx_hdr_swid
140 * Switch partition ID. Must be set to 0.
141 */
142 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
144 /* tx_hdr_control_tclass
145 * Indicates if the packet should use the control TClass and not one
146 * of the data TClasses.
147 */
148 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
150 /* tx_hdr_etclass
151 * Egress TClass to be used on the egress device on the egress port.
152 */
153 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
155 /* tx_hdr_port_mid
156 * Destination local port for unicast packets.
157 * Destination multicast ID for multicast packets.
158 *
159 * Control packets are directed to a specific egress port, while data
160 * packets are transmitted through the CPU port (0) into the switch partition,
161 * where forwarding rules are applied.
162 */
163 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
165 /* tx_hdr_fid
166 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
167 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
168 * Valid for data packets only.
169 */
170 MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
172 /* tx_hdr_type
173 * 0 - Data packets
174 * 6 - Control packets
175 */
176 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
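/* Note (added for readability): each MLXSW_ITEM32() above expands into
 * packed-field accessors such as mlxsw_tx_hdr_version_set() and
 * mlxsw_tx_hdr_fid_set(), which the TX header construction helpers below
 * use to fill the MLXSW_TXHDR_LEN-byte header pushed in front of every
 * packet handed to the device.
 */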
178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
179 unsigned int counter_index, u64 *packets,
180 u64 *bytes)
181 {
182 char mgpc_pl[MLXSW_REG_MGPC_LEN];
183 int err;
185 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
186 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
187 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
188 if (err)
189 return err;
190 if (packets)
191 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
192 if (bytes)
193 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
194 return 0;
195 }
197 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
198 unsigned int counter_index)
200 char mgpc_pl[MLXSW_REG_MGPC_LEN];
202 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
203 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
204 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
207 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
208 unsigned int *p_counter_index)
209 {
210 int err;
212 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
213 p_counter_index);
214 if (err)
215 return err;
216 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
217 if (err)
218 goto err_counter_clear;
219 return 0;
221 err_counter_clear:
222 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
223 *p_counter_index);
224 return err;
225 }
227 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
228 unsigned int counter_index)
230 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
234 void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
235 const struct mlxsw_tx_info *tx_info)
237 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
239 memset(txhdr, 0, MLXSW_TXHDR_LEN);
241 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
242 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
243 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
244 mlxsw_tx_hdr_swid_set(txhdr, 0);
245 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
246 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
247 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
248 }
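/* Note (added for readability): on Spectrum-2 and Spectrum-3, PTP event
 * packets that need a hardware time stamp cannot be sent with the control
 * TX header above (see mlxsw_sp_txhdr_handle() below). The helper that
 * follows builds a data-packet TX header instead, using a dedicated
 * per-port FID (max_fid + local_port - 1) so that the regular data
 * pipeline forwards the packet out of the intended egress port.
 */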
250 static int
251 mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
252 struct mlxsw_sp_port *mlxsw_sp_port,
253 struct sk_buff *skb,
254 const struct mlxsw_tx_info *tx_info)
255 {
256 char *txhdr;
257 u16 max_fid;
258 int err;
260 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
261 err = -ENOMEM;
262 goto err_skb_cow_head;
263 }
265 if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
266 err = -EIO;
267 goto err_skb_cow_head;
268 }
269 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
271 txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
272 memset(txhdr, 0, MLXSW_TXHDR_LEN);
274 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
275 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
276 mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
277 mlxsw_tx_hdr_fid_valid_set(txhdr, true);
278 mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
279 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
281 return 0;
283 err_skb_cow_head:
284 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
285 dev_kfree_skb_any(skb);
286 return err;
287 }
289 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
290 {
291 unsigned int type;
293 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
294 return false;
296 type = ptp_classify_raw(skb);
297 return !!ptp_parse_header(skb, type);
298 }
300 static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
301 struct mlxsw_sp_port *mlxsw_sp_port,
302 struct sk_buff *skb,
303 const struct mlxsw_tx_info *tx_info)
304 {
305 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
307 /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
308 * need special handling and cannot be transmitted as regular control
309 * packets.
310 */
311 if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
312 return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
313 mlxsw_sp_port, skb,
314 tx_info);
316 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
317 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
318 dev_kfree_skb_any(skb);
319 return -ENOMEM;
320 }
322 mlxsw_sp_txhdr_construct(skb, tx_info);
323 return 0;
324 }
326 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
327 {
328 switch (state) {
329 case BR_STATE_FORWARDING:
330 return MLXSW_REG_SPMS_STATE_FORWARDING;
331 case BR_STATE_LEARNING:
332 return MLXSW_REG_SPMS_STATE_LEARNING;
333 case BR_STATE_LISTENING:
334 case BR_STATE_DISABLED:
335 case BR_STATE_BLOCKING:
336 return MLXSW_REG_SPMS_STATE_DISCARDING;
337 default:
338 BUG();
339 }
340 }
342 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
345 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
346 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
350 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
351 if (!spms_pl)
352 return -ENOMEM;
353 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
354 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
356 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
357 kfree(spms_pl);
358 return err;
359 }
361 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
363 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
366 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
369 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
373 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
377 char paos_pl[MLXSW_REG_PAOS_LEN];
379 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
380 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
381 MLXSW_PORT_ADMIN_STATUS_DOWN);
382 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
385 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
386 const unsigned char *addr)
388 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
389 char ppad_pl[MLXSW_REG_PPAD_LEN];
391 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
392 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
393 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
396 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
398 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
400 eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
401 mlxsw_sp_port->local_port);
402 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
403 mlxsw_sp_port->dev->dev_addr);
406 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
408 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
409 char pmtu_pl[MLXSW_REG_PMTU_LEN];
412 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
413 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
417 *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
421 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
423 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
424 char pmtu_pl[MLXSW_REG_PMTU_LEN];
426 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
427 if (mtu > mlxsw_sp_port->max_mtu)
430 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
431 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
434 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
435 u16 local_port, u8 swid)
437 char pspa_pl[MLXSW_REG_PSPA_LEN];
439 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
440 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
443 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
446 char svpe_pl[MLXSW_REG_SVPE_LEN];
448 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
449 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
452 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
459 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
462 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
464 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
469 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
471 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
472 char spfsr_pl[MLXSW_REG_SPFSR_LEN];
475 if (mlxsw_sp_port->security == enable)
476 return 0;
478 mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
479 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
483 mlxsw_sp_port->security = enable;
487 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
488 {
489 switch (ethtype) {
490 case ETH_P_8021Q:
491 *p_sver_type = 0;
492 break;
493 case ETH_P_8021AD:
494 *p_sver_type = 1;
495 break;
496 default:
497 return -EINVAL;
498 }
500 return 0;
501 }
503 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
506 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
507 char spevet_pl[MLXSW_REG_SPEVET_LEN];
511 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
515 mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
516 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
519 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
520 u16 vid, u16 ethtype)
522 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
523 char spvid_pl[MLXSW_REG_SPVID_LEN];
527 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
531 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
534 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
537 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
540 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
541 char spaft_pl[MLXSW_REG_SPAFT_LEN];
543 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
544 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
547 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
548 u16 ethtype)
549 {
550 int err;
552 if (!vid) {
553 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
554 if (err)
555 return err;
556 } else {
557 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
558 if (err)
559 return err;
560 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
561 if (err)
562 goto err_port_allow_untagged_set;
563 }
565 mlxsw_sp_port->pvid = vid;
566 return 0;
568 err_port_allow_untagged_set:
569 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
570 return err;
571 }
573 static int
574 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
576 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
577 char sspr_pl[MLXSW_REG_SSPR_LEN];
579 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
584 mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
585 u16 local_port, char *pmlp_pl,
586 struct mlxsw_sp_port_mapping *port_mapping)
595 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
596 slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
597 width = mlxsw_reg_pmlp_width_get(pmlp_pl);
598 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
599 first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
601 if (width && !is_power_of_2(width)) {
602 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
607 for (i = 0; i < width; i++) {
608 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
609 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
613 if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
614 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
619 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
620 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
621 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
625 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
626 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
632 port_mapping->module = module;
633 port_mapping->slot_index = slot_index;
634 port_mapping->width = width;
635 port_mapping->module_width = width;
636 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
641 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
642 struct mlxsw_sp_port_mapping *port_mapping)
644 char pmlp_pl[MLXSW_REG_PMLP_LEN];
647 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
648 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
651 return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
652 pmlp_pl, port_mapping);
656 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
657 const struct mlxsw_sp_port_mapping *port_mapping)
659 char pmlp_pl[MLXSW_REG_PMLP_LEN];
662 mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
663 port_mapping->module);
665 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
666 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
667 for (i = 0; i < port_mapping->width; i++) {
668 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
669 port_mapping->slot_index);
670 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
671 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
674 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
680 mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
681 port_mapping->module);
685 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
686 u8 slot_index, u8 module)
688 char pmlp_pl[MLXSW_REG_PMLP_LEN];
690 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
691 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
692 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
693 mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
696 static int mlxsw_sp_port_open(struct net_device *dev)
698 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
699 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
702 err = mlxsw_env_module_port_up(mlxsw_sp->core,
703 mlxsw_sp_port->mapping.slot_index,
704 mlxsw_sp_port->mapping.module);
707 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
709 goto err_port_admin_status_set;
710 netif_start_queue(dev);
713 err_port_admin_status_set:
714 mlxsw_env_module_port_down(mlxsw_sp->core,
715 mlxsw_sp_port->mapping.slot_index,
716 mlxsw_sp_port->mapping.module);
720 static int mlxsw_sp_port_stop(struct net_device *dev)
722 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
725 netif_stop_queue(dev);
726 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
727 mlxsw_env_module_port_down(mlxsw_sp->core,
728 mlxsw_sp_port->mapping.slot_index,
729 mlxsw_sp_port->mapping.module);
733 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
734 struct net_device *dev)
736 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
737 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
738 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
739 const struct mlxsw_tx_info tx_info = {
740 .local_port = mlxsw_sp_port->local_port,
741 .is_emad = false,
742 };
743 u64 len;
744 int err;
746 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
748 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
749 return NETDEV_TX_BUSY;
751 if (eth_skb_pad(skb)) {
752 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
753 return NETDEV_TX_OK;
754 }
756 err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
757 &tx_info);
758 if (err)
759 return NETDEV_TX_OK;
761 /* TX header is consumed by HW on the way so we shouldn't count its
762 * bytes as being sent.
763 */
764 len = skb->len - MLXSW_TXHDR_LEN;
766 /* Due to a race we might fail here because of a full queue. In that
767 * unlikely case we simply drop the packet.
768 */
769 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
771 if (!err) {
772 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
773 u64_stats_update_begin(&pcpu_stats->syncp);
774 pcpu_stats->tx_packets++;
775 pcpu_stats->tx_bytes += len;
776 u64_stats_update_end(&pcpu_stats->syncp);
777 } else {
778 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
779 dev_kfree_skb_any(skb);
780 }
781 return NETDEV_TX_OK;
782 }
784 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
790 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791 struct sockaddr *addr = p;
794 if (!is_valid_ether_addr(addr->sa_data))
795 return -EADDRNOTAVAIL;
797 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
800 eth_hw_addr_set(dev, addr->sa_data);
804 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
806 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
807 struct mlxsw_sp_hdroom orig_hdroom;
808 struct mlxsw_sp_hdroom hdroom;
811 orig_hdroom = *mlxsw_sp_port->hdroom;
813 hdroom = orig_hdroom;
815 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
817 err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
819 netdev_err(dev, "Failed to configure port's headroom\n");
823 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
825 goto err_port_mtu_set;
830 mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
835 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
836 struct rtnl_link_stats64 *stats)
838 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
839 struct mlxsw_sp_port_pcpu_stats *p;
840 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
845 for_each_possible_cpu(i) {
846 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
848 start = u64_stats_fetch_begin(&p->syncp);
849 rx_packets = p->rx_packets;
850 rx_bytes = p->rx_bytes;
851 tx_packets = p->tx_packets;
852 tx_bytes = p->tx_bytes;
853 } while (u64_stats_fetch_retry(&p->syncp, start));
855 stats->rx_packets += rx_packets;
856 stats->rx_bytes += rx_bytes;
857 stats->tx_packets += tx_packets;
858 stats->tx_bytes += tx_bytes;
859 /* tx_dropped is u32, updated without syncp protection. */
860 tx_dropped += p->tx_dropped;
862 stats->tx_dropped = tx_dropped;
866 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
867 {
868 switch (attr_id) {
869 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
870 return true;
871 }
873 return false;
874 }
876 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
877 void *sp)
878 {
879 switch (attr_id) {
880 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
881 return mlxsw_sp_port_get_sw_stats64(dev, sp);
882 }
884 return -EINVAL;
885 }
887 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
888 int prio, char *ppcnt_pl)
890 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
891 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
893 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
894 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
897 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
898 struct rtnl_link_stats64 *stats)
900 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
903 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
909 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
911 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
913 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
915 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
917 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
919 stats->rx_crc_errors =
920 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
921 stats->rx_frame_errors =
922 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
924 stats->rx_length_errors = (
925 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
926 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
927 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
929 stats->rx_errors = (stats->rx_crc_errors +
930 stats->rx_frame_errors + stats->rx_length_errors);
937 mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
938 struct mlxsw_sp_port_xstats *xstats)
940 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
943 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
946 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
948 for (i = 0; i < TC_MAX_QUEUE; i++) {
949 err = mlxsw_sp_port_get_stats_raw(dev,
950 MLXSW_REG_PPCNT_TC_CONG_CNT,
955 xstats->wred_drop[i] =
956 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
957 xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);
960 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
966 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
967 xstats->tail_drop[i] =
968 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
971 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
972 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
977 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
978 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
982 static void update_stats_cache(struct work_struct *work)
984 struct mlxsw_sp_port *mlxsw_sp_port =
985 container_of(work, struct mlxsw_sp_port,
986 periodic_hw_stats.update_dw.work);
988 if (!netif_carrier_ok(mlxsw_sp_port->dev))
989 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
990 * necessary when port goes down.
991 */
992 goto out;
994 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
995 &mlxsw_sp_port->periodic_hw_stats.stats);
996 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
997 &mlxsw_sp_port->periodic_hw_stats.xstats);
999 out:
1000 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1001 MLXSW_HW_STATS_UPDATE_TIME);
1002 }
1004 /* Return the stats from a cache that is updated periodically,
1005 * as this function might get called in an atomic context.
1006 */
1007 static void
1008 mlxsw_sp_port_get_stats64(struct net_device *dev,
1009 struct rtnl_link_stats64 *stats)
1011 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1013 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1016 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1017 u16 vid_begin, u16 vid_end,
1018 bool is_member, bool untagged)
1020 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1024 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1028 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1029 vid_end, is_member, untagged);
1030 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1035 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1036 u16 vid_end, bool is_member, bool untagged)
1041 for (vid = vid_begin; vid <= vid_end;
1042 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1043 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1046 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1047 is_member, untagged);
1055 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1058 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1060 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1061 &mlxsw_sp_port->vlans_list, list) {
1062 if (!flush_default &&
1063 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1065 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1070 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1072 if (mlxsw_sp_port_vlan->bridge_port)
1073 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1074 else if (mlxsw_sp_port_vlan->fid)
1075 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1078 struct mlxsw_sp_port_vlan *
1079 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1081 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1082 bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1085 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1086 if (mlxsw_sp_port_vlan)
1087 return ERR_PTR(-EEXIST);
1089 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1091 return ERR_PTR(err);
1093 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1094 if (!mlxsw_sp_port_vlan) {
1096 goto err_port_vlan_alloc;
1099 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1100 mlxsw_sp_port_vlan->vid = vid;
1101 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1103 return mlxsw_sp_port_vlan;
1105 err_port_vlan_alloc:
1106 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1107 return ERR_PTR(err);
1110 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1112 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1113 u16 vid = mlxsw_sp_port_vlan->vid;
1115 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1116 list_del(&mlxsw_sp_port_vlan->list);
1117 kfree(mlxsw_sp_port_vlan);
1118 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1119 }
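/* Note (added for readability): a struct mlxsw_sp_port_vlan represents one
 * {port, VID} pair. It is created on demand (e.g. from ndo_vlan_rx_add_vid
 * below), tracked on the port's vlans_list, and must leave any bridge port
 * or router FID it joined before being destroyed, which
 * mlxsw_sp_port_vlan_cleanup() above takes care of.
 */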
1121 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1122 __be16 __always_unused proto, u16 vid)
1124 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1126 /* VLAN 0 is added to HW filter when device goes up, but it is
1127 * reserved in our case, so simply return.
1128 */
1129 if (!vid)
1130 return 0;
1132 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1133 }
1135 int mlxsw_sp_port_kill_vid(struct net_device *dev,
1136 __be16 __always_unused proto, u16 vid)
1138 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1139 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1141 /* VLAN 0 is removed from HW filter when device goes down, but
1142 * it is reserved in our case, so simply return.
1143 */
1144 if (!vid)
1145 return 0;
1147 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1148 if (!mlxsw_sp_port_vlan)
1149 return 0;
1150 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1152 return 0;
1153 }
1155 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1156 struct flow_block_offload *f)
1158 switch (f->binder_type) {
1159 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1160 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1161 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1162 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1163 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1164 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1165 case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
1166 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
1167 default:
1168 return -EOPNOTSUPP;
1169 }
1170 }
1172 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1173 void *type_data)
1174 {
1175 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1177 switch (type) {
1178 case TC_SETUP_BLOCK:
1179 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1180 case TC_SETUP_QDISC_RED:
1181 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1182 case TC_SETUP_QDISC_PRIO:
1183 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1184 case TC_SETUP_QDISC_ETS:
1185 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1186 case TC_SETUP_QDISC_TBF:
1187 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1188 case TC_SETUP_QDISC_FIFO:
1189 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1190 default:
1191 return -EOPNOTSUPP;
1192 }
1193 }
1195 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1196 {
1197 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1199 if (!enable) {
1200 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1201 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1202 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1203 return -EINVAL;
1204 }
1205 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1206 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1207 } else {
1208 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1209 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1210 }
1211 return 0;
1212 }
1214 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1216 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1217 char pplr_pl[MLXSW_REG_PPLR_LEN];
1220 if (netif_running(dev))
1221 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1223 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1224 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1227 if (netif_running(dev))
1228 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1230 return err;
1231 }
1233 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
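/* Note (added for readability): mlxsw_sp_handle_feature() below applies a
 * single feature bit. It calls the per-feature handler only when the bit
 * actually changes and mirrors the result into dev->features, so that
 * mlxsw_sp_set_features() can compose several independent toggles and
 * report one aggregate error.
 */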
1235 static int mlxsw_sp_handle_feature(struct net_device *dev,
1236 netdev_features_t wanted_features,
1237 netdev_features_t feature,
1238 mlxsw_sp_feature_handler feature_handler)
1240 netdev_features_t changes = wanted_features ^ dev->features;
1241 bool enable = !!(wanted_features & feature);
1242 int err;
1244 if (!(changes & feature))
1245 return 0;
1247 err = feature_handler(dev, enable);
1248 if (err) {
1249 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1250 enable ? "Enable" : "Disable", &feature, err);
1251 return err;
1252 }
1254 if (enable)
1255 dev->features |= feature;
1256 else
1257 dev->features &= ~feature;
1259 return 0;
1260 }
1261 static int mlxsw_sp_set_features(struct net_device *dev,
1262 netdev_features_t features)
1264 netdev_features_t oper_features = dev->features;
1265 int err = 0;
1267 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1268 mlxsw_sp_feature_hw_tc);
1269 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1270 mlxsw_sp_feature_loopback);
1272 if (err) {
1273 dev->features = oper_features;
1274 return -EINVAL;
1275 }
1277 return 0;
1278 }
1280 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1283 struct hwtstamp_config config;
1286 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1289 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1294 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1300 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1303 struct hwtstamp_config config;
1306 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1311 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1317 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1319 struct hwtstamp_config config = {0};
1321 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1325 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1327 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1331 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1333 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1339 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1340 .ndo_open = mlxsw_sp_port_open,
1341 .ndo_stop = mlxsw_sp_port_stop,
1342 .ndo_start_xmit = mlxsw_sp_port_xmit,
1343 .ndo_setup_tc = mlxsw_sp_setup_tc,
1344 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
1345 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1346 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1347 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
1348 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1349 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1350 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1351 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1352 .ndo_set_features = mlxsw_sp_set_features,
1353 .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
1357 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1359 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1360 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1361 const struct mlxsw_sp_port_type_speed_ops *ops;
1362 char ptys_pl[MLXSW_REG_PTYS_LEN];
1363 u32 eth_proto_cap_masked;
1366 ops = mlxsw_sp->port_type_speed_ops;
1368 /* Set advertised speeds to speeds supported by both the driver
1371 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1373 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1377 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap,
1378 ð_proto_admin, ð_proto_oper);
1379 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
1380 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1381 eth_proto_cap_masked,
1382 mlxsw_sp_port->link.autoneg);
1383 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1386 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1388 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1389 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1390 char ptys_pl[MLXSW_REG_PTYS_LEN];
1394 port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1395 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1396 mlxsw_sp_port->local_port, 0,
1398 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1401 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1403 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1407 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1408 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1409 bool dwrr, u8 dwrr_weight)
1411 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1412 char qeec_pl[MLXSW_REG_QEEC_LEN];
1414 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1416 mlxsw_reg_qeec_de_set(qeec_pl, true);
1417 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1418 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1419 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1422 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1423 enum mlxsw_reg_qeec_hr hr, u8 index,
1424 u8 next_index, u32 maxrate, u8 burst_size)
1426 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1427 char qeec_pl[MLXSW_REG_QEEC_LEN];
1429 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1431 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1432 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1433 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1434 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1437 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1438 enum mlxsw_reg_qeec_hr hr, u8 index,
1439 u8 next_index, u32 minrate)
1441 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1442 char qeec_pl[MLXSW_REG_QEEC_LEN];
1444 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1446 mlxsw_reg_qeec_mise_set(qeec_pl, true);
1447 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1449 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1452 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1453 u8 switch_prio, u8 tclass)
1455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1456 char qtct_pl[MLXSW_REG_QTCT_LEN];
1458 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1460 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1463 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1467 /* Set up the elements hierarchy, so that each TC is linked to
1468 * one subgroup, and all subgroups are members of the same group.
1469 */
1470 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1471 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
1474 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1475 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1476 MLXSW_REG_QEEC_HR_SUBGROUP, i,
1481 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1482 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1483 MLXSW_REG_QEEC_HR_TC, i, i,
1488 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1489 MLXSW_REG_QEEC_HR_TC,
1496 /* Make sure the max shaper is disabled in all hierarchies that support
1497 * it. Note that this disables ptps (PTP shaper), but that is intended
1498 * for the initial configuration.
1499 */
1500 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1501 MLXSW_REG_QEEC_HR_PORT, 0, 0,
1502 MLXSW_REG_QEEC_MAS_DIS, 0);
1505 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1506 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1507 MLXSW_REG_QEEC_HR_SUBGROUP,
1509 MLXSW_REG_QEEC_MAS_DIS, 0);
1513 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1514 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1515 MLXSW_REG_QEEC_HR_TC,
1517 MLXSW_REG_QEEC_MAS_DIS, 0);
1521 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1522 MLXSW_REG_QEEC_HR_TC,
1524 MLXSW_REG_QEEC_MAS_DIS, 0);
1529 /* Configure the min shaper for multicast TCs. */
1530 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1531 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
1532 MLXSW_REG_QEEC_HR_TC,
1534 MLXSW_REG_QEEC_MIS_MIN);
1539 /* Map all priorities to traffic class 0. */
1540 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1541 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
1549 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1553 char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1555 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1556 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1559 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1562 u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1563 u8 module = mlxsw_sp_port->mapping.module;
1564 u64 overheat_counter;
1567 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1568 module, &overheat_counter);
1572 mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1577 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1578 bool is_8021ad_tagged,
1579 bool is_8021q_tagged)
1581 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1582 char spvc_pl[MLXSW_REG_SPVC_LEN];
1584 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1585 is_8021ad_tagged, is_8021q_tagged);
1586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1589 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
1590 u16 local_port, u8 *port_number,
1591 u8 *split_port_subnumber,
1594 char pllp_pl[MLXSW_REG_PLLP_LEN];
1597 mlxsw_reg_pllp_pack(pllp_pl, local_port);
1598 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
1601 mlxsw_reg_pllp_unpack(pllp_pl, port_number,
1602 split_port_subnumber, slot_index);
1606 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1608 struct mlxsw_sp_port_mapping *port_mapping)
1610 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1611 struct mlxsw_sp_port *mlxsw_sp_port;
1612 u32 lanes = port_mapping->width;
1613 u8 split_port_subnumber;
1614 struct net_device *dev;
1620 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
1622 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1627 err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
1629 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1631 goto err_port_swid_set;
1634 err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
1635 &split_port_subnumber, &slot_index);
1637 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
1639 goto err_port_label_info_get;
1642 splittable = lanes > 1 && !split;
1643 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
1644 port_number, split, split_port_subnumber,
1645 splittable, lanes, mlxsw_sp->base_mac,
1646 sizeof(mlxsw_sp->base_mac));
1648 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1650 goto err_core_port_init;
1653 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1656 goto err_alloc_etherdev;
1658 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1659 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1660 mlxsw_sp_port = netdev_priv(dev);
1661 mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
1662 mlxsw_sp_port, dev);
1663 mlxsw_sp_port->dev = dev;
1664 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1665 mlxsw_sp_port->local_port = local_port;
1666 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1667 mlxsw_sp_port->split = split;
1668 mlxsw_sp_port->mapping = *port_mapping;
1669 mlxsw_sp_port->link.autoneg = 1;
1670 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1672 mlxsw_sp_port->pcpu_stats =
1673 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1674 if (!mlxsw_sp_port->pcpu_stats) {
1676 goto err_alloc_stats;
1679 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1680 &update_stats_cache);
1682 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1683 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1685 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1687 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1688 mlxsw_sp_port->local_port);
1689 goto err_dev_addr_init;
1692 netif_carrier_off(dev);
1694 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1695 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
1696 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1699 dev->max_mtu = ETH_MAX_MTU;
1701 /* Each packet needs to have a Tx header (metadata) on top of all
1702 * other headers.
1703 */
1704 dev->needed_headroom = MLXSW_TXHDR_LEN;
1706 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1708 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1709 mlxsw_sp_port->local_port);
1710 goto err_port_system_port_mapping_set;
1713 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1715 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1716 mlxsw_sp_port->local_port);
1717 goto err_port_speed_by_width_set;
1720 err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1721 &mlxsw_sp_port->max_speed);
1723 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1724 mlxsw_sp_port->local_port);
1725 goto err_max_speed_get;
1728 err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
1730 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
1731 mlxsw_sp_port->local_port);
1732 goto err_port_max_mtu_get;
1735 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1737 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1738 mlxsw_sp_port->local_port);
1739 goto err_port_mtu_set;
1742 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1744 goto err_port_admin_status_set;
1746 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1748 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1749 mlxsw_sp_port->local_port);
1750 goto err_port_buffers_init;
1753 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1755 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1756 mlxsw_sp_port->local_port);
1757 goto err_port_ets_init;
1760 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1763 mlxsw_sp_port->local_port);
1764 goto err_port_tc_mc_mode;
1767 /* ETS and buffers must be initialized before DCB. */
1768 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1770 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1771 mlxsw_sp_port->local_port);
1772 goto err_port_dcb_init;
1775 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1777 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1778 mlxsw_sp_port->local_port);
1779 goto err_port_fids_init;
1782 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1784 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1785 mlxsw_sp_port->local_port);
1786 goto err_port_qdiscs_init;
1789 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1792 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1793 mlxsw_sp_port->local_port);
1794 goto err_port_vlan_clear;
1797 err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1799 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1800 mlxsw_sp_port->local_port);
1801 goto err_port_nve_init;
1804 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1807 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1808 mlxsw_sp_port->local_port);
1809 goto err_port_pvid_set;
1812 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1813 MLXSW_SP_DEFAULT_VID);
1814 if (IS_ERR(mlxsw_sp_port_vlan)) {
1815 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1816 mlxsw_sp_port->local_port);
1817 err = PTR_ERR(mlxsw_sp_port_vlan);
1818 goto err_port_vlan_create;
1820 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1822 /* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
1823 * only packets with an 802.1q header as tagged packets.
1824 */
1825 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
1827 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
1829 goto err_port_vlan_classification_set;
1832 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1833 mlxsw_sp->ptp_ops->shaper_work);
1835 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1837 err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1839 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1840 mlxsw_sp_port->local_port);
1841 goto err_port_overheat_init_val_set;
1844 err = register_netdev(dev);
1846 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1847 mlxsw_sp_port->local_port);
1848 goto err_register_netdev;
1851 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1854 err_register_netdev:
1855 err_port_overheat_init_val_set:
1856 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1857 err_port_vlan_classification_set:
1858 mlxsw_sp->ports[local_port] = NULL;
1859 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1860 err_port_vlan_create:
1861 err_port_pvid_set:
1862 mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1863 err_port_nve_init:
1864 err_port_vlan_clear:
1865 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1866 err_port_qdiscs_init:
1867 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1868 err_port_fids_init:
1869 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1870 err_port_dcb_init:
1871 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1872 err_port_tc_mc_mode:
1873 err_port_ets_init:
1874 mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1875 err_port_buffers_init:
1876 err_port_admin_status_set:
1877 err_port_mtu_set:
1878 err_port_max_mtu_get:
1879 err_max_speed_get:
1880 err_port_speed_by_width_set:
1881 err_port_system_port_mapping_set:
1882 err_dev_addr_init:
1883 free_percpu(mlxsw_sp_port->pcpu_stats);
1884 err_alloc_stats:
1885 free_netdev(dev);
1886 err_alloc_etherdev:
1887 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1888 err_core_port_init:
1889 err_port_label_info_get:
1890 mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
1891 MLXSW_PORT_SWID_DISABLED_PORT);
1892 err_port_swid_set:
1893 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
1894 port_mapping->slot_index,
1895 port_mapping->module);
1896 return err;
1897 }
1899 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1901 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1902 u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1903 u8 module = mlxsw_sp_port->mapping.module;
1905 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
1906 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
1907 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1908 mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
1909 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1910 mlxsw_sp->ports[local_port] = NULL;
1911 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
1912 mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1913 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1914 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1915 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1916 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1917 mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1918 free_percpu(mlxsw_sp_port->pcpu_stats);
1919 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
1920 free_netdev(mlxsw_sp_port->dev);
1921 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1922 mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
1923 MLXSW_PORT_SWID_DISABLED_PORT);
1924 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
1927 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1929 struct mlxsw_sp_port *mlxsw_sp_port;
1932 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1936 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1937 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1939 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1942 sizeof(mlxsw_sp->base_mac));
1944 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1945 goto err_core_cpu_port_init;
1948 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1951 err_core_cpu_port_init:
1952 kfree(mlxsw_sp_port);
1956 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1958 struct mlxsw_sp_port *mlxsw_sp_port =
1959 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1961 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1962 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1963 kfree(mlxsw_sp_port);
1966 static bool mlxsw_sp_local_port_valid(u16 local_port)
1968 return local_port != MLXSW_PORT_CPU_PORT;
1971 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1973 if (!mlxsw_sp_local_port_valid(local_port))
1975 return mlxsw_sp->ports[local_port] != NULL;
1978 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1979 u16 local_port, bool enable)
1981 char pmecr_pl[MLXSW_REG_PMECR_LEN];
1983 mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1984 enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1985 MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1986 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1989 struct mlxsw_sp_port_mapping_event {
1990 struct list_head list;
1991 char pmlp_pl[MLXSW_REG_PMLP_LEN];
1992 };
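/* Note (added for readability): PMLP (port mapping) change events arrive in
 * a trap handler that cannot sleep, so mlxsw_sp_port_mapping_listener_func()
 * only copies the register payload into one of these events and queues it
 * under queue_lock; the events work item then creates or updates the port
 * in process context, under the devlink instance lock.
 */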
1994 static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
1996 struct mlxsw_sp_port_mapping_event *event, *next_event;
1997 struct mlxsw_sp_port_mapping_events *events;
1998 struct mlxsw_sp_port_mapping port_mapping;
1999 struct mlxsw_sp *mlxsw_sp;
2000 struct devlink *devlink;
2001 LIST_HEAD(event_queue);
2005 events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
2006 mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
2007 devlink = priv_to_devlink(mlxsw_sp->core);
2009 spin_lock_bh(&events->queue_lock);
2010 list_splice_init(&events->queue, &event_queue);
2011 spin_unlock_bh(&events->queue_lock);
2013 list_for_each_entry_safe(event, next_event, &event_queue, list) {
2014 local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
2015 err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
2016 event->pmlp_pl, &port_mapping);
2017 if (err)
2018 goto out;
2020 if (WARN_ON_ONCE(!port_mapping.width))
2021 goto out;
2023 devl_lock(devlink);
2025 if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
2026 mlxsw_sp_port_create(mlxsw_sp, local_port,
2027 false, &port_mapping);
2028 else
2029 WARN_ON_ONCE(1);
2031 devl_unlock(devlink);
2033 mlxsw_sp->port_mapping[local_port] = port_mapping;
2035 out:
2036 kfree(event);
2037 }
2038 }
2040 static void
2041 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
2042 char *pmlp_pl, void *priv)
2044 struct mlxsw_sp_port_mapping_events *events;
2045 struct mlxsw_sp_port_mapping_event *event;
2046 struct mlxsw_sp *mlxsw_sp = priv;
2049 local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
2050 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2053 events = &mlxsw_sp->port_mapping_events;
2054 event = kmalloc(sizeof(*event), GFP_ATOMIC);
2057 memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
2058 spin_lock(&events->queue_lock);
2059 list_add_tail(&event->list, &events->queue);
2060 spin_unlock(&events->queue_lock);
2061 mlxsw_core_schedule_work(&events->work);
2065 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2067 struct mlxsw_sp_port_mapping_event *event, *next_event;
2068 struct mlxsw_sp_port_mapping_events *events;
2070 events = &mlxsw_sp->port_mapping_events;
2072 /* Caller needs to make sure that no new event is going to appear. */
2073 cancel_work_sync(&events->work);
2074 list_for_each_entry_safe(event, next_event, &events->queue, list) {
2075 list_del(&event->list);
2076 kfree(event);
2077 }
2078 }
2080 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2082 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2085 for (i = 1; i < max_ports; i++)
2086 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2087 /* Make sure all scheduled events are processed */
2088 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2090 for (i = 1; i < max_ports; i++)
2091 if (mlxsw_sp_port_created(mlxsw_sp, i))
2092 mlxsw_sp_port_remove(mlxsw_sp, i);
2093 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2094 kfree(mlxsw_sp->ports);
2095 mlxsw_sp->ports = NULL;
2099 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2100 bool (*selector)(void *priv, u16 local_port),
2103 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2104 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2107 for (i = 1; i < max_ports; i++)
2108 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2109 mlxsw_sp_port_remove(mlxsw_sp, i);
2112 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2114 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2115 struct mlxsw_sp_port_mapping_events *events;
2116 struct mlxsw_sp_port_mapping *port_mapping;
2121 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2122 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2123 if (!mlxsw_sp->ports)
2126 events = &mlxsw_sp->port_mapping_events;
2127 INIT_LIST_HEAD(&events->queue);
2128 spin_lock_init(&events->queue_lock);
2129 INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
2131 for (i = 1; i < max_ports; i++) {
2132 err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
2134 goto err_event_enable;
2137 err = mlxsw_sp_cpu_port_create(mlxsw_sp);
2139 goto err_cpu_port_create;
2141 for (i = 1; i < max_ports; i++) {
2142 port_mapping = &mlxsw_sp->port_mapping[i];
2143 if (!port_mapping->width)
2145 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
2147 goto err_port_create;
2152 for (i--; i >= 1; i--)
2153 if (mlxsw_sp_port_created(mlxsw_sp, i))
2154 mlxsw_sp_port_remove(mlxsw_sp, i);
2156 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2157 err_cpu_port_create:
2159 for (i--; i >= 1; i--)
2160 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2161 /* Make sure all scheduled events are processed */
2162 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2163 kfree(mlxsw_sp->ports);
2164 mlxsw_sp->ports = NULL;
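/* mlxsw_sp_port_module_info_init() below fills the mlxsw_sp->port_mapping
 * array with the module mapping of each local port, which port creation
 * (mlxsw_sp_ports_create() above) then consults to decide which ports to
 * instantiate.
 */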
2168 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2170 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2171 struct mlxsw_sp_port_mapping *port_mapping;
2175 mlxsw_sp->port_mapping = kcalloc(max_ports,
2176 sizeof(struct mlxsw_sp_port_mapping),
2178 if (!mlxsw_sp->port_mapping)
2181 for (i = 1; i < max_ports; i++) {
2182 port_mapping = &mlxsw_sp->port_mapping[i];
2183 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2185 goto err_port_module_info_get;
2189 err_port_module_info_get:
2190 kfree(mlxsw_sp->port_mapping);
2194 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
2196 kfree(mlxsw_sp->port_mapping);
2200 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
2201 struct mlxsw_sp_port_mapping *port_mapping,
2202 unsigned int count, const char *pmtdb_pl)
2204 struct mlxsw_sp_port_mapping split_port_mapping;
2207 split_port_mapping = *port_mapping;
2208 split_port_mapping.width /= count;
2209 for (i = 0; i < count; i++) {
2210 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2212 if (!mlxsw_sp_local_port_valid(s_local_port))
2215 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
2216 true, &split_port_mapping);
2218 goto err_port_create;
2219 split_port_mapping.lane += split_port_mapping.width;
2225 for (i--; i >= 0; i--) {
2226 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2228 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2229 mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2234 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2236 const char *pmtdb_pl)
2238 struct mlxsw_sp_port_mapping *port_mapping;
2241 /* Go over original unsplit ports in the gap and recreate them. */
2242 for (i = 0; i < count; i++) {
2243 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2245 port_mapping = &mlxsw_sp->port_mapping[local_port];
2246 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2248 mlxsw_sp_port_create(mlxsw_sp, local_port,
2249 false, port_mapping);
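/* Port split flow (see mlxsw_sp_port_split() below): the ports occupying the
 * affected lanes are removed, mlxsw_sp_port_split_create() creates 'count'
 * ports that each use width / count lanes of the parent module, and on
 * failure mlxsw_sp_port_unsplit_create() restores the original unsplit ports
 * from the saved mapping.
 */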
2253 static struct mlxsw_sp_port *
2254 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2256 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2257 return mlxsw_sp->ports[local_port];
2261 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
2263 struct netlink_ext_ack *extack)
2265 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2266 struct mlxsw_sp_port_mapping port_mapping;
2267 struct mlxsw_sp_port *mlxsw_sp_port;
2268 enum mlxsw_reg_pmtdb_status status;
2269 char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
2273 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2274 if (!mlxsw_sp_port) {
2275 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2277 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2281 if (mlxsw_sp_port->split) {
2282 NL_SET_ERR_MSG_MOD(extack, "Port is already split");
2286 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
2287 mlxsw_sp_port->mapping.module,
2288 mlxsw_sp_port->mapping.module_width / count,
2290 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
2292 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
2296 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
2297 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
2298 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
2302 port_mapping = mlxsw_sp_port->mapping;
2304 for (i = 0; i < count; i++) {
2305 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2307 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2308 mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2311 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
2314 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2315 goto err_port_split_create;
2320 err_port_split_create:
2321 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
2326 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
2327 struct netlink_ext_ack *extack)
2329 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2330 struct mlxsw_sp_port *mlxsw_sp_port;
2331 char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
2336 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2337 if (!mlxsw_sp_port) {
2338 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2340 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2344 if (!mlxsw_sp_port->split) {
2345 NL_SET_ERR_MSG_MOD(extack, "Port was not split");
2349 count = mlxsw_sp_port->mapping.module_width /
2350 mlxsw_sp_port->mapping.width;
2352 mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
2353 mlxsw_sp_port->mapping.module,
2354 mlxsw_sp_port->mapping.module_width / count,
2356 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
2358 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
2362 for (i = 0; i < count; i++) {
2363 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2365 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2366 mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2369 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
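/* Both operations are driven from user space through devlink, e.g.
 * (illustrative invocation only; the exact port handle depends on the
 * system):
 *   devlink port split pci/0000:03:00.0/1 count 4
 *   devlink port unsplit pci/0000:03:00.0/1
 */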
2375 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2379 for (i = 0; i < TC_MAX_QUEUE; i++)
2380 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2383 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2384 char *pude_pl, void *priv)
2386 struct mlxsw_sp *mlxsw_sp = priv;
2387 struct mlxsw_sp_port *mlxsw_sp_port;
2388 enum mlxsw_reg_pude_oper_status status;
2391 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2393 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2395 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2399 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2400 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2401 netdev_info(mlxsw_sp_port->dev, "link up\n");
2402 netif_carrier_on(mlxsw_sp_port->dev);
2403 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2405 netdev_info(mlxsw_sp_port->dev, "link down\n");
2406 netif_carrier_off(mlxsw_sp_port->dev);
2407 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
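/* On Spectrum-1, PTP timestamps arrive through the MTPPTR FIFO: each record
 * carries the message type, domain number, sequence ID and timestamp, which
 * the handler below unpacks and passes to mlxsw_sp1_ptp_got_timestamp() for
 * the given local port and direction.
 */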
2411 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2412 char *mtpptr_pl, bool ingress)
2418 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2419 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2420 for (i = 0; i < num_rec; i++) {
2426 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2427 &domain_number, &sequence_id,
2429 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2430 message_type, domain_number,
2431 sequence_id, timestamp);
2435 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2436 char *mtpptr_pl, void *priv)
2438 struct mlxsw_sp *mlxsw_sp = priv;
2440 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2443 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2444 char *mtpptr_pl, void *priv)
2446 struct mlxsw_sp *mlxsw_sp = priv;
2448 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2451 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2452 u16 local_port, void *priv)
2454 struct mlxsw_sp *mlxsw_sp = priv;
2455 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2456 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2458 if (unlikely(!mlxsw_sp_port)) {
2459 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2464 skb->dev = mlxsw_sp_port->dev;
2466 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2467 u64_stats_update_begin(&pcpu_stats->syncp);
2468 pcpu_stats->rx_packets++;
2469 pcpu_stats->rx_bytes += skb->len;
2470 u64_stats_update_end(&pcpu_stats->syncp);
2472 skb->protocol = eth_type_trans(skb, skb->dev);
2473 netif_receive_skb(skb);
2476 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2479 skb->offload_fwd_mark = 1;
2480 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2483 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2484 u16 local_port, void *priv)
2486 skb->offload_l3_fwd_mark = 1;
2487 skb->offload_fwd_mark = 1;
2488 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2491 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2494 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2497 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2498 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2499 _is_ctrl, SP_##_trap_group, DISCARD)
2501 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2502 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2503 _is_ctrl, SP_##_trap_group, DISCARD)
2505 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2506 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
2507 _is_ctrl, SP_##_trap_group, DISCARD)
2509 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2510 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
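/* The _MARK listener variants set skb->offload_fwd_mark (and
 * offload_l3_fwd_mark for the L3 variant) before handing the packet to the
 * stack, signalling that the device has already forwarded it in hardware so
 * the software data path does not forward it again.
 */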
2512 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2514 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2516 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
2518 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
2520 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
2521 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
2523 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
2525 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
2527 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
2529 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
2531 /* Multicast Router Traps */
2532 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
2533 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
2535 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
2538 static const struct mlxsw_listener mlxsw_sp1_listener[] = {
2540 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
2541 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
2544 static const struct mlxsw_listener mlxsw_sp2_listener[] = {
2546 MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
2549 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2551 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2552 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2553 enum mlxsw_reg_qpcr_ir_units ir_units;
2554 int max_cpu_policers;
2560 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2563 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2565 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2566 for (i = 0; i < max_cpu_policers; i++) {
2569 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2570 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2571 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2579 __set_bit(i, mlxsw_sp->trap->policers_usage);
2580 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2582 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
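/* mlxsw_sp_trap_groups_set() below binds each trap group (HTGT register) to
 * the policer configured above and to a host priority / traffic class, so
 * traffic trapped to the CPU is both rate limited and prioritized.
 */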
2590 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2592 char htgt_pl[MLXSW_REG_HTGT_LEN];
2593 enum mlxsw_reg_htgt_trap_group i;
2594 int max_cpu_policers;
2595 int max_trap_groups;
2600 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2603 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2604 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2606 for (i = 0; i < max_trap_groups; i++) {
2609 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2610 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2611 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2615 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2616 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2617 tc = MLXSW_REG_HTGT_DEFAULT_TC;
2618 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2624 if (max_cpu_policers <= policer_id &&
2625 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2628 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2629 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2637 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2639 struct mlxsw_sp_trap *trap;
2643 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
2645 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
2646 trap = kzalloc(struct_size(trap, policers_usage,
2647 BITS_TO_LONGS(max_policers)), GFP_KERNEL);
2650 trap->max_policers = max_policers;
2651 mlxsw_sp->trap = trap;
2653 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2655 goto err_cpu_policers_set;
2657 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2659 goto err_trap_groups_set;
2661 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
2662 ARRAY_SIZE(mlxsw_sp_listener),
2665 goto err_traps_register;
2667 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
2668 mlxsw_sp->listeners_count, mlxsw_sp);
2670 goto err_extra_traps_init;
2674 err_extra_traps_init:
2675 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
2676 ARRAY_SIZE(mlxsw_sp_listener),
2679 err_trap_groups_set:
2680 err_cpu_policers_set:
2685 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2687 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
2688 mlxsw_sp->listeners_count,
2690 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
2691 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
2692 kfree(mlxsw_sp->trap);
2695 static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
2697 char sgcr_pl[MLXSW_REG_SGCR_LEN];
2701 if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2702 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2705 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2709 /* In DDD mode, which we by default use, each LAG entry is 8 PGT
2710 * entries. The LAG table address needs to be 8-aligned, but that ought
2711 * to be the case, since the LAG table is allocated first.
2713 err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
2717 if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
2719 goto err_mid_alloc_range;
2722 mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
2723 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
2725 goto err_mid_alloc_range;
2729 err_mid_alloc_range:
2730 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2735 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
2740 if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2741 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2744 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2748 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
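/* mlxsw_sp_lag_init() below programs the LAG hash fields through the SLCR
 * register, seeding the hash with a jhash of the switch base MAC, then
 * allocates the per-LAG state array and, in SW LAG mode, the PGT range that
 * backs the LAG table (mlxsw_sp_lag_pgt_init() above).
 */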
2752 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2754 struct mlxsw_sp_lag {
2755 struct net_device *dev;
2756 unsigned int ref_count;
2759 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2761 char slcr_pl[MLXSW_REG_SLCR_LEN];
2766 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2767 MLXSW_SP_LAG_SEED_INIT);
2768 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2769 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2770 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2771 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2772 MLXSW_REG_SLCR_LAG_HASH_SIP |
2773 MLXSW_REG_SLCR_LAG_HASH_DIP |
2774 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2775 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2776 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2777 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2781 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2785 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2788 err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
2792 mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_lag),
2794 if (!mlxsw_sp->lags) {
2802 mlxsw_sp_lag_pgt_fini(mlxsw_sp);
2806 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2808 mlxsw_sp_lag_pgt_fini(mlxsw_sp);
2809 kfree(mlxsw_sp->lags);
2812 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
2813 .clock_init = mlxsw_sp1_ptp_clock_init,
2814 .clock_fini = mlxsw_sp1_ptp_clock_fini,
2815 .init = mlxsw_sp1_ptp_init,
2816 .fini = mlxsw_sp1_ptp_fini,
2817 .receive = mlxsw_sp1_ptp_receive,
2818 .transmitted = mlxsw_sp1_ptp_transmitted,
2819 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
2820 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
2821 .shaper_work = mlxsw_sp1_ptp_shaper_work,
2822 .get_ts_info = mlxsw_sp1_ptp_get_ts_info,
2823 .get_stats_count = mlxsw_sp1_get_stats_count,
2824 .get_stats_strings = mlxsw_sp1_get_stats_strings,
2825 .get_stats = mlxsw_sp1_get_stats,
2826 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
2829 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
2830 .clock_init = mlxsw_sp2_ptp_clock_init,
2831 .clock_fini = mlxsw_sp2_ptp_clock_fini,
2832 .init = mlxsw_sp2_ptp_init,
2833 .fini = mlxsw_sp2_ptp_fini,
2834 .receive = mlxsw_sp2_ptp_receive,
2835 .transmitted = mlxsw_sp2_ptp_transmitted,
2836 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
2837 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
2838 .shaper_work = mlxsw_sp2_ptp_shaper_work,
2839 .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
2840 .get_stats_count = mlxsw_sp2_get_stats_count,
2841 .get_stats_strings = mlxsw_sp2_get_stats_strings,
2842 .get_stats = mlxsw_sp2_get_stats,
2843 .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
2846 static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
2847 .clock_init = mlxsw_sp2_ptp_clock_init,
2848 .clock_fini = mlxsw_sp2_ptp_clock_fini,
2849 .init = mlxsw_sp2_ptp_init,
2850 .fini = mlxsw_sp2_ptp_fini,
2851 .receive = mlxsw_sp2_ptp_receive,
2852 .transmitted = mlxsw_sp2_ptp_transmitted,
2853 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
2854 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
2855 .shaper_work = mlxsw_sp2_ptp_shaper_work,
2856 .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
2857 .get_stats_count = mlxsw_sp2_get_stats_count,
2858 .get_stats_strings = mlxsw_sp2_get_stats_strings,
2859 .get_stats = mlxsw_sp2_get_stats,
2860 .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
2863 struct mlxsw_sp_sample_trigger_node {
2864 struct mlxsw_sp_sample_trigger trigger;
2865 struct mlxsw_sp_sample_params params;
2866 struct rhash_head ht_node;
2867 struct rcu_head rcu;
2868 refcount_t refcount;
2871 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
2872 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
2873 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
2874 .key_len = sizeof(struct mlxsw_sp_sample_trigger),
2875 .automatic_shrinking = true,
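/* Sampling triggers are kept in a refcounted rhashtable. Only the trigger
 * type and local port form the key (see mlxsw_sp_sample_trigger_key_init()
 * below); the psample parameters are payload and must match when the same
 * trigger is configured again.
 */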
2879 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
2880 const struct mlxsw_sp_sample_trigger *trigger)
2882 memset(key, 0, sizeof(*key));
2883 key->type = trigger->type;
2884 key->local_port = trigger->local_port;
2887 /* RCU read lock must be held */
2888 struct mlxsw_sp_sample_params *
2889 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2890 const struct mlxsw_sp_sample_trigger *trigger)
2892 struct mlxsw_sp_sample_trigger_node *trigger_node;
2893 struct mlxsw_sp_sample_trigger key;
2895 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2896 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2897 mlxsw_sp_sample_trigger_ht_params);
2901 return &trigger_node->params;
2905 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2906 const struct mlxsw_sp_sample_trigger *trigger,
2907 const struct mlxsw_sp_sample_params *params)
2909 struct mlxsw_sp_sample_trigger_node *trigger_node;
2912 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2916 trigger_node->trigger = *trigger;
2917 trigger_node->params = *params;
2918 refcount_set(&trigger_node->refcount, 1);
2920 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2921 &trigger_node->ht_node,
2922 mlxsw_sp_sample_trigger_ht_params);
2924 goto err_rhashtable_insert;
2928 err_rhashtable_insert:
2929 kfree(trigger_node);
2934 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
2935 struct mlxsw_sp_sample_trigger_node *trigger_node)
2937 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
2938 &trigger_node->ht_node,
2939 mlxsw_sp_sample_trigger_ht_params);
2940 kfree_rcu(trigger_node, rcu);
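/* mlxsw_sp_sample_trigger_params_set() below either creates a new trigger
 * node or, for an already-present trigger, verifies that the new psample
 * parameters are identical and only takes another reference. A per-port
 * trigger may not be installed twice.
 */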
2944 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
2945 const struct mlxsw_sp_sample_trigger *trigger,
2946 const struct mlxsw_sp_sample_params *params,
2947 struct netlink_ext_ack *extack)
2949 struct mlxsw_sp_sample_trigger_node *trigger_node;
2950 struct mlxsw_sp_sample_trigger key;
2954 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2956 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2958 mlxsw_sp_sample_trigger_ht_params);
2960 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
2963 if (trigger_node->trigger.local_port) {
2964 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
2968 if (trigger_node->params.psample_group != params->psample_group ||
2969 trigger_node->params.truncate != params->truncate ||
2970 trigger_node->params.rate != params->rate ||
2971 trigger_node->params.trunc_size != params->trunc_size) {
2972 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
2976 refcount_inc(&trigger_node->refcount);
2982 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2983 const struct mlxsw_sp_sample_trigger *trigger)
2985 struct mlxsw_sp_sample_trigger_node *trigger_node;
2986 struct mlxsw_sp_sample_trigger key;
2990 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2992 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2994 mlxsw_sp_sample_trigger_ht_params);
2998 if (!refcount_dec_and_test(&trigger_node->refcount))
3001 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
3004 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3005 unsigned long event, void *ptr);
3007 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
3008 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128
3009 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
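/* The device parses MLXSW_SP_DEFAULT_PARSING_DEPTH (96) bytes into each
 * packet by default. Features that need headers located deeper in the packet
 * (for example, inner headers behind a UDP tunnel) take a reference via
 * mlxsw_sp_parsing_depth_inc(), which bumps the depth to
 * MLXSW_SP_INCREASED_PARSING_DEPTH (128) through the MPRS register.
 */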
3011 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
3013 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
3014 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
3015 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
3016 mutex_init(&mlxsw_sp->parsing.lock);
3019 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
3021 mutex_destroy(&mlxsw_sp->parsing.lock);
3022 WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
3025 struct mlxsw_sp_ipv6_addr_node {
3026 struct in6_addr key;
3027 struct rhash_head ht_node;
3029 refcount_t refcount;
3032 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
3033 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
3034 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
3035 .key_len = sizeof(struct in6_addr),
3036 .automatic_shrinking = true,
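/* IPv6 addresses that the device must reference by index (for instance as
 * tunnel source addresses) are written to KVDL through the RIPS register and
 * refcounted in this hash table, so the same address is only stored once.
 */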
3040 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
3043 struct mlxsw_sp_ipv6_addr_node *node;
3044 char rips_pl[MLXSW_REG_RIPS_LEN];
3047 err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
3048 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3053 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
3054 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
3056 goto err_rips_write;
3058 node = kzalloc(sizeof(*node), GFP_KERNEL);
3061 goto err_node_alloc;
3065 node->kvdl_index = *p_kvdl_index;
3066 refcount_set(&node->refcount, 1);
3068 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
3070 mlxsw_sp_ipv6_addr_ht_params);
3072 goto err_rhashtable_insert;
3076 err_rhashtable_insert:
3080 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3085 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
3086 struct mlxsw_sp_ipv6_addr_node *node)
3088 u32 kvdl_index = node->kvdl_index;
3090 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
3091 mlxsw_sp_ipv6_addr_ht_params);
3093 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3097 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
3098 const struct in6_addr *addr6,
3101 struct mlxsw_sp_ipv6_addr_node *node;
3104 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3105 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3106 mlxsw_sp_ipv6_addr_ht_params);
3108 refcount_inc(&node->refcount);
3109 *p_kvdl_index = node->kvdl_index;
3113 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
3116 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3121 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
3123 struct mlxsw_sp_ipv6_addr_node *node;
3125 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3126 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3127 mlxsw_sp_ipv6_addr_ht_params);
3131 if (!refcount_dec_and_test(&node->refcount))
3134 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
3137 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3140 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3144 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3145 &mlxsw_sp_ipv6_addr_ht_params);
3149 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3153 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
3155 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
3156 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
3159 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3160 const struct mlxsw_bus_info *mlxsw_bus_info,
3161 struct netlink_ext_ack *extack)
3163 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3166 mlxsw_sp->core = mlxsw_core;
3167 mlxsw_sp->bus_info = mlxsw_bus_info;
3169 mlxsw_sp_parsing_init(mlxsw_sp);
3171 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3173 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3177 err = mlxsw_sp_kvdl_init(mlxsw_sp);
3179 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
3183 err = mlxsw_sp_pgt_init(mlxsw_sp);
3185 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
3189 /* Initialize before FIDs so that the LAG table is at the start of PGT
3190 * and 8-aligned without overallocation.
3192 err = mlxsw_sp_lag_init(mlxsw_sp);
3194 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3198 err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
3200 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3201 goto err_fid_core_init;
3204 err = mlxsw_sp_policers_init(mlxsw_sp);
3206 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
3207 goto err_policers_init;
3210 err = mlxsw_sp_traps_init(mlxsw_sp);
3212 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3213 goto err_traps_init;
3216 err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
3218 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
3219 goto err_devlink_traps_init;
3222 err = mlxsw_sp_buffers_init(mlxsw_sp);
3224 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3225 goto err_buffers_init;
3228 /* Initialize SPAN before router and switchdev, so that those components
3229 * can call mlxsw_sp_span_respin().
3231 err = mlxsw_sp_span_init(mlxsw_sp);
3233 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3237 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3239 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3240 goto err_switchdev_init;
3243 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3245 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3246 goto err_counter_pool_init;
3249 err = mlxsw_sp_afa_init(mlxsw_sp);
3251 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
3255 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
3257 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
3258 goto err_ipv6_addr_ht_init;
3261 err = mlxsw_sp_nve_init(mlxsw_sp);
3263 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
3267 err = mlxsw_sp_port_range_init(mlxsw_sp);
3269 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
3270 goto err_port_range_init;
3273 err = mlxsw_sp_acl_init(mlxsw_sp);
3275 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3279 err = mlxsw_sp_router_init(mlxsw_sp, extack);
3281 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3282 goto err_router_init;
3285 if (mlxsw_sp->bus_info->read_clock_capable) {
3286 /* NULL is a valid return value from clock_init */
3288 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
3289 mlxsw_sp->bus_info->dev);
3290 if (IS_ERR(mlxsw_sp->clock)) {
3291 err = PTR_ERR(mlxsw_sp->clock);
3292 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
3293 goto err_ptp_clock_init;
3297 if (mlxsw_sp->clock) {
3298 /* NULL is a valid return value from ptp_ops->init */
3299 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
3300 if (IS_ERR(mlxsw_sp->ptp_state)) {
3301 err = PTR_ERR(mlxsw_sp->ptp_state);
3302 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
3307 /* Initialize netdevice notifier after SPAN is initialized, so that the
3308 * event handler can call SPAN respin.
3310 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
3311 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3312 &mlxsw_sp->netdevice_nb);
3314 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
3315 goto err_netdev_notifier;
3318 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3320 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3321 goto err_dpipe_init;
3324 err = mlxsw_sp_port_module_info_init(mlxsw_sp);
3326 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
3327 goto err_port_module_info_init;
3330 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
3331 &mlxsw_sp_sample_trigger_ht_params);
3333 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
3334 goto err_sample_trigger_init;
3337 err = mlxsw_sp_ports_create(mlxsw_sp);
3339 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3340 goto err_ports_create;
3346 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
3347 err_sample_trigger_init:
3348 mlxsw_sp_port_module_info_fini(mlxsw_sp);
3349 err_port_module_info_init:
3350 mlxsw_sp_dpipe_fini(mlxsw_sp);
3352 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3353 &mlxsw_sp->netdevice_nb);
3354 err_netdev_notifier:
3355 if (mlxsw_sp->clock)
3356 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3358 if (mlxsw_sp->clock)
3359 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3361 mlxsw_sp_router_fini(mlxsw_sp);
3363 mlxsw_sp_acl_fini(mlxsw_sp);
3365 mlxsw_sp_port_range_fini(mlxsw_sp);
3366 err_port_range_init:
3367 mlxsw_sp_nve_fini(mlxsw_sp);
3369 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
3370 err_ipv6_addr_ht_init:
3371 mlxsw_sp_afa_fini(mlxsw_sp);
3373 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3374 err_counter_pool_init:
3375 mlxsw_sp_switchdev_fini(mlxsw_sp);
3377 mlxsw_sp_span_fini(mlxsw_sp);
3379 mlxsw_sp_buffers_fini(mlxsw_sp);
3381 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3382 err_devlink_traps_init:
3383 mlxsw_sp_traps_fini(mlxsw_sp);
3385 mlxsw_sp_policers_fini(mlxsw_sp);
3387 mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
3389 mlxsw_sp_lag_fini(mlxsw_sp);
3391 mlxsw_sp_pgt_fini(mlxsw_sp);
3393 mlxsw_sp_kvdl_fini(mlxsw_sp);
3394 mlxsw_sp_parsing_fini(mlxsw_sp);
3398 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3399 const struct mlxsw_bus_info *mlxsw_bus_info,
3400 struct netlink_ext_ack *extack)
3402 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3404 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
3405 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3406 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3407 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3408 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3409 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
3410 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3411 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
3412 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
3413 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
3414 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
3415 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
3416 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
3417 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
3418 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
3419 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
3420 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
3421 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
3422 mlxsw_sp->listeners = mlxsw_sp1_listener;
3423 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
3424 mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
3425 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
3426 mlxsw_sp->pgt_smpe_index_valid = true;
3428 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3431 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3432 const struct mlxsw_bus_info *mlxsw_bus_info,
3433 struct netlink_ext_ack *extack)
3435 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3437 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3438 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3439 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3440 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3441 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3442 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3443 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3444 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3445 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3446 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3447 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3448 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
3449 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3450 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3451 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
3452 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3453 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3454 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3455 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3456 mlxsw_sp->listeners = mlxsw_sp2_listener;
3457 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3458 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3459 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
3460 mlxsw_sp->pgt_smpe_index_valid = false;
3462 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3465 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
3466 const struct mlxsw_bus_info *mlxsw_bus_info,
3467 struct netlink_ext_ack *extack)
3469 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3471 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3472 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3473 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3474 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3475 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3476 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3477 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3478 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3479 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3480 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3481 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3482 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3483 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3484 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3485 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3486 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3487 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3488 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3489 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3490 mlxsw_sp->listeners = mlxsw_sp2_listener;
3491 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3492 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3493 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
3494 mlxsw_sp->pgt_smpe_index_valid = false;
3496 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3499 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
3500 const struct mlxsw_bus_info *mlxsw_bus_info,
3501 struct netlink_ext_ack *extack)
3503 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3505 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3506 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3507 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3508 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
3509 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3510 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3511 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3512 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
3513 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3514 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3515 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3516 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3517 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3518 mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
3519 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3520 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3521 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3522 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3523 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3524 mlxsw_sp->listeners = mlxsw_sp2_listener;
3525 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3526 mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3527 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
3528 mlxsw_sp->pgt_smpe_index_valid = false;
3530 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3533 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3535 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3537 mlxsw_sp_ports_remove(mlxsw_sp);
3538 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
3539 mlxsw_sp_port_module_info_fini(mlxsw_sp);
3540 mlxsw_sp_dpipe_fini(mlxsw_sp);
3541 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3542 &mlxsw_sp->netdevice_nb);
3543 if (mlxsw_sp->clock) {
3544 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3545 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3547 mlxsw_sp_router_fini(mlxsw_sp);
3548 mlxsw_sp_acl_fini(mlxsw_sp);
3549 mlxsw_sp_port_range_fini(mlxsw_sp);
3550 mlxsw_sp_nve_fini(mlxsw_sp);
3551 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
3552 mlxsw_sp_afa_fini(mlxsw_sp);
3553 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3554 mlxsw_sp_switchdev_fini(mlxsw_sp);
3555 mlxsw_sp_span_fini(mlxsw_sp);
3556 mlxsw_sp_buffers_fini(mlxsw_sp);
3557 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3558 mlxsw_sp_traps_fini(mlxsw_sp);
3559 mlxsw_sp_policers_fini(mlxsw_sp);
3560 mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
3561 mlxsw_sp_lag_fini(mlxsw_sp);
3562 mlxsw_sp_pgt_fini(mlxsw_sp);
3563 mlxsw_sp_kvdl_fini(mlxsw_sp);
3564 mlxsw_sp_parsing_fini(mlxsw_sp);
3567 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
3568 .used_flood_mode = 1,
3569 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
3570 .used_max_ib_mc = 1,
3576 .used_kvd_sizes = 1,
3577 .kvd_hash_single_parts = 59,
3578 .kvd_hash_double_parts = 41,
3579 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3583 .type = MLXSW_PORT_SWID_TYPE_ETH,
3588 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
3589 .used_flood_mode = 1,
3590 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
3591 .used_max_ib_mc = 1,
3600 .type = MLXSW_PORT_SWID_TYPE_ETH,
3603 .used_cqe_time_stamp_type = 1,
3604 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
3605 .lag_mode_prefer_sw = true,
3606 .flood_mode_prefer_cff = true,
3609 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
3610 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT table.
3613 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
3615 static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
3617 .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
3618 .used_flood_mode = 1,
3619 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
3620 .used_max_ib_mc = 1,
3629 .type = MLXSW_PORT_SWID_TYPE_ETH,
3632 .used_cqe_time_stamp_type = 1,
3633 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
3634 .lag_mode_prefer_sw = true,
3635 .flood_mode_prefer_cff = true,
3639 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3640 struct devlink_resource_size_params *kvd_size_params,
3641 struct devlink_resource_size_params *linear_size_params,
3642 struct devlink_resource_size_params *hash_double_size_params,
3643 struct devlink_resource_size_params *hash_single_size_params)
3645 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3646 KVD_SINGLE_MIN_SIZE);
3647 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3648 KVD_DOUBLE_MIN_SIZE);
3649 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3650 u32 linear_size_min = 0;
3652 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3653 MLXSW_SP_KVD_GRANULARITY,
3654 DEVLINK_RESOURCE_UNIT_ENTRY);
3655 devlink_resource_size_params_init(linear_size_params, linear_size_min,
3656 kvd_size - single_size_min -
3658 MLXSW_SP_KVD_GRANULARITY,
3659 DEVLINK_RESOURCE_UNIT_ENTRY);
3660 devlink_resource_size_params_init(hash_double_size_params,
3662 kvd_size - single_size_min -
3664 MLXSW_SP_KVD_GRANULARITY,
3665 DEVLINK_RESOURCE_UNIT_ENTRY);
3666 devlink_resource_size_params_init(hash_single_size_params,
3668 kvd_size - double_size_min -
3670 MLXSW_SP_KVD_GRANULARITY,
3671 DEVLINK_RESOURCE_UNIT_ENTRY);
3674 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3676 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3677 struct devlink_resource_size_params hash_single_size_params;
3678 struct devlink_resource_size_params hash_double_size_params;
3679 struct devlink_resource_size_params linear_size_params;
3680 struct devlink_resource_size_params kvd_size_params;
3681 u32 kvd_size, single_size, double_size, linear_size;
3682 const struct mlxsw_config_profile *profile;
3685 profile = &mlxsw_sp1_config_profile;
3686 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3689 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3690 &linear_size_params,
3691 &hash_double_size_params,
3692 &hash_single_size_params);
3694 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3695 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3696 kvd_size, MLXSW_SP_RESOURCE_KVD,
3697 DEVLINK_RESOURCE_ID_PARENT_TOP,
3702 linear_size = profile->kvd_linear_size;
3703 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3705 MLXSW_SP_RESOURCE_KVD_LINEAR,
3706 MLXSW_SP_RESOURCE_KVD,
3707 &linear_size_params);
3711 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3715 double_size = kvd_size - linear_size;
3716 double_size *= profile->kvd_hash_double_parts;
3717 double_size /= profile->kvd_hash_double_parts +
3718 profile->kvd_hash_single_parts;
3719 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3720 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3722 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3723 MLXSW_SP_RESOURCE_KVD,
3724 &hash_double_size_params);
3728 single_size = kvd_size - double_size - linear_size;
3729 err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3731 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3732 MLXSW_SP_RESOURCE_KVD,
3733 &hash_single_size_params);
3740 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3742 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3743 struct devlink_resource_size_params kvd_size_params;
3746 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3749 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3750 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3751 MLXSW_SP_KVD_GRANULARITY,
3752 DEVLINK_RESOURCE_UNIT_ENTRY);
3754 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3755 kvd_size, MLXSW_SP_RESOURCE_KVD,
3756 DEVLINK_RESOURCE_ID_PARENT_TOP,
3760 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3762 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3763 struct devlink_resource_size_params span_size_params;
3766 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3769 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3770 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3771 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3773 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3774 max_span, MLXSW_SP_RESOURCE_SPAN,
3775 DEVLINK_RESOURCE_ID_PARENT_TOP,
3780 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3782 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3783 struct devlink_resource_size_params size_params;
3784 u8 max_rif_mac_profiles;
3786 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3787 max_rif_mac_profiles = 1;
3789 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3790 MAX_RIF_MAC_PROFILES);
3791 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3792 max_rif_mac_profiles, 1,
3793 DEVLINK_RESOURCE_UNIT_ENTRY);
3795 return devl_resource_register(devlink,
3797 max_rif_mac_profiles,
3798 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3799 DEVLINK_RESOURCE_ID_PARENT_TOP,
3803 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3805 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3806 struct devlink_resource_size_params size_params;
3809 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3812 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3813 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3814 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3816 return devl_resource_register(devlink, "rifs", max_rifs,
3817 MLXSW_SP_RESOURCE_RIFS,
3818 DEVLINK_RESOURCE_ID_PARENT_TOP,
3823 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
3825 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3826 struct devlink_resource_size_params size_params;
3829 if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
3832 max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
3833 devlink_resource_size_params_init(&size_params, max, max, 1,
3834 DEVLINK_RESOURCE_UNIT_ENTRY);
3836 return devl_resource_register(devlink, "port_range_registers", max,
3837 MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
3838 DEVLINK_RESOURCE_ID_PARENT_TOP,
3842 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3846 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3850 err = mlxsw_sp_resources_span_register(mlxsw_core);
3852 goto err_resources_span_register;
3854 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3856 goto err_resources_counter_register;
3858 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3860 goto err_policer_resources_register;
3862 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
3864 goto err_resources_rif_mac_profile_register;
3866 err = mlxsw_sp_resources_rifs_register(mlxsw_core);
3868 goto err_resources_rifs_register;
3870 err = mlxsw_sp_resources_port_range_register(mlxsw_core);
3872 goto err_resources_port_range_register;
3876 err_resources_port_range_register:
3877 err_resources_rifs_register:
3878 err_resources_rif_mac_profile_register:
3879 err_policer_resources_register:
3880 err_resources_counter_register:
3881 err_resources_span_register:
3882 devl_resources_unregister(priv_to_devlink(mlxsw_core));
3886 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3890 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3894 err = mlxsw_sp_resources_span_register(mlxsw_core);
3896 goto err_resources_span_register;
3898 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3900 goto err_resources_counter_register;
3902 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3904 goto err_policer_resources_register;
3906 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
3908 goto err_resources_rif_mac_profile_register;
3910 err = mlxsw_sp_resources_rifs_register(mlxsw_core);
3912 goto err_resources_rifs_register;
3914 err = mlxsw_sp_resources_port_range_register(mlxsw_core);
3916 goto err_resources_port_range_register;
3920 err_resources_port_range_register:
3921 err_resources_rifs_register:
3922 err_resources_rif_mac_profile_register:
3923 err_policer_resources_register:
3924 err_resources_counter_register:
3925 err_resources_span_register:
3926 devl_resources_unregister(priv_to_devlink(mlxsw_core));
3930 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3931 const struct mlxsw_config_profile *profile,
3932 u64 *p_single_size, u64 *p_double_size,
3935 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3939 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3940 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3943 /* The hash part is what is left of the KVD without the
3944 * linear part. It is split into the single size and
3945 * double size by the parts ratio from the profile.
3946 * Both sizes must be multiples of the
3947 * granularity from the profile. In case the user
3948 * provided the sizes, they are obtained via devlink.
3950 err = devl_resource_size_get(devlink,
3951 MLXSW_SP_RESOURCE_KVD_LINEAR,
3954 *p_linear_size = profile->kvd_linear_size;
3956 err = devl_resource_size_get(devlink,
3957 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3960 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3962 double_size *= profile->kvd_hash_double_parts;
3963 double_size /= profile->kvd_hash_double_parts +
3964 profile->kvd_hash_single_parts;
3965 *p_double_size = rounddown(double_size,
3966 MLXSW_SP_KVD_GRANULARITY);
3969 err = devl_resource_size_get(devlink,
3970 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3973 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3974 *p_double_size - *p_linear_size;
3976 /* Check results are legal. */
3977 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3978 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
3979 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
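/* Worked example of the split computed above (all numbers hypothetical):
 * with KVD_SIZE = 524288, kvd_linear_size = 98304 and a 41/59 double/single
 * parts ratio, the hash part is 524288 - 98304 = 425984 entries;
 * double_size = 425984 * 41 / 100 = 174653, rounded down to
 * MLXSW_SP_KVD_GRANULARITY (assumed 128 here) -> 174592; and
 * single_size = 524288 - 174592 - 98304 = 251392.
 */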
3985 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3986 struct sk_buff *skb, u16 local_port)
3988 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3990 skb_pull(skb, MLXSW_TXHDR_LEN);
3991 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
3994 static struct mlxsw_driver mlxsw_sp1_driver = {
3995 .kind = mlxsw_sp1_driver_name,
3996 .priv_size = sizeof(struct mlxsw_sp),
3997 .fw_req_rev = &mlxsw_sp1_fw_rev,
3998 .fw_filename = MLXSW_SP1_FW_FILENAME,
3999 .init = mlxsw_sp1_init,
4000 .fini = mlxsw_sp_fini,
4001 .port_split = mlxsw_sp_port_split,
4002 .port_unsplit = mlxsw_sp_port_unsplit,
4003 .sb_pool_get = mlxsw_sp_sb_pool_get,
4004 .sb_pool_set = mlxsw_sp_sb_pool_set,
4005 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
4006 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
4007 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
4008 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
4009 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
4010 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
4011 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
4012 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
4013 .trap_init = mlxsw_sp_trap_init,
4014 .trap_fini = mlxsw_sp_trap_fini,
4015 .trap_action_set = mlxsw_sp_trap_action_set,
4016 .trap_group_init = mlxsw_sp_trap_group_init,
4017 .trap_group_set = mlxsw_sp_trap_group_set,
4018 .trap_policer_init = mlxsw_sp_trap_policer_init,
4019 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
4020 .trap_policer_set = mlxsw_sp_trap_policer_set,
4021 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
4022 .txhdr_construct = mlxsw_sp_txhdr_construct,
4023 .resources_register = mlxsw_sp1_resources_register,
4024 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
4025 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
4026 .txhdr_len = MLXSW_TXHDR_LEN,
4027 .profile = &mlxsw_sp1_config_profile,
4028 .sdq_supports_cqe_v2 = false,
4031 static struct mlxsw_driver mlxsw_sp2_driver = {
4032 .kind = mlxsw_sp2_driver_name,
4033 .priv_size = sizeof(struct mlxsw_sp),
4034 .fw_req_rev = &mlxsw_sp2_fw_rev,
4035 .fw_filename = MLXSW_SP2_FW_FILENAME,
4036 .init = mlxsw_sp2_init,
4037 .fini = mlxsw_sp_fini,
4038 .port_split = mlxsw_sp_port_split,
4039 .port_unsplit = mlxsw_sp_port_unsplit,
4040 .ports_remove_selected = mlxsw_sp_ports_remove_selected,
4041 .sb_pool_get = mlxsw_sp_sb_pool_get,
4042 .sb_pool_set = mlxsw_sp_sb_pool_set,
4043 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
4044 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
4045 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
4046 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
4047 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
4048 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
4049 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
4050 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
4051 .trap_init = mlxsw_sp_trap_init,
4052 .trap_fini = mlxsw_sp_trap_fini,
4053 .trap_action_set = mlxsw_sp_trap_action_set,
4054 .trap_group_init = mlxsw_sp_trap_group_init,
4055 .trap_group_set = mlxsw_sp_trap_group_set,
4056 .trap_policer_init = mlxsw_sp_trap_policer_init,
4057 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
4058 .trap_policer_set = mlxsw_sp_trap_policer_set,
4059 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
4060 .txhdr_construct = mlxsw_sp_txhdr_construct,
4061 .resources_register = mlxsw_sp2_resources_register,
4062 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
4063 .txhdr_len = MLXSW_TXHDR_LEN,
4064 .profile = &mlxsw_sp2_config_profile,
4065 .sdq_supports_cqe_v2 = true,
4068 static struct mlxsw_driver mlxsw_sp3_driver = {
4069 .kind = mlxsw_sp3_driver_name,
4070 .priv_size = sizeof(struct mlxsw_sp),
4071 .fw_req_rev = &mlxsw_sp3_fw_rev,
4072 .fw_filename = MLXSW_SP3_FW_FILENAME,
4073 .init = mlxsw_sp3_init,
4074 .fini = mlxsw_sp_fini,
4075 .port_split = mlxsw_sp_port_split,
4076 .port_unsplit = mlxsw_sp_port_unsplit,
4077 .ports_remove_selected = mlxsw_sp_ports_remove_selected,
4078 .sb_pool_get = mlxsw_sp_sb_pool_get,
4079 .sb_pool_set = mlxsw_sp_sb_pool_set,
4080 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
4081 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
4082 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
4083 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
4084 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
4085 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
4086 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
4087 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
4088 .trap_init = mlxsw_sp_trap_init,
4089 .trap_fini = mlxsw_sp_trap_fini,
4090 .trap_action_set = mlxsw_sp_trap_action_set,
4091 .trap_group_init = mlxsw_sp_trap_group_init,
4092 .trap_group_set = mlxsw_sp_trap_group_set,
4093 .trap_policer_init = mlxsw_sp_trap_policer_init,
4094 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
4095 .trap_policer_set = mlxsw_sp_trap_policer_set,
4096 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
4097 .txhdr_construct = mlxsw_sp_txhdr_construct,
4098 .resources_register = mlxsw_sp2_resources_register,
4099 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
4100 .txhdr_len = MLXSW_TXHDR_LEN,
4101 .profile = &mlxsw_sp2_config_profile,
4102 .sdq_supports_cqe_v2 = true,
4105 static struct mlxsw_driver mlxsw_sp4_driver = {
4106 .kind = mlxsw_sp4_driver_name,
4107 .priv_size = sizeof(struct mlxsw_sp),
4108 .init = mlxsw_sp4_init,
4109 .fini = mlxsw_sp_fini,
4110 .port_split = mlxsw_sp_port_split,
4111 .port_unsplit = mlxsw_sp_port_unsplit,
4112 .ports_remove_selected = mlxsw_sp_ports_remove_selected,
4113 .sb_pool_get = mlxsw_sp_sb_pool_get,
4114 .sb_pool_set = mlxsw_sp_sb_pool_set,
4115 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
4116 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
4117 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
4118 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
4119 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
4120 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
4121 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
4122 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
4123 .trap_init = mlxsw_sp_trap_init,
4124 .trap_fini = mlxsw_sp_trap_fini,
4125 .trap_action_set = mlxsw_sp_trap_action_set,
4126 .trap_group_init = mlxsw_sp_trap_group_init,
4127 .trap_group_set = mlxsw_sp_trap_group_set,
4128 .trap_policer_init = mlxsw_sp_trap_policer_init,
4129 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
4130 .trap_policer_set = mlxsw_sp_trap_policer_set,
4131 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
4132 .txhdr_construct = mlxsw_sp_txhdr_construct,
4133 .resources_register = mlxsw_sp2_resources_register,
4134 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
4135 .txhdr_len = MLXSW_TXHDR_LEN,
4136 .profile = &mlxsw_sp4_config_profile,
4137 .sdq_supports_cqe_v2 = true,
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}
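/* The parsing depth is reference counted: the first user switches the MPRS
 * register to the increased depth and the last user restores the default.
 * parsing.lock serializes updates of the depth and of the VxLAN UDP port.
 */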
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
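/* Walk a LAG device and its bridge-port uppers and make the port leave the
 * corresponding bridges; used when the port stops being a LAG member.
 */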
4259 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
4260 struct net_device *lag_dev)
4262 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
4263 struct net_device *upper_dev;
4264 struct list_head *iter;
4266 if (netif_is_bridge_port(lag_dev))
4267 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
4269 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4270 if (!netif_is_bridge_port(upper_dev))
4272 br_dev = netdev_master_upper_dev_get(upper_dev);
4273 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
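/* Thin wrappers around the SLDR and SLCOR registers: SLDR creates and
 * destroys LAGs and edits their distribution port list, while SLCOR maps
 * ports into a LAG and toggles their collector state.
 */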
4277 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4279 char sldr_pl[MLXSW_REG_SLDR_LEN];
4281 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4282 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4285 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4287 char sldr_pl[MLXSW_REG_SLDR_LEN];
4289 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4290 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4293 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4294 u16 lag_id, u8 port_index)
4296 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4297 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4299 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4300 lag_id, port_index);
4301 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4304 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4307 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4308 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4310 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4312 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4315 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4319 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4321 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4323 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4326 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4329 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4330 char slcor_pl[MLXSW_REG_SLCOR_LEN];
4332 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4334 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
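/* Resolve the LAG index for a LAG netdev: reuse an index already bound to
 * this device, otherwise pick the first free one; fail if none is available.
 */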
4337 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4338 struct net_device *lag_dev,
4341 struct mlxsw_sp_lag *lag;
4342 int free_lag_id = -1;
4346 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
4350 for (i = 0; i < max_lag; i++) {
4351 lag = &mlxsw_sp->lags[i];
4352 if (lag->ref_count) {
4353 if (lag->dev == lag_dev) {
4357 } else if (free_lag_id < 0) {
4361 if (free_lag_id < 0)
4363 *p_lag_id = free_lag_id;
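/* A LAG upper can be offloaded only if a LAG index can be allocated for it
 * and its Tx type is hash-based.
 */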
4368 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4369 struct net_device *lag_dev,
4370 struct netdev_lag_upper_info *lag_upper_info,
4371 struct netlink_ext_ack *extack)
4375 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4376 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4379 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4380 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4386 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4387 u16 lag_id, u8 *p_port_index)
4389 u64 max_lag_members;
4392 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4394 for (i = 0; i < max_lag_members; i++) {
4395 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
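/* When a port is enslaved to a LAG that is already bridged, join the bridges
 * of the LAG and of its VLAN uppers on behalf of the new member; the error
 * path leaves them again in reverse order.
 */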
4403 static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
4404 struct net_device *lag_dev,
4405 struct netlink_ext_ack *extack)
4407 struct net_device *upper_dev;
4408 struct net_device *master;
4409 struct list_head *iter;
4413 master = netdev_master_upper_dev_get(lag_dev);
4414 if (master && netif_is_bridge_master(master)) {
4415 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
4421 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4422 if (!is_vlan_dev(upper_dev))
4425 master = netdev_master_upper_dev_get(upper_dev);
4426 if (master && netif_is_bridge_master(master)) {
4427 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4431 goto err_port_bridge_join;
4439 err_port_bridge_join:
4440 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4441 if (!is_vlan_dev(upper_dev))
4444 master = netdev_master_upper_dev_get(upper_dev);
4445 if (!master || !netif_is_bridge_master(master))
4451 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4454 master = netdev_master_upper_dev_get(lag_dev);
4455 if (master && netif_is_bridge_master(master))
4456 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4462 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4463 struct net_device *lag_dev)
4465 struct net_device *upper_dev;
4466 struct net_device *master;
4467 struct list_head *iter;
4469 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4470 if (!is_vlan_dev(upper_dev))
4473 master = netdev_master_upper_dev_get(upper_dev);
4477 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4480 master = netdev_master_upper_dev_get(lag_dev);
4482 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
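/* Joining a LAG: allocate (or reuse) the LAG index, create the LAG in
 * hardware on first use, replay bridge memberships of the LAG and its VLAN
 * uppers, map the port into the LAG collector and hand FID and router state
 * over to the LAG. The error path unwinds these steps in reverse order.
 */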
4485 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4486 struct net_device *lag_dev,
4487 struct netlink_ext_ack *extack)
4489 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4490 struct mlxsw_sp_lag *lag;
4495 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4498 lag = &mlxsw_sp->lags[lag_id];
4499 if (!lag->ref_count) {
4500 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4506 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4510 err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
4513 goto err_lag_uppers_bridge_join;
4515 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4517 goto err_col_port_add;
4519 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4520 mlxsw_sp_port->local_port);
4521 mlxsw_sp_port->lag_id = lag_id;
4522 mlxsw_sp_port->lagged = 1;
4525 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
4527 goto err_fid_port_join_lag;
4529 /* Port is no longer usable as a router interface */
4530 if (mlxsw_sp_port->default_vlan->fid)
4531 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4533 /* Join a router interface configured on the LAG, if exists */
4534 err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
4537 goto err_router_join;
4539 err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
4546 mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
4548 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4549 err_fid_port_join_lag:
4551 mlxsw_sp_port->lagged = 0;
4552 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4553 mlxsw_sp_port->local_port);
4554 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4556 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
4557 err_lag_uppers_bridge_join:
4558 if (!lag->ref_count)
4559 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4563 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4564 struct net_device *lag_dev)
4566 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4567 u16 lag_id = mlxsw_sp_port->lag_id;
4568 struct mlxsw_sp_lag *lag;
4570 if (!mlxsw_sp_port->lagged)
4572 lag = &mlxsw_sp->lags[lag_id];
4573 WARN_ON(lag->ref_count == 0);
4575 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4577 /* Any VLANs configured on the port are no longer valid */
4578 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
4579 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are part of.
	 */
4583 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4585 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4587 if (lag->ref_count == 1)
4588 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4590 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4591 mlxsw_sp_port->local_port);
4592 mlxsw_sp_port->lagged = 0;
4595 /* Make sure untagged frames are allowed to ingress */
4596 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
4600 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4603 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4604 char sldr_pl[MLXSW_REG_SLDR_LEN];
4606 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4607 mlxsw_sp_port->local_port);
4608 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4611 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4615 char sldr_pl[MLXSW_REG_SLDR_LEN];
4617 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4618 mlxsw_sp_port->local_port);
4619 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
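/* The LAG lower state (tx_enabled) decides whether a member port takes part
 * in collection and distribution for its LAG: enabling adds the port to the
 * distributor after enabling its collector, disabling does the reverse.
 */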
4623 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4627 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4628 mlxsw_sp_port->lag_id);
4632 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4634 goto err_dist_port_add;
4639 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4644 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4648 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4649 mlxsw_sp_port->lag_id);
4653 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4654 mlxsw_sp_port->lag_id);
4656 goto err_col_port_disable;
4660 err_col_port_disable:
4661 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4665 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4666 struct netdev_lag_lower_state_info *info)
4668 if (info->tx_enabled)
4669 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4671 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
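/* Program the same STP state (forwarding or discarding) for every VID on the
 * port via the SPMS register; the register payload is heap-allocated,
 * presumably because MLXSW_REG_SPMS_LEN is too large for the kernel stack.
 */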
4674 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4677 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4678 enum mlxsw_reg_spms_state spms_state;
4683 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4684 MLXSW_REG_SPMS_STATE_DISCARDING;
4686 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4689 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4691 for (vid = 0; vid < VLAN_N_VID; vid++)
4692 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4694 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
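/* Enslaving a port to OVS: switch it to Virtual Port mode, set STP to
 * forwarding, allow VLANs 1..4094 and disable learning on each VID; the
 * leave path undoes these steps.
 */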
4699 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4704 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4707 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4709 goto err_port_stp_set;
4710 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4713 goto err_port_vlan_set;
4715 for (; vid <= VLAN_N_VID - 1; vid++) {
4716 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4719 goto err_vid_learning_set;
4724 err_vid_learning_set:
4725 for (vid--; vid >= 1; vid--)
4726 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4728 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4730 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4734 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4738 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4739 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4742 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4744 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4745 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4748 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4750 unsigned int num_vxlans = 0;
4751 struct net_device *dev;
4752 struct list_head *iter;
4754 netdev_for_each_lower_dev(br_dev, dev, iter) {
4755 if (netif_is_vxlan(dev))
4759 return num_vxlans > 1;
4762 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4764 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4765 struct net_device *dev;
4766 struct list_head *iter;
4768 netdev_for_each_lower_dev(br_dev, dev, iter) {
4772 if (!netif_is_vxlan(dev))
4775 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4779 if (test_and_set_bit(pvid, vlans))
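/* A bridge with VxLAN uppers is offloadable only if multicast snooping is
 * disabled, a VLAN-unaware bridge has at most one VxLAN device and, in the
 * VLAN-aware case, no two VxLAN devices share the same mapped VLAN.
 */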
4786 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4787 struct netlink_ext_ack *extack)
4789 if (br_multicast_enabled(br_dev)) {
4790 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4794 if (!br_vlan_enabled(br_dev) &&
4795 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4796 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4800 if (br_vlan_enabled(br_dev) &&
4801 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4802 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4809 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4810 struct net_device *dev)
4812 return upper_dev == netdev_master_upper_dev_get(dev);
4815 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
4816 unsigned long event, void *ptr,
4817 bool process_foreign);
4819 static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
4820 struct net_device *dev,
4821 struct netlink_ext_ack *extack)
4823 struct net_device *upper_dev;
4824 struct list_head *iter;
4827 netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
4828 struct netdev_notifier_changeupper_info info = {
4833 .master = mlxsw_sp_netdev_is_master(upper_dev, dev),
4834 .upper_dev = upper_dev,
			/* upper_info is relevant for LAG devices. But we would
			 * only need this if LAG were a valid upper above
			 * another upper (e.g. a bridge that is a member of a
			 * LAG), and that is never a valid configuration. So we
			 * can keep this as NULL.
			 */
4846 err = __mlxsw_sp_netdevice_event(mlxsw_sp,
4847 NETDEV_PRECHANGEUPPER,
4852 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
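/* PRECHANGEUPPER rejects upper configurations the device cannot offload
 * (unsupported upper types, invalid LAG/bridge/VLAN combinations);
 * CHANGEUPPER performs the actual join or leave of bridges, LAGs and OVS.
 */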
4861 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4862 struct net_device *dev,
4863 unsigned long event, void *ptr,
4864 bool replay_deslavement)
4866 struct netdev_notifier_changeupper_info *info;
4867 struct mlxsw_sp_port *mlxsw_sp_port;
4868 struct netlink_ext_ack *extack;
4869 struct net_device *upper_dev;
4870 struct mlxsw_sp *mlxsw_sp;
4874 mlxsw_sp_port = netdev_priv(dev);
4875 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4877 extack = netdev_notifier_info_to_extack(&info->info);
4880 case NETDEV_PRECHANGEUPPER:
4881 upper_dev = info->upper_dev;
4882 if (!is_vlan_dev(upper_dev) &&
4883 !netif_is_lag_master(upper_dev) &&
4884 !netif_is_bridge_master(upper_dev) &&
4885 !netif_is_ovs_master(upper_dev) &&
4886 !netif_is_macvlan(upper_dev) &&
4887 !netif_is_l3_master(upper_dev)) {
4888 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4893 if (netif_is_bridge_master(upper_dev) &&
4894 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4895 mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4896 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4898 if (netdev_has_any_upper_dev(upper_dev) &&
4899 (!netif_is_bridge_master(upper_dev) ||
4900 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4902 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
4908 if (netif_is_lag_master(upper_dev) &&
4909 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4910 info->upper_info, extack))
4912 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
4913 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
4916 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4917 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
4918 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
4921 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
4922 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
4925 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
4926 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4929 if (netif_is_bridge_master(upper_dev)) {
4930 br_vlan_get_proto(upper_dev, &proto);
4931 if (br_vlan_enabled(upper_dev) &&
4932 proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
4933 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
4936 if (vlan_uses_dev(lower_dev) &&
4937 br_vlan_enabled(upper_dev) &&
4938 proto == ETH_P_8021AD) {
4939 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
4943 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
4944 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
4946 if (br_vlan_enabled(br_dev)) {
4947 br_vlan_get_proto(br_dev, &proto);
4948 if (proto == ETH_P_8021AD) {
4949 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
4954 if (is_vlan_dev(upper_dev) &&
4955 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
4956 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
4959 if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
4960 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
4964 case NETDEV_CHANGEUPPER:
4965 upper_dev = info->upper_dev;
4966 if (netif_is_bridge_master(upper_dev)) {
4967 if (info->linking) {
4968 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4973 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4976 if (!replay_deslavement)
4978 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
4981 } else if (netif_is_lag_master(upper_dev)) {
4982 if (info->linking) {
4983 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4986 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4987 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4989 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
4992 } else if (netif_is_ovs_master(upper_dev)) {
4994 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4996 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4997 } else if (netif_is_macvlan(upper_dev)) {
4999 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5000 } else if (is_vlan_dev(upper_dev)) {
5001 struct net_device *br_dev;
5003 if (!netif_is_bridge_port(upper_dev))
5007 br_dev = netdev_master_upper_dev_get(upper_dev);
5008 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
5017 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5018 unsigned long event, void *ptr)
5020 struct netdev_notifier_changelowerstate_info *info;
5021 struct mlxsw_sp_port *mlxsw_sp_port;
5024 mlxsw_sp_port = netdev_priv(dev);
5028 case NETDEV_CHANGELOWERSTATE:
5029 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5030 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5031 info->lower_state_info);
5033 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5041 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5042 struct net_device *port_dev,
5043 unsigned long event, void *ptr,
5044 bool replay_deslavement)
5047 case NETDEV_PRECHANGEUPPER:
5048 case NETDEV_CHANGEUPPER:
5049 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5051 replay_deslavement);
5052 case NETDEV_CHANGELOWERSTATE:
5053 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
/* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
 * to do any per-LAG / per-LAG-upper processing.
 */
5063 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
5064 unsigned long event,
5067 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
5068 struct netdev_notifier_changeupper_info *info = ptr;
5074 case NETDEV_CHANGEUPPER:
5077 if (netif_is_bridge_master(info->upper_dev))
5078 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
5084 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5085 unsigned long event, void *ptr)
5087 struct net_device *dev;
5088 struct list_head *iter;
5091 netdev_for_each_lower_dev(lag_dev, dev, iter) {
5092 if (mlxsw_sp_port_dev_check(dev)) {
5093 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5100 return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
5103 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
5104 struct net_device *dev,
5105 unsigned long event, void *ptr,
5106 u16 vid, bool replay_deslavement)
5108 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
5109 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5110 struct netdev_notifier_changeupper_info *info = ptr;
5111 struct netlink_ext_ack *extack;
5112 struct net_device *upper_dev;
5115 extack = netdev_notifier_info_to_extack(&info->info);
5118 case NETDEV_PRECHANGEUPPER:
5119 upper_dev = info->upper_dev;
5120 if (!netif_is_bridge_master(upper_dev) &&
5121 !netif_is_macvlan(upper_dev) &&
5122 !netif_is_l3_master(upper_dev)) {
5123 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5128 if (netif_is_bridge_master(upper_dev) &&
5129 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5130 mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5131 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5133 if (netdev_has_any_upper_dev(upper_dev) &&
5134 (!netif_is_bridge_master(upper_dev) ||
5135 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5137 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
5144 case NETDEV_CHANGEUPPER:
5145 upper_dev = info->upper_dev;
5146 if (netif_is_bridge_master(upper_dev)) {
5147 if (info->linking) {
5148 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5153 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5156 if (!replay_deslavement)
5158 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
5161 } else if (netif_is_macvlan(upper_dev)) {
5163 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5171 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5172 struct net_device *lag_dev,
5173 unsigned long event,
5176 struct net_device *dev;
5177 struct list_head *iter;
5180 netdev_for_each_lower_dev(lag_dev, dev, iter) {
5181 if (mlxsw_sp_port_dev_check(dev)) {
5182 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5190 return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5193 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5194 struct net_device *vlan_dev,
5195 struct net_device *br_dev,
5196 unsigned long event, void *ptr,
5197 u16 vid, bool process_foreign)
5199 struct netdev_notifier_changeupper_info *info = ptr;
5200 struct netlink_ext_ack *extack;
5201 struct net_device *upper_dev;
5203 if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5206 extack = netdev_notifier_info_to_extack(&info->info);
5209 case NETDEV_PRECHANGEUPPER:
5210 upper_dev = info->upper_dev;
5211 if (!netif_is_macvlan(upper_dev) &&
5212 !netif_is_l3_master(upper_dev)) {
5213 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5217 case NETDEV_CHANGEUPPER:
5218 upper_dev = info->upper_dev;
5221 if (netif_is_macvlan(upper_dev))
5222 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5229 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5230 struct net_device *vlan_dev,
5231 unsigned long event, void *ptr,
5232 bool process_foreign)
5234 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5235 u16 vid = vlan_dev_vlan_id(vlan_dev);
5237 if (mlxsw_sp_port_dev_check(real_dev))
5238 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5241 else if (netif_is_lag_master(real_dev))
5242 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5245 else if (netif_is_bridge_master(real_dev))
5246 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5254 static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
5255 struct net_device *br_dev,
5256 unsigned long event, void *ptr,
5257 bool process_foreign)
5259 struct netdev_notifier_changeupper_info *info = ptr;
5260 struct netlink_ext_ack *extack;
5261 struct net_device *upper_dev;
5264 if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
5267 extack = netdev_notifier_info_to_extack(&info->info);
5270 case NETDEV_PRECHANGEUPPER:
5271 upper_dev = info->upper_dev;
5272 if (!is_vlan_dev(upper_dev) &&
5273 !netif_is_macvlan(upper_dev) &&
5274 !netif_is_l3_master(upper_dev)) {
5275 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5280 if (br_vlan_enabled(br_dev)) {
5281 br_vlan_get_proto(br_dev, &proto);
5282 if (proto == ETH_P_8021AD) {
5283 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
5287 if (is_vlan_dev(upper_dev) &&
5288 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
5289 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
5293 case NETDEV_CHANGEUPPER:
5294 upper_dev = info->upper_dev;
5297 if (is_vlan_dev(upper_dev))
5298 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
5299 if (netif_is_macvlan(upper_dev))
5300 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5307 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5308 unsigned long event, void *ptr)
5310 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5311 struct netdev_notifier_changeupper_info *info = ptr;
5312 struct netlink_ext_ack *extack;
5313 struct net_device *upper_dev;
5315 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5318 extack = netdev_notifier_info_to_extack(&info->info);
5319 upper_dev = info->upper_dev;
5321 if (!netif_is_l3_master(upper_dev)) {
5322 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
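/* VxLAN device events: when a VxLAN device is linked to an offloaded bridge,
 * join the bridge VxLAN immediately for VLAN-unaware bridges and defer to
 * VLAN configuration for VLAN-aware ones; on unlinking, leave unless the
 * VLAN flush already handled it. The remaining cases re-join or leave
 * through the bridge master of the device.
 */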
5329 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
5330 struct net_device *dev,
5331 unsigned long event, void *ptr)
5333 struct netdev_notifier_changeupper_info *cu_info;
5334 struct netdev_notifier_info *info = ptr;
5335 struct netlink_ext_ack *extack;
5336 struct net_device *upper_dev;
5338 extack = netdev_notifier_info_to_extack(info);
5341 case NETDEV_CHANGEUPPER:
5342 cu_info = container_of(info,
5343 struct netdev_notifier_changeupper_info,
5345 upper_dev = cu_info->upper_dev;
5346 if (!netif_is_bridge_master(upper_dev))
5348 if (!mlxsw_sp_lower_get(upper_dev))
5350 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5352 if (cu_info->linking) {
5353 if (!netif_running(dev))
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
5359 if (br_vlan_enabled(upper_dev))
5361 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
			/* VLANs were already flushed, which triggered the
			 * respective notifications
			 */
5367 if (br_vlan_enabled(upper_dev))
5369 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5373 upper_dev = netdev_master_upper_dev_get(dev);
5376 if (!netif_is_bridge_master(upper_dev))
5378 if (!mlxsw_sp_lower_get(upper_dev))
5380 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
5383 upper_dev = netdev_master_upper_dev_get(dev);
5386 if (!netif_is_bridge_master(upper_dev))
5388 if (!mlxsw_sp_lower_get(upper_dev))
5390 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
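/* Top-level netdev event handling: invalidate SPAN entries that mirror to an
 * unregistered netdev, then dispatch by device type (VxLAN, physical port,
 * LAG, VLAN, bridge, macvlan).
 */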
5397 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5398 unsigned long event, void *ptr,
5399 bool process_foreign)
5401 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5402 struct mlxsw_sp_span_entry *span_entry;
5405 if (event == NETDEV_UNREGISTER) {
5406 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5408 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5411 if (netif_is_vxlan(dev))
5412 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5413 else if (mlxsw_sp_port_dev_check(dev))
5414 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5415 else if (netif_is_lag_master(dev))
5416 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5417 else if (is_vlan_dev(dev))
5418 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5420 else if (netif_is_bridge_master(dev))
5421 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5423 else if (netif_is_macvlan(dev))
5424 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5429 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5430 unsigned long event, void *ptr)
5432 struct mlxsw_sp *mlxsw_sp;
5435 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5436 mlxsw_sp_span_respin(mlxsw_sp);
5437 err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5439 return notifier_from_errno(err);
5442 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
5443 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
5447 static struct pci_driver mlxsw_sp1_pci_driver = {
5448 .name = mlxsw_sp1_driver_name,
5449 .id_table = mlxsw_sp1_pci_id_table,
5452 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
5453 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
5457 static struct pci_driver mlxsw_sp2_pci_driver = {
5458 .name = mlxsw_sp2_driver_name,
5459 .id_table = mlxsw_sp2_pci_id_table,
5462 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
5463 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
5467 static struct pci_driver mlxsw_sp3_pci_driver = {
5468 .name = mlxsw_sp3_driver_name,
5469 .id_table = mlxsw_sp3_pci_id_table,
5472 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
5473 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
5477 static struct pci_driver mlxsw_sp4_pci_driver = {
5478 .name = mlxsw_sp4_driver_name,
5479 .id_table = mlxsw_sp4_pci_id_table,
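/* Register the per-ASIC core drivers first and the PCI drivers afterwards;
 * the error path and module exit unwind the registrations in reverse order.
 */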
5482 static int __init mlxsw_sp_module_init(void)
5486 err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5490 err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5492 goto err_sp2_core_driver_register;
5494 err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
5496 goto err_sp3_core_driver_register;
5498 err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
5500 goto err_sp4_core_driver_register;
5502 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5504 goto err_sp1_pci_driver_register;
5506 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5508 goto err_sp2_pci_driver_register;
5510 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
5512 goto err_sp3_pci_driver_register;
5514 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
5516 goto err_sp4_pci_driver_register;
5520 err_sp4_pci_driver_register:
5521 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5522 err_sp3_pci_driver_register:
5523 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5524 err_sp2_pci_driver_register:
5525 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5526 err_sp1_pci_driver_register:
5527 mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5528 err_sp4_core_driver_register:
5529 mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5530 err_sp3_core_driver_register:
5531 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5532 err_sp2_core_driver_register:
5533 mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5537 static void __exit mlxsw_sp_module_exit(void)
5539 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
5540 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5541 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5542 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5543 mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5544 mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5545 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5546 mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5549 module_init(mlxsw_sp_module_init);
5550 module_exit(mlxsw_sp_module_exit);
5552 MODULE_LICENSE("Dual BSD/GPL");
5553 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
5554 MODULE_DESCRIPTION("Mellanox Spectrum driver");
5555 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
5556 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
5557 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
5558 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
5559 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
5560 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
5561 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
5562 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);