1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <net/switchdev.h>
27 #include <net/pkt_cls.h>
28 #include <net/netevent.h>
29 #include <net/addrconf.h>
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "spectrum_ptp.h"
44 #include "spectrum_trap.h"
45 #include "../mlxfw/mlxfw.h"
/* Minimum firmware version required by the driver on Spectrum-1, and the
 * matching .mfa2 firmware file name built from the same version numbers.
 */
47 #define MLXSW_SP1_FWREV_MAJOR 13
48 #define MLXSW_SP1_FWREV_MINOR 2007
49 #define MLXSW_SP1_FWREV_SUBMINOR 1168
/* Lowest minor FW version that supports reset after flashing (see
 * mlxsw_sp_fw_rev_validate()).
 */
50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
53 .major = MLXSW_SP1_FWREV_MAJOR,
54 .minor = MLXSW_SP1_FWREV_MINOR,
55 .subminor = MLXSW_SP1_FWREV_SUBMINOR,
56 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
59 #define MLXSW_SP1_FW_FILENAME \
60 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
61 "." __stringify(MLXSW_SP1_FWREV_MINOR) \
62 "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
/* Minimum firmware version and firmware file name for Spectrum-2. */
64 #define MLXSW_SP2_FWREV_MAJOR 29
65 #define MLXSW_SP2_FWREV_MINOR 2007
66 #define MLXSW_SP2_FWREV_SUBMINOR 1168
68 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
69 .major = MLXSW_SP2_FWREV_MAJOR,
70 .minor = MLXSW_SP2_FWREV_MINOR,
71 .subminor = MLXSW_SP2_FWREV_SUBMINOR,
74 #define MLXSW_SP2_FW_FILENAME \
75 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
76 "." __stringify(MLXSW_SP2_FWREV_MINOR) \
77 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
/* Minimum firmware version and firmware file name for Spectrum-3. */
79 #define MLXSW_SP3_FWREV_MAJOR 30
80 #define MLXSW_SP3_FWREV_MINOR 2007
81 #define MLXSW_SP3_FWREV_SUBMINOR 1168
83 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
84 .major = MLXSW_SP3_FWREV_MAJOR,
85 .minor = MLXSW_SP3_FWREV_MINOR,
86 .subminor = MLXSW_SP3_FWREV_SUBMINOR,
89 #define MLXSW_SP3_FW_FILENAME \
90 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
91 "." __stringify(MLXSW_SP3_FWREV_MINOR) \
92 "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
/* Per-ASIC driver names and base-MAC masks. The masks cover the bits of the
 * switch base MAC that are fixed per device (lower bits are per-port).
 */
94 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
95 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
96 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
98 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
99 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
101 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
102 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
/* Layout of the 16-byte Tx header prepended to every packet sent to the
 * device (see mlxsw_sp_txhdr_construct()). Each MLXSW_ITEM32() declares one
 * field as (offset, shift, width). Interleaved comment fragments below are
 * the original per-field documentation.
 */
109 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
112 * Packet control type.
113 * 0 - Ethernet control (e.g. EMADs, LACP)
116 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
119 * Packet protocol type. Must be set to 1 (Ethernet).
121 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
123 /* tx_hdr_rx_is_router
124 * Packet is sent from the router. Valid for data packets only.
126 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
129 * Indicates if the 'fid' field is valid and should be used for
130 * forwarding lookup. Valid for data packets only.
132 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
135 * Switch partition ID. Must be set to 0.
137 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
139 /* tx_hdr_control_tclass
140 * Indicates if the packet should use the control TClass and not one
141 * of the data TClasses.
143 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
146 * Egress TClass to be used on the egress device on the egress port.
148 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
151 * Destination local port for unicast packets.
152 * Destination multicast ID for multicast packets.
154 * Control packets are directed to a specific egress port, while data
155 * packets are transmitted through the CPU port (0) into the switch partition,
156 * where forwarding rules are applied.
158 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
161 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
162 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
163 * Valid for data packets only.
165 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
169 * 6 - Control packets
171 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
/* Glue object embedding the generic mlxfw device so the mlxfw flashing
 * callbacks can recover the owning mlxsw_sp via container_of().
 */
173 struct mlxsw_sp_mlxfw_dev {
174 struct mlxfw_dev mlxfw_dev;
175 struct mlxsw_sp *mlxsw_sp;
/* mlxfw callback: query a firmware component's max size, alignment and max
 * write chunk via the MCQI register. Alignment is clamped to at least 4 bytes
 * (2 bits) and the write size to what one MCDA transaction can carry.
 */
178 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
179 u16 component_index, u32 *p_max_size,
180 u8 *p_align_bits, u16 *p_max_write_size)
182 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
183 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
184 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
185 char mcqi_pl[MLXSW_REG_MCQI_LEN];
188 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
189 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
192 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
195 *p_align_bits = max_t(u8, *p_align_bits, 2);
196 *p_max_write_size = min_t(u16, *p_max_write_size,
197 MLXSW_REG_MCDA_MAX_DATA_LEN);
/* mlxfw callback: acquire the firmware-update handle. Queries MCC first and
 * refuses to proceed unless the FSM is idle, then issues the
 * LOCK_UPDATE_HANDLE instruction.
 */
201 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
203 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
204 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
205 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
206 char mcc_pl[MLXSW_REG_MCC_LEN];
210 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
211 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
215 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
216 if (control_state != MLXFW_FSM_STATE_IDLE)
219 mlxsw_reg_mcc_pack(mcc_pl,
220 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
/* mlxfw callback: start updating one component by sending the
 * UPDATE_COMPONENT instruction with the component's index and size.
 */
225 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
226 u32 fwhandle, u16 component_index,
229 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
230 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
231 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
232 char mcc_pl[MLXSW_REG_MCC_LEN];
234 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
235 component_index, fwhandle, component_size);
236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
/* mlxfw callback: download one data block of the firmware image at the given
 * offset using the MCDA register.
 */
239 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
240 u32 fwhandle, u8 *data, u16 size,
243 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
244 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
245 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
246 char mcda_pl[MLXSW_REG_MCDA_LEN];
248 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
249 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
/* mlxfw callback: ask the device to verify a fully downloaded component
 * (MCC VERIFY_COMPONENT instruction).
 */
252 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
253 u32 fwhandle, u16 component_index)
255 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
256 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
257 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
258 char mcc_pl[MLXSW_REG_MCC_LEN];
260 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
261 component_index, fwhandle, 0);
262 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
/* mlxfw callback: activate the newly flashed image (MCC ACTIVATE). */
265 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
267 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
268 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
270 char mcc_pl[MLXSW_REG_MCC_LEN];
272 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
/* mlxfw callback: read the update FSM's current state and error code from
 * MCC. The error code is clamped to the known MLXFW_FSM_STATE_ERR_* range.
 */
277 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
278 enum mlxfw_fsm_state *fsm_state,
279 enum mlxfw_fsm_state_err *fsm_state_err)
281 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
282 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
284 char mcc_pl[MLXSW_REG_MCC_LEN];
289 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
290 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
294 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
295 *fsm_state = control_state;
296 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
297 MLXFW_FSM_STATE_ERR_MAX);
/* mlxfw callback: abort an in-progress update (MCC CANCEL). Best-effort on
 * an error path, so the register write's status is intentionally ignored.
 */
301 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
303 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
304 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
306 char mcc_pl[MLXSW_REG_MCC_LEN];
308 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
310 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
/* mlxfw callback: release the update handle taken by mlxsw_sp_fsm_lock().
 * Cleanup path, so the write's status is intentionally ignored.
 */
313 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
315 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
316 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
317 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
318 char mcc_pl[MLXSW_REG_MCC_LEN];
320 mlxsw_reg_mcc_pack(mcc_pl,
321 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
323 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
/* Callback table handed to the generic mlxfw flashing core. */
326 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
327 .component_query = mlxsw_sp_component_query,
328 .fsm_lock = mlxsw_sp_fsm_lock,
329 .fsm_component_update = mlxsw_sp_fsm_component_update,
330 .fsm_block_download = mlxsw_sp_fsm_block_download,
331 .fsm_component_verify = mlxsw_sp_fsm_component_verify,
332 .fsm_activate = mlxsw_sp_fsm_activate,
333 .fsm_query_state = mlxsw_sp_fsm_query_state,
334 .fsm_cancel = mlxsw_sp_fsm_cancel,
335 .fsm_release = mlxsw_sp_fsm_release,
/* Flash @firmware onto the device through the mlxfw core, bracketing the
 * operation with flash_start/flash_end notifications to the mlxsw core.
 */
338 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
339 const struct firmware *firmware,
340 struct netlink_ext_ack *extack)
342 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
344 .ops = &mlxsw_sp_mlxfw_dev_ops,
345 .psid = mlxsw_sp->bus_info->psid,
346 .psid_size = strlen(mlxsw_sp->bus_info->psid),
347 .devlink = priv_to_devlink(mlxsw_sp->core),
353 mlxsw_core_fw_flash_start(mlxsw_sp->core);
354 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
356 mlxsw_core_fw_flash_end(mlxsw_sp->core);
/* Validate that the running firmware is compatible with the driver and, if
 * it is too old, flash the bundled firmware file. Returns -EAGAIN after a
 * successful flash when the running FW is recent enough to support reset,
 * telling the caller to re-initialize. Skipped entirely when the driver
 * declares no required revision or when devlink's 'fw_load_policy' is
 * 'flash'.
 */
361 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
363 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
364 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
365 const char *fw_filename = mlxsw_sp->fw_filename;
366 union devlink_param_value value;
367 const struct firmware *firmware;
370 /* Don't check if driver does not require it */
371 if (!req_rev || !fw_filename)
374 /* Don't check if devlink 'fw_load_policy' param is 'flash' */
375 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
376 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
380 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
383 /* Validate driver & FW are compatible */
384 if (rev->major != req_rev->major) {
385 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
386 rev->major, req_rev->major);
389 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
392 dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
393 rev->major, rev->minor, rev->subminor, req_rev->major,
394 req_rev->minor, req_rev->subminor);
395 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
398 err = request_firmware_direct(&firmware, fw_filename,
399 mlxsw_sp->bus_info->dev);
401 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
406 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
407 release_firmware(firmware);
409 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
411 /* On FW flash success, tell the caller FW reset is needed
412 * if current FW supports it.
414 if (rev->minor >= req_rev->can_reset_minor)
415 return err ? err : -EAGAIN;
/* devlink flash-update entry point: fetch the user-named firmware file and
 * flash it via mlxsw_sp_firmware_flash().
 */
420 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
421 const char *file_name, const char *component,
422 struct netlink_ext_ack *extack)
424 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
425 const struct firmware *firmware;
431 err = request_firmware_direct(&firmware, file_name,
432 mlxsw_sp->bus_info->dev);
435 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
436 release_firmware(firmware);
/* Read a flow counter's packet and byte values from hardware (MGPC, NOP
 * opcode = read without clearing).
 */
441 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
442 unsigned int counter_index, u64 *packets,
445 char mgpc_pl[MLXSW_REG_MGPC_LEN];
448 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
449 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
450 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
454 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
456 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
/* Zero a flow counter in hardware (MGPC CLEAR opcode). */
460 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
461 unsigned int counter_index)
463 char mgpc_pl[MLXSW_REG_MGPC_LEN];
465 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
466 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
467 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
/* Allocate a counter from the FLOW sub-pool and clear it; on clear failure
 * the counter is returned to the pool (err_counter_clear path).
 */
470 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
471 unsigned int *p_counter_index)
475 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
479 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
481 goto err_counter_clear;
485 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
/* Return a flow counter to the FLOW sub-pool. */
490 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
491 unsigned int counter_index)
493 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
/* Push and fill the Tx header on @skb for a control packet destined to
 * tx_info->local_port. Caller must guarantee MLXSW_TXHDR_LEN of headroom
 * (see skb_cow_head() in mlxsw_sp_port_xmit()).
 */
497 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
498 const struct mlxsw_tx_info *tx_info)
500 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
502 memset(txhdr, 0, MLXSW_TXHDR_LEN);
504 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
505 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
506 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
507 mlxsw_tx_hdr_swid_set(txhdr, 0);
508 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
509 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
510 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
/* Map a bridge STP port state to the device's SPMS register state.
 * Listening/disabled/blocking all map to DISCARDING.
 */
513 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
516 case BR_STATE_FORWARDING:
517 return MLXSW_REG_SPMS_STATE_FORWARDING;
518 case BR_STATE_LEARNING:
519 return MLXSW_REG_SPMS_STATE_LEARNING;
520 case BR_STATE_LISTENING:
521 case BR_STATE_DISABLED:
522 case BR_STATE_BLOCKING:
523 return MLXSW_REG_SPMS_STATE_DISCARDING;
/* Program the STP state for one VID on a port via the SPMS register. The
 * register payload is heap-allocated (SPMS is too large for the stack).
 */
529 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
532 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
533 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
537 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
540 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
541 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
543 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
/* Read the switch base MAC address (SPAD register) into mlxsw_sp->base_mac. */
548 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
550 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
553 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
556 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
/* Set the port's administrative state (up/down) via the PAOS register. */
560 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
564 char paos_pl[MLXSW_REG_PAOS_LEN];
566 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
567 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
568 MLXSW_PORT_ADMIN_STATUS_DOWN);
569 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
/* Program the port's hardware MAC address via the PPAD register. */
572 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
576 char ppad_pl[MLXSW_REG_PPAD_LEN];
578 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
579 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
/* Derive the port MAC from the switch base MAC by adding local_port to the
 * last octet, then program it into hardware. Note the addition can wrap the
 * final byte; the base MAC is masked per-ASIC (see mlxsw_sp*_mac_mask).
 */
583 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
585 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
586 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
588 ether_addr_copy(addr, mlxsw_sp->base_mac);
589 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
590 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
/* Program the port MTU (PMTU register). The L2 MTU sent to hardware includes
 * the Tx header and Ethernet header; the device's max MTU is queried first.
 */
593 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
595 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
596 char pmtu_pl[MLXSW_REG_PMTU_LEN];
600 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
601 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
602 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
605 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
610 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
611 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
/* Assign the port to a switch partition (PSPA register). */
614 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
616 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
617 char pspa_pl[MLXSW_REG_PSPA_LEN];
619 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
620 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
/* Enable/disable Virtual Port mode on the port (SVPE register). */
623 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
625 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
626 char svpe_pl[MLXSW_REG_SVPE_LEN];
628 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
629 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
/* Enable/disable MAC learning for one VID on a port (SPVMLR register; the
 * payload is heap-allocated as it is too large for the stack).
 */
632 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
635 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
639 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
642 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
644 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
/* Program the port's PVID in hardware (SPVID register); no bookkeeping. */
649 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
652 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
653 char spvid_pl[MLXSW_REG_SPVID_LEN];
655 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
656 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
/* Allow or drop untagged frames on the port (SPAFT register). */
659 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
662 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
663 char spaft_pl[MLXSW_REG_SPAFT_LEN];
665 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
666 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
/* Set the port's PVID and the untagged-frame filter accordingly, updating
 * the cached pvid on success. On failure, the previous PVID is restored
 * (err_port_allow_untagged_set rollback path).
 */
669 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
674 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
678 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
681 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
683 goto err_port_allow_untagged_set;
686 mlxsw_sp_port->pvid = vid;
689 err_port_allow_untagged_set:
690 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
/* Map the local port to its system port (SSPR register). */
695 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
697 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
698 char sspr_pl[MLXSW_REG_SSPR_LEN];
700 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
701 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
/* Query the port's module/lane mapping (PMLP register) and validate it:
 * width must be a power of 2, all lanes must belong to the same module,
 * Rx and Tx lanes must match (unless separate_rxtx is set) and must be
 * sequential starting at 0. Fills @port_mapping on success.
 */
705 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
706 struct mlxsw_sp_port_mapping *port_mapping)
708 char pmlp_pl[MLXSW_REG_PMLP_LEN];
715 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
716 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
719 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
720 width = mlxsw_reg_pmlp_width_get(pmlp_pl);
721 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
723 if (width && !is_power_of_2(width)) {
724 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
729 for (i = 0; i < width; i++) {
730 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
731 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
736 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
737 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
738 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
742 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
743 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
749 port_mapping->module = module;
750 port_mapping->width = width;
751 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
/* Program the port's cached module/lane mapping into hardware (PMLP),
 * assigning consecutive lanes starting at port_mapping->lane for both
 * Rx and Tx.
 */
755 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
757 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
758 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
759 char pmlp_pl[MLXSW_REG_PMLP_LEN];
762 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
763 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
764 for (i = 0; i < port_mapping->width; i++) {
765 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
766 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
769 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
/* Unmap the port from its module by programming a zero width (PMLP). */
772 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
774 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
775 char pmlp_pl[MLXSW_REG_PMLP_LEN];
777 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
778 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
779 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
/* ndo_open: bring the port administratively up, then start the Tx queue. */
782 static int mlxsw_sp_port_open(struct net_device *dev)
784 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
787 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
790 netif_start_queue(dev);
/* ndo_stop: stop the Tx queue, then bring the port administratively down. */
794 static int mlxsw_sp_port_stop(struct net_device *dev)
796 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
798 netif_stop_queue(dev);
799 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
/* ndo_start_xmit: transmit an skb through the CPU port. Ensures headroom for
 * the Tx header (dropping on failure), returns NETDEV_TX_BUSY when the core
 * queue is full, pads short frames, prepends the Tx header and hands the skb
 * to the core. Per-CPU stats count the payload length without the Tx header,
 * since hardware strips it on the way out.
 */
802 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
803 struct net_device *dev)
805 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
807 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
808 const struct mlxsw_tx_info tx_info = {
809 .local_port = mlxsw_sp_port->local_port,
815 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
816 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
817 dev_kfree_skb_any(skb);
821 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
823 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
824 return NETDEV_TX_BUSY;
826 if (eth_skb_pad(skb)) {
827 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
831 mlxsw_sp_txhdr_construct(skb, &tx_info);
832 /* TX header is consumed by HW on the way so we shouldn't count its
833 * bytes as being sent.
835 len = skb->len - MLXSW_TXHDR_LEN;
837 /* Due to a race we might fail here because of a full queue. In that
838 * unlikely case we simply drop the packet.
840 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
843 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
844 u64_stats_update_begin(&pcpu_stats->syncp);
845 pcpu_stats->tx_packets++;
846 pcpu_stats->tx_bytes += len;
847 u64_stats_update_end(&pcpu_stats->syncp);
849 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
850 dev_kfree_skb_any(skb);
/* ndo_set_rx_mode: intentionally empty — Rx filtering is handled elsewhere. */
855 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
/* ndo_set_mac_address: validate the new address, program it into hardware,
 * then update dev->dev_addr.
 */
859 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
861 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
862 struct sockaddr *addr = p;
865 if (!is_valid_ether_addr(addr->sa_data))
866 return -EADDRNOTAVAIL;
868 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
871 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* Xon/Xoff threshold for a priority-group buffer: two MTUs worth of cells. */
875 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
878 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
/* PFC delay buffer in cells: the 802.1Qbb delay allowance (converted from
 * bit-time units) scaled by the cell factor, plus one extra MTU of headroom.
 */
881 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
883 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
886 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
888 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
892 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
893 * Assumes 100m cable and maximum MTU.
895 #define MLXSW_SP_PAUSE_DELAY 58752
/* Delay buffer size in cells for a PG: PFC-specific calculation when PFC is
 * enabled, the fixed PAUSE allowance when pause is enabled, otherwise zero
 * (lossy traffic needs no delay buffer).
 */
897 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
898 u16 delay, bool pfc, bool pause)
901 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
903 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
/* Pack one PG buffer entry into the PBMC payload, as lossy or lossless
 * (lossless entries also carry the Xon/Xoff threshold).
 */
908 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
912 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
914 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
/* Configure the port's headroom buffers (PBMC register) for the given MTU,
 * priority-to-TC mapping, global pause and optional per-priority PFC
 * settings. For each TC it derives whether any mapped priority has PFC,
 * sizes the buffer as threshold + delay cells (with 8x-port adjustment),
 * and rejects the configuration if the total exceeds the port's maximum
 * headroom.
 */
918 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
919 u8 *prio_tc, bool pause_en,
920 struct ieee_pfc *my_pfc)
922 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
923 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
924 u16 delay = !!my_pfc ? my_pfc->delay : 0;
925 char pbmc_pl[MLXSW_REG_PBMC_LEN];
926 u32 taken_headroom_cells = 0;
927 u32 max_headroom_cells;
930 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
932 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
933 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
937 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
938 bool configure = false;
/* Find whether any priority mapped to this TC has PFC enabled. */
945 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
946 if (prio_tc[j] == i) {
947 pfc = pfc_en & BIT(j);
956 lossy = !(pfc || pause_en);
957 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
958 thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
959 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
961 delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
962 total_cells = thres_cells + delay_cells;
964 taken_headroom_cells += total_cells;
965 if (taken_headroom_cells > max_headroom_cells)
968 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
972 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
/* Convenience wrapper around __mlxsw_sp_port_headroom_set() that pulls the
 * prio-to-TC map and PFC settings from the port's DCB state, falling back
 * to an all-zero map and no PFC when DCB ETS is not configured.
 */
975 int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
976 int mtu, bool pause_en)
978 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
979 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
980 struct ieee_pfc *my_pfc;
983 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
984 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
986 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
/* ndo_change_mtu: update headroom, SPAN buffers and the hardware MTU in
 * order, rolling back the earlier steps (to dev->mtu) on failure.
 */
990 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
992 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
993 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
996 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
999 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
1001 goto err_span_port_mtu_update;
1002 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1004 goto err_port_mtu_set;
1009 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1010 err_span_port_mtu_update:
1011 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
/* Aggregate the per-CPU software Tx/Rx counters into @stats, using the
 * u64_stats seqcount to read each CPU's values consistently.
 */
1016 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1017 struct rtnl_link_stats64 *stats)
1019 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1020 struct mlxsw_sp_port_pcpu_stats *p;
1021 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1026 for_each_possible_cpu(i) {
1027 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
1029 start = u64_stats_fetch_begin_irq(&p->syncp);
1030 rx_packets = p->rx_packets;
1031 rx_bytes = p->rx_bytes;
1032 tx_packets = p->tx_packets;
1033 tx_bytes = p->tx_bytes;
1034 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1036 stats->rx_packets += rx_packets;
1037 stats->rx_bytes += rx_bytes;
1038 stats->tx_packets += tx_packets;
1039 stats->tx_bytes += tx_bytes;
1040 /* tx_dropped is u32, updated without syncp protection. */
1041 tx_dropped += p->tx_dropped;
1043 stats->tx_dropped = tx_dropped;
/* ndo_has_offload_stats: the driver provides CPU-hit offload stats. */
1047 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1050 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
/* ndo_get_offload_stats: CPU-hit stats come from the software counters. */
1057 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1061 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1062 return mlxsw_sp_port_get_sw_stats64(dev, sp);
/* Fetch one raw PPCNT counter group/priority into the caller's payload. */
1068 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1069 int prio, char *ppcnt_pl)
1071 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1072 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1074 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1075 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
/* Fill @stats from the IEEE 802.3 PPCNT counter group: packet/byte totals,
 * multicast, CRC/alignment errors, and the aggregated length/error sums.
 */
1078 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1079 struct rtnl_link_stats64 *stats)
1081 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1084 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1090 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1092 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1094 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1096 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1098 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1100 stats->rx_crc_errors =
1101 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1102 stats->rx_frame_errors =
1103 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1105 stats->rx_length_errors = (
1106 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1107 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1108 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1110 stats->rx_errors = (stats->rx_crc_errors +
1111 stats->rx_frame_errors + stats->rx_length_errors);
/* Fill extended per-port stats from PPCNT: ECN marks, then per-queue WRED
 * drops, backlog and tail drops, then per-priority Tx packet/byte counts.
 */
1118 mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
1119 struct mlxsw_sp_port_xstats *xstats)
1121 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1124 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
1127 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
1129 for (i = 0; i < TC_MAX_QUEUE; i++) {
1130 err = mlxsw_sp_port_get_stats_raw(dev,
1131 MLXSW_REG_PPCNT_TC_CONG_TC,
1134 xstats->wred_drop[i] =
1135 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
1137 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
1142 xstats->backlog[i] =
1143 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1144 xstats->tail_drop[i] =
1145 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
1148 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1149 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
1154 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
1155 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
/* Delayed work: periodically refresh the cached HW stats while the carrier
 * is up, then re-arm itself with MLXSW_HW_STATS_UPDATE_TIME.
 */
1159 static void update_stats_cache(struct work_struct *work)
1161 struct mlxsw_sp_port *mlxsw_sp_port =
1162 container_of(work, struct mlxsw_sp_port,
1163 periodic_hw_stats.update_dw.work);
1165 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1166 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
1167 * necessary when port goes down.
1171 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1172 &mlxsw_sp_port->periodic_hw_stats.stats);
1173 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1174 &mlxsw_sp_port->periodic_hw_stats.xstats);
1177 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1178 MLXSW_HW_STATS_UPDATE_TIME);
1181 /* Return the stats from a cache that is updated periodically,
1182 * as this function might get called in an atomic context.
1185 mlxsw_sp_port_get_stats64(struct net_device *dev,
1186 struct rtnl_link_stats64 *stats)
1188 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
/* Copy the snapshot maintained by update_stats_cache(). */
1190 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
/* Program membership/untagged state for one contiguous VID range
 * [vid_begin, vid_end] on the port via a single SPVM register write.
 * The SPVM payload is heap-allocated because MLXSW_REG_SPVM_LEN is too
 * large for the stack. NOTE(review): the kfree() and return of this
 * function are elided from this view.
 */
1193 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1194 u16 vid_begin, u16 vid_end,
1195 bool is_member, bool untagged)
1197 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1201 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1205 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1206 vid_end, is_member, untagged);
1207 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
/* Program a VID range on the port, splitting it into chunks of at most
 * MLXSW_REG_SPVM_REC_MAX_COUNT records per SPVM write (the register's
 * per-transaction record limit).
 */
1212 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1213 u16 vid_end, bool is_member, bool untagged)
1218 for (vid = vid_begin; vid <= vid_end;
1219 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
/* Clamp the chunk end to vid_end on the last iteration. */
1220 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1223 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1224 is_member, untagged);
/* Destroy the port's VLAN entries. Unless @flush_default is set, the
 * entry for MLXSW_SP_DEFAULT_VID is preserved. Uses the _safe iterator
 * because entries are destroyed (and unlinked) while walking the list.
 */
1232 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1235 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1237 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1238 &mlxsw_sp_port->vlans_list, list) {
1239 if (!flush_default &&
1240 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1242 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* Detach a port-VLAN from whatever upper construct it is bound to:
 * leave its bridge port if bridged, otherwise leave the router if it
 * has a FID. The two cases are mutually exclusive here.
 */
1247 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1249 if (mlxsw_sp_port_vlan->bridge_port)
1250 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1251 else if (mlxsw_sp_port_vlan->fid)
1252 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
/* Create a port-VLAN object for @vid: program the VID in HW (untagged
 * only for the default VID), allocate the tracking struct and link it
 * on the port's vlans_list. Returns the new object, ERR_PTR(-EEXIST)
 * if @vid already exists on the port, or another ERR_PTR on failure.
 * On allocation failure the HW VID programming is rolled back.
 */
1255 struct mlxsw_sp_port_vlan *
1256 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1258 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1259 bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1262 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1263 if (mlxsw_sp_port_vlan)
1264 return ERR_PTR(-EEXIST);
1266 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1268 return ERR_PTR(err);
1270 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1271 if (!mlxsw_sp_port_vlan) {
1273 goto err_port_vlan_alloc;
1276 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1277 mlxsw_sp_port_vlan->vid = vid;
1278 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1280 return mlxsw_sp_port_vlan;
1282 err_port_vlan_alloc:
/* Undo the HW membership programmed above. */
1283 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1284 return ERR_PTR(err);
/* Tear down a port-VLAN: detach it from bridge/router, unlink and free
 * the tracking struct, then remove the VID from the port in HW.
 * Mirror image of mlxsw_sp_port_vlan_create().
 */
1287 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1289 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
/* Cache vid before kfree() below invalidates the object. */
1290 u16 vid = mlxsw_sp_port_vlan->vid;
1292 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1293 list_del(&mlxsw_sp_port_vlan->list);
1294 kfree(mlxsw_sp_port_vlan);
1295 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
/* ndo_vlan_rx_add_vid: create a port-VLAN for @vid.
 * NOTE(review): the "VID 0" early-return guarded by the comment below
 * is elided from this view.
 */
1298 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1299 __be16 __always_unused proto, u16 vid)
1301 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1303 /* VLAN 0 is added to HW filter when device goes up, but it is
1304 * reserved in our case, so simply return.
1309 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
/* ndo_vlan_rx_kill_vid: destroy the port-VLAN for @vid if it exists.
 * Silently succeeds when the VID is unknown on this port.
 */
1312 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1313 __be16 __always_unused proto, u16 vid)
1315 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1316 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1318 /* VLAN 0 is removed from HW filter when device goes down, but
1319 * it is reserved in our case, so simply return.
1324 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1325 if (!mlxsw_sp_port_vlan)
1327 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* Dispatch a TC block-bind request by binder type: clsact ingress and
 * egress share one handler (differentiated by the bool), RED early-drop
 * qevents get their own. NOTE(review): the default (unsupported binder)
 * return is elided from this view.
 */
1332 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1333 struct flow_block_offload *f)
1335 switch (f->binder_type) {
1336 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1337 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1338 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1339 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1340 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1341 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
/* ndo_setup_tc: route the offload request to the per-type handler
 * (flow blocks plus the RED/PRIO/ETS/TBF/FIFO qdisc offloads).
 * NOTE(review): the default (unsupported type) return is elided from
 * this view.
 */
1347 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1350 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1353 case TC_SETUP_BLOCK:
1354 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1355 case TC_SETUP_QDISC_RED:
1356 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1357 case TC_SETUP_QDISC_PRIO:
1358 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1359 case TC_SETUP_QDISC_ETS:
1360 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1361 case TC_SETUP_QDISC_TBF:
1362 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1363 case TC_SETUP_QDISC_FIFO:
1364 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
/* NETIF_F_HW_TC feature toggle. Disabling is refused while offloaded
 * filters exist on either the ingress or egress flow block; otherwise
 * the disable refcount on both blocks is bumped (enable) or dropped
 * (disable). NOTE(review): the enable/disable branch structure and the
 * error return are partially elided from this view.
 */
1370 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1372 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1375 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1376 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1377 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1380 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1381 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1383 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1384 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
/* NETIF_F_LOOPBACK feature toggle: program PHY-local loopback via the
 * PPLR register. The port is administratively taken down around the
 * register write when the netdev is running, then brought back up.
 */
1389 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1391 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1392 char pplr_pl[MLXSW_REG_PPLR_LEN];
1395 if (netif_running(dev))
1396 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1398 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1399 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
/* Restore admin-up regardless of the write's outcome. */
1402 if (netif_running(dev))
1403 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
/* Handler signature for a single netdev feature toggle. */
1408 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
/* Apply one feature bit from @wanted_features if it differs from the
 * current dev->features, by calling @feature_handler. On success the
 * bit is mirrored into dev->features; on failure an error is logged.
 * NOTE(review): the early return when the bit is unchanged and the
 * error return path are elided from this view.
 */
1410 static int mlxsw_sp_handle_feature(struct net_device *dev,
1411 netdev_features_t wanted_features,
1412 netdev_features_t feature,
1413 mlxsw_sp_feature_handler feature_handler)
1415 netdev_features_t changes = wanted_features ^ dev->features;
1416 bool enable = !!(wanted_features & feature);
1419 if (!(changes & feature))
1422 err = feature_handler(dev, enable);
1424 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1425 enable ? "Enable" : "Disable", &feature, err);
1430 dev->features |= feature;
1432 dev->features &= ~feature;
/* ndo_set_features: toggle the two offloadable features (HW_TC and
 * LOOPBACK) independently; errors are OR-combined so both handlers
 * always run, and dev->features is restored to its prior value on
 * failure. NOTE(review): the surrounding error check and returns are
 * elided from this view.
 */
1436 static int mlxsw_sp_set_features(struct net_device *dev,
1437 netdev_features_t features)
1439 netdev_features_t oper_features = dev->features;
1442 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1443 mlxsw_sp_feature_hw_tc);
1444 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1445 mlxsw_sp_feature_loopback);
1448 dev->features = oper_features;
/* ndo_get_devlink_port: map the netdev to its devlink port object via
 * the core's local-port lookup.
 */
1455 static struct devlink_port *
1456 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1458 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1459 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1461 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1462 mlxsw_sp_port->local_port);
/* SIOCSHWTSTAMP: copy the hwtstamp_config from userspace, apply it via
 * the per-ASIC ptp_ops->hwtstamp_set callback, and copy the (possibly
 * adjusted) config back to userspace. NOTE(review): -EFAULT returns and
 * the error check after hwtstamp_set are elided from this view.
 */
1465 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1468 struct hwtstamp_config config;
1471 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1474 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1479 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
/* SIOCGHWTSTAMP: query the current hwtstamp_config through the per-ASIC
 * ptp_ops->hwtstamp_get callback and copy it to userspace.
 */
1485 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1488 struct hwtstamp_config config;
1491 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1496 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
/* Reset PTP timestamping on the port by applying an all-zero
 * hwtstamp_config (i.e. timestamping off); used on port removal.
 */
1502 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1504 struct hwtstamp_config config = {0};
1506 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
/* ndo_do_ioctl: only the two hwtstamp ioctls are handled here; the
 * case labels and default return are elided from this view.
 */
1510 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1512 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1516 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1518 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
/* netdev callbacks for Spectrum front-panel ports; wired into each
 * port netdev in mlxsw_sp_port_create().
 */
1524 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1525 .ndo_open = mlxsw_sp_port_open,
1526 .ndo_stop = mlxsw_sp_port_stop,
1527 .ndo_start_xmit = mlxsw_sp_port_xmit,
1528 .ndo_setup_tc = mlxsw_sp_setup_tc,
1529 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
1530 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1531 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1532 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
1533 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1534 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1535 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1536 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1537 .ndo_set_features = mlxsw_sp_set_features,
1538 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
1539 .ndo_do_ioctl = mlxsw_sp_port_ioctl,
/* Advertise all speeds the port supports: query PTYS for the capability
 * mask, then write it back as the admin (advertised) mask, honoring the
 * port's autoneg setting.
 */
1543 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1545 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1546 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1547 const struct mlxsw_sp_port_type_speed_ops *ops;
1548 char ptys_pl[MLXSW_REG_PTYS_LEN];
1551 ops = mlxsw_sp->port_type_speed_ops;
1553 /* Set advertised speeds to supported speeds. */
1554 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1556 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1560 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
1561 &eth_proto_admin, &eth_proto_oper);
1562 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1563 eth_proto_cap, mlxsw_sp_port->link.autoneg);
1564 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
/* Read the port's current operational speed: query PTYS, unpack only
 * the operational protocol mask, and translate it to a speed value via
 * the ASIC-specific from_ptys_speed helper.
 */
1567 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1569 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1570 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1571 char ptys_pl[MLXSW_REG_PTYS_LEN];
1575 port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1576 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1577 mlxsw_sp_port->local_port, 0,
1579 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1582 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1584 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
/* Configure one ETS scheduling element (QEEC register): hierarchy
 * level @hr, element @index linked to @next_index, with DWRR mode and
 * weight. The "de" bit is always set so the DWRR configuration takes
 * effect.
 */
1588 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1589 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1590 bool dwrr, u8 dwrr_weight)
1592 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1593 char qeec_pl[MLXSW_REG_QEEC_LEN];
1595 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1597 mlxsw_reg_qeec_de_set(qeec_pl, true);
1598 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1599 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1600 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Configure the max shaper of an ETS element (QEEC register): sets the
 * "mase" enable bit, the shaper rate and burst size for element @index
 * at hierarchy level @hr.
 */
1603 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1604 enum mlxsw_reg_qeec_hr hr, u8 index,
1605 u8 next_index, u32 maxrate, u8 burst_size)
1607 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1608 char qeec_pl[MLXSW_REG_QEEC_LEN];
1610 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1612 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1613 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1614 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1615 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Configure the min shaper of an ETS element (QEEC register): sets the
 * "mise" enable bit and the minimum shaper rate for element @index at
 * hierarchy level @hr.
 */
1618 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1619 enum mlxsw_reg_qeec_hr hr, u8 index,
1620 u8 next_index, u32 minrate)
1622 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1623 char qeec_pl[MLXSW_REG_QEEC_LEN];
1625 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1627 mlxsw_reg_qeec_mise_set(qeec_pl, true);
1628 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1630 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Map a switch priority to a traffic class on the port via the QTCT
 * register.
 */
1633 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1634 u8 switch_prio, u8 tclass)
1636 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1637 char qtct_pl[MLXSW_REG_QTCT_LEN];
1639 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1641 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
/* Initialize the port's ETS scheduling hierarchy to a known default:
 * build the group -> subgroup -> TC element tree, disable max shapers
 * at every level that has one, set a minimum shaper for the multicast
 * TCs, and map all switch priorities to TC 0.
 * NOTE(review): the per-step "if (err) return err" checks are elided
 * from this view.
 */
1644 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1648 /* Setup the elements hierarcy, so that each TC is linked to
1649 * one subgroup, which are all member in the same group.
1651 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1652 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
1655 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1656 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1657 MLXSW_REG_QEEC_HR_SUBGROUP, i,
1662 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1663 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1664 MLXSW_REG_QEEC_HR_TC, i, i,
1669 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1670 MLXSW_REG_QEEC_HR_TC,
1677 /* Make sure the max shaper is disabled in all hierarchies that support
1678 * it. Note that this disables ptps (PTP shaper), but that is intended
1679 * for the initial configuration.
1681 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1682 MLXSW_REG_QEEC_HR_PORT, 0, 0,
1683 MLXSW_REG_QEEC_MAS_DIS, 0);
1686 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1687 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1688 MLXSW_REG_QEEC_HR_SUBGROUP,
1690 MLXSW_REG_QEEC_MAS_DIS, 0);
1694 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1695 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1696 MLXSW_REG_QEEC_HR_TC,
1698 MLXSW_REG_QEEC_MAS_DIS, 0);
1702 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1703 MLXSW_REG_QEEC_HR_TC,
1705 MLXSW_REG_QEEC_MAS_DIS, 0);
1710 /* Configure the min shaper for multicast TCs. */
1711 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1712 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
1713 MLXSW_REG_QEEC_HR_TC,
1715 MLXSW_REG_QEEC_MIS_MIN);
1720 /* Map all priorities to traffic class 0. */
1721 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1722 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
/* Enable/disable multicast-aware TC mapping mode on the port via the
 * QTCTM register.
 */
1730 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1733 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1734 char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1736 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1737 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
/* Create one front-panel port: register it with the core, allocate and
 * populate its netdev and private state, then walk the long init
 * sequence (module map, SWID, MAC, MTU, buffers, ETS, DCB, FIDs,
 * qdiscs, VLANs, NVE, PVID, default VLAN) before registering the
 * netdev and kicking off the periodic stats work. Errors unwind in
 * strict reverse order through the goto ladder at the bottom.
 * A non-zero @split_base_local_port marks the port as a split port.
 * NOTE(review): many "if (err)" lines and returns are elided from this
 * view; the visible labels define the unwind order.
 */
1740 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1741 u8 split_base_local_port,
1742 struct mlxsw_sp_port_mapping *port_mapping)
1744 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1745 bool split = !!split_base_local_port;
1746 struct mlxsw_sp_port *mlxsw_sp_port;
1747 u32 lanes = port_mapping->width;
1748 struct net_device *dev;
/* A port is splittable only when it spans >1 lane and is not itself
 * the product of a split.
 */
1752 splittable = lanes > 1 && !split;
1753 err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
1754 port_mapping->module + 1, split,
1755 port_mapping->lane / lanes,
1758 sizeof(mlxsw_sp->base_mac));
1760 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1765 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1768 goto err_alloc_etherdev;
1770 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1771 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1772 mlxsw_sp_port = netdev_priv(dev);
1773 mlxsw_sp_port->dev = dev;
1774 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1775 mlxsw_sp_port->local_port = local_port;
1776 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1777 mlxsw_sp_port->split = split;
1778 mlxsw_sp_port->split_base_local_port = split_base_local_port;
1779 mlxsw_sp_port->mapping = *port_mapping;
1780 mlxsw_sp_port->link.autoneg = 1;
1781 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1783 mlxsw_sp_port->pcpu_stats =
1784 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1785 if (!mlxsw_sp_port->pcpu_stats) {
1787 goto err_alloc_stats;
1790 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1791 &update_stats_cache);
1793 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1794 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1796 err = mlxsw_sp_port_module_map(mlxsw_sp_port);
1798 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1799 mlxsw_sp_port->local_port);
1800 goto err_port_module_map;
1803 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1805 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1806 mlxsw_sp_port->local_port);
1807 goto err_port_swid_set;
1810 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1812 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1813 mlxsw_sp_port->local_port);
1814 goto err_dev_addr_init;
1817 netif_carrier_off(dev);
1819 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1820 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
1821 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1824 dev->max_mtu = ETH_MAX_MTU;
1826 /* Each packet needs to have a Tx header (metadata) on top all other
1829 dev->needed_headroom = MLXSW_TXHDR_LEN;
1831 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1833 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1834 mlxsw_sp_port->local_port);
1835 goto err_port_system_port_mapping_set;
1838 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1840 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1841 mlxsw_sp_port->local_port);
1842 goto err_port_speed_by_width_set;
1845 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1847 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1848 mlxsw_sp_port->local_port);
1849 goto err_port_mtu_set;
1852 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1854 goto err_port_admin_status_set;
1856 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1858 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1859 mlxsw_sp_port->local_port);
1860 goto err_port_buffers_init;
1863 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1865 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1866 mlxsw_sp_port->local_port);
1867 goto err_port_ets_init;
1870 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1872 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1873 mlxsw_sp_port->local_port);
1874 goto err_port_tc_mc_mode;
1877 /* ETS and buffers must be initialized before DCB. */
1878 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1880 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1881 mlxsw_sp_port->local_port);
1882 goto err_port_dcb_init;
1885 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1887 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1888 mlxsw_sp_port->local_port);
1889 goto err_port_fids_init;
1892 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1894 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1895 mlxsw_sp_port->local_port);
1896 goto err_port_qdiscs_init;
/* Remove all VIDs from the HW filter before adding the ones we use. */
1899 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1902 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1903 mlxsw_sp_port->local_port);
1904 goto err_port_vlan_clear;
1907 err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1909 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1910 mlxsw_sp_port->local_port);
1911 goto err_port_nve_init;
1914 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
1916 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1917 mlxsw_sp_port->local_port);
1918 goto err_port_pvid_set;
1921 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1922 MLXSW_SP_DEFAULT_VID);
1923 if (IS_ERR(mlxsw_sp_port_vlan)) {
1924 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1925 mlxsw_sp_port->local_port);
1926 err = PTR_ERR(mlxsw_sp_port_vlan);
1927 goto err_port_vlan_create;
1929 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1931 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1932 mlxsw_sp->ptp_ops->shaper_work);
1933 INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
1934 mlxsw_sp_span_speed_update_work);
/* Publish the port before register_netdev() so callbacks that fire
 * during registration can look it up.
 */
1936 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1937 err = register_netdev(dev);
1939 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1940 mlxsw_sp_port->local_port);
1941 goto err_register_netdev;
1944 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
1945 mlxsw_sp_port, dev);
1946 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
/* Error unwind: reverse order of the init sequence above. */
1949 err_register_netdev:
1950 mlxsw_sp->ports[local_port] = NULL;
1951 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1952 err_port_vlan_create:
1954 mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1956 err_port_vlan_clear:
1957 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1958 err_port_qdiscs_init:
1959 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1961 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1963 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1964 err_port_tc_mc_mode:
1966 err_port_buffers_init:
1967 err_port_admin_status_set:
1969 err_port_speed_by_width_set:
1970 err_port_system_port_mapping_set:
1972 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1974 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
1975 err_port_module_map:
1976 free_percpu(mlxsw_sp_port->pcpu_stats);
1980 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
/* Tear down one front-panel port: cancel its delayed works, clear PTP
 * state, unregister the netdev, then unwind the init sequence of
 * mlxsw_sp_port_create() in reverse. The ports[] slot is cleared
 * before VLAN flush so lookups stop finding the dying port.
 */
1984 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1986 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1988 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
1989 cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
1990 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
1991 mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
1992 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
1993 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1994 mlxsw_sp->ports[local_port] = NULL;
/* flush_default=true: the default VLAN goes too on full removal. */
1995 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
1996 mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1997 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1998 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1999 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2000 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
2001 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2002 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
2003 free_percpu(mlxsw_sp_port->pcpu_stats);
2004 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
2005 free_netdev(mlxsw_sp_port->dev);
2006 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
/* Create the (netdev-less) CPU port object: allocate minimal state,
 * register it with the core, and stash it in ports[MLXSW_PORT_CPU_PORT].
 * NOTE(review): allocation-failure and success returns are elided from
 * this view.
 */
2009 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
2011 struct mlxsw_sp_port *mlxsw_sp_port;
2014 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
2018 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2019 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
2021 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
2024 sizeof(mlxsw_sp->base_mac));
2026 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
2027 goto err_core_cpu_port_init;
2030 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
2033 err_core_cpu_port_init:
2034 kfree(mlxsw_sp_port);
/* Remove the CPU port: unregister from the core, clear its ports[]
 * slot, and free the state allocated in mlxsw_sp_cpu_port_create().
 */
2038 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
2040 struct mlxsw_sp_port *mlxsw_sp_port =
2041 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
2043 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
2044 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
2045 kfree(mlxsw_sp_port);
/* True iff a port object exists at @local_port in the ports[] array. */
2048 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2050 return mlxsw_sp->ports[local_port] != NULL;
/* Remove every created front-panel port (local ports are 1-based),
 * then the CPU port, and free the ports[] array itself.
 */
2053 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2057 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2058 if (mlxsw_sp_port_created(mlxsw_sp, i))
2059 mlxsw_sp_port_remove(mlxsw_sp, i);
2060 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2061 kfree(mlxsw_sp->ports);
2062 mlxsw_sp->ports = NULL;
/* Allocate the ports[] array, create the CPU port, then create one
 * front-panel port per valid entry in the cached port_mapping table
 * (local ports are 1-based; ports without a mapping are skipped).
 * On failure, already-created ports are removed in reverse order.
 * NOTE(review): some error checks and returns are elided from this
 * view.
 */
2065 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2067 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2068 struct mlxsw_sp_port_mapping *port_mapping;
2073 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2074 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2075 if (!mlxsw_sp->ports)
2078 err = mlxsw_sp_cpu_port_create(mlxsw_sp);
2080 goto err_cpu_port_create;
2082 for (i = 1; i < max_ports; i++) {
2083 port_mapping = mlxsw_sp->port_mapping[i];
/* split_base_local_port=0: created as a non-split port. */
2086 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
2088 goto err_port_create;
2093 for (i--; i >= 1; i--)
2094 if (mlxsw_sp_port_created(mlxsw_sp, i))
2095 mlxsw_sp_port_remove(mlxsw_sp, i);
2096 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2097 err_cpu_port_create:
2098 kfree(mlxsw_sp->ports);
2099 mlxsw_sp->ports = NULL;
/* Build the per-local-port module-mapping cache: query each port's
 * mapping from the device and kmemdup() it into port_mapping[i];
 * entries with zero width (no module) are left NULL. On failure, all
 * duplicated entries are freed.
 * NOTE(review): ENOMEM paths and returns are partially elided from
 * this view.
 */
2103 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2105 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2106 struct mlxsw_sp_port_mapping port_mapping;
2110 mlxsw_sp->port_mapping = kcalloc(max_ports,
2111 sizeof(struct mlxsw_sp_port_mapping *),
2113 if (!mlxsw_sp->port_mapping)
2116 for (i = 1; i < max_ports; i++) {
2117 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
2119 goto err_port_module_info_get;
2120 if (!port_mapping.width)
2123 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
2124 sizeof(port_mapping),
2126 if (!mlxsw_sp->port_mapping[i]) {
2128 goto err_port_module_info_dup;
2133 err_port_module_info_get:
2134 err_port_module_info_dup:
2135 for (i--; i >= 1; i--)
2136 kfree(mlxsw_sp->port_mapping[i]);
2137 kfree(mlxsw_sp->port_mapping);
/* Free every cached per-port module mapping (kfree(NULL) is a no-op
 * for unmapped slots) and then the array itself.
 */
2141 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
2145 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2146 kfree(mlxsw_sp->port_mapping[i]);
2147 kfree(mlxsw_sp->port_mapping);
2150 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
2152 u8 offset = (local_port - 1) % max_width;
2154 return local_port - offset;
/* Create @count split sub-ports starting at @base_port, spaced @offset
 * local ports apart. Each sub-port gets width/count lanes; the lane
 * base advances by the split width per iteration. On failure, already
 * created sub-ports are removed in reverse.
 * NOTE(review): the success return and final error return are elided
 * from this view.
 */
2158 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2159 struct mlxsw_sp_port_mapping *port_mapping,
2160 unsigned int count, u8 offset)
2162 struct mlxsw_sp_port_mapping split_port_mapping;
2165 split_port_mapping = *port_mapping;
2166 split_port_mapping.width /= count;
2167 for (i = 0; i < count; i++) {
2168 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
2169 base_port, &split_port_mapping);
2171 goto err_port_create;
2172 split_port_mapping.lane += split_port_mapping.width;
2178 for (i--; i >= 0; i--)
2179 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2180 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
/* Recreate the original (unsplit) ports in the local-port gap
 * [base_port, base_port + count * offset) from the cached module-info
 * table; slots without a mapping are skipped. Creation errors are
 * intentionally ignored — this is best-effort restoration.
 */
2184 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2186 unsigned int count, u8 offset)
2188 struct mlxsw_sp_port_mapping *port_mapping;
2191 /* Go over original unsplit ports in the gap and recreate them. */
2192 for (i = 0; i < count * offset; i++) {
2193 port_mapping = mlxsw_sp->port_mapping[base_port + i];
2196 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
/* Compute the local-port spacing between split sub-ports: pick the
 * LOCAL_PORTS_IN_{1,2,4}X resource matching the post-split width
 * (max_width / count) and return its value. NOTE(review): the
 * negative-error returns for unsupported widths / invalid resources
 * are elided from this view.
 */
2200 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
2202 unsigned int max_width)
2204 enum mlxsw_res_id local_ports_in_x_res_id;
2205 int split_width = max_width / count;
2207 if (split_width == 1)
2208 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
2209 else if (split_width == 2)
2210 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
2211 else if (split_width == 4)
2212 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
2216 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
2218 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
/* Look up a port by local port number; guards against the ports[]
 * array not being allocated yet. NOTE(review): the NULL return for the
 * not-found case is elided from this view.
 */
2221 static struct mlxsw_sp_port *
2222 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2224 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2225 return mlxsw_sp->ports[local_port];
/* devlink port-split handler: validate the request (port exists, has
 * max module width, split offset resolvable, target local ports free),
 * remove the ports occupying the target range, and create the split
 * sub-ports. If that fails, the original unsplit ports are restored.
 * Errors are reported both to dmesg and via @extack.
 * NOTE(review): several error returns between the visible checks are
 * elided from this view.
 */
2229 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2231 struct netlink_ext_ack *extack)
2233 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2234 struct mlxsw_sp_port_mapping port_mapping;
2235 struct mlxsw_sp_port *mlxsw_sp_port;
2242 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2243 if (!mlxsw_sp_port) {
2244 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2246 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2250 max_width = mlxsw_core_module_max_width(mlxsw_core,
2251 mlxsw_sp_port->mapping.module);
2252 if (max_width < 0) {
2253 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
2254 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
2258 /* Split port with non-max cannot be split. */
2259 if (mlxsw_sp_port->mapping.width != max_width) {
2260 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
2261 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
2265 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
2267 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
2268 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
2272 /* Only in case max split is being done, the local port and
2273 * base port may differ.
2275 base_port = count == max_width ?
2276 mlxsw_sp_cluster_base_port_get(local_port, max_width) :
/* Make sure the target range holds only the ports we expect. */
2279 for (i = 0; i < count * offset; i++) {
2280 /* Expect base port to exist and also the one in the middle in
2281 * case of maximal split count.
2283 if (i == 0 || (count == max_width && i == count / 2))
2286 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
2287 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2288 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
/* Copy the mapping before the port object is destroyed below. */
2293 port_mapping = mlxsw_sp_port->mapping;
2295 for (i = 0; i < count; i++)
2296 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2297 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2299 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
2302 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2303 goto err_port_split_create;
2308 err_port_split_create:
2309 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
/* devlink port-unsplit handler: validate that @local_port is a split
 * port, derive count/offset from the module's max width and the
 * sub-port width, remove all sub-ports of the split, and recreate the
 * original unsplit ports from the cached module info.
 * NOTE(review): several error returns between the visible checks are
 * elided from this view.
 */
2313 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
2314 struct netlink_ext_ack *extack)
2316 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2317 struct mlxsw_sp_port *mlxsw_sp_port;
2324 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2325 if (!mlxsw_sp_port) {
2326 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2328 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2332 if (!mlxsw_sp_port->split) {
2333 netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
2334 NL_SET_ERR_MSG_MOD(extack, "Port was not split");
2338 max_width = mlxsw_core_module_max_width(mlxsw_core,
2339 mlxsw_sp_port->mapping.module);
2340 if (max_width < 0) {
2341 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
2342 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
2346 count = max_width / mlxsw_sp_port->mapping.width;
2348 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
2349 if (WARN_ON(offset < 0)) {
2350 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
2351 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
2355 base_port = mlxsw_sp_port->split_base_local_port;
2357 for (i = 0; i < count; i++)
2358 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2359 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2361 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
/* Zero the cached per-TC backlog counters when the port goes down;
 * referenced by the note in update_stats_cache(), which skips
 * refreshing the cache while the carrier is off.
 */
2367 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2371 for (i = 0; i < TC_MAX_QUEUE; i++)
2372 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2375 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2376 char *pude_pl, void *priv)
2378 struct mlxsw_sp *mlxsw_sp = priv;
2379 struct mlxsw_sp_port *mlxsw_sp_port;
2380 enum mlxsw_reg_pude_oper_status status;
2383 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2384 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2388 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2389 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2390 netdev_info(mlxsw_sp_port->dev, "link up\n");
2391 netif_carrier_on(mlxsw_sp_port->dev);
2392 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2393 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
2395 netdev_info(mlxsw_sp_port->dev, "link down\n");
2396 netif_carrier_off(mlxsw_sp_port->dev);
2397 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2401 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2402 char *mtpptr_pl, bool ingress)
2408 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2409 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2410 for (i = 0; i < num_rec; i++) {
2416 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2417 &domain_number, &sequence_id,
2419 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2420 message_type, domain_number,
2421 sequence_id, timestamp);
2425 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2426 char *mtpptr_pl, void *priv)
2428 struct mlxsw_sp *mlxsw_sp = priv;
2430 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2433 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2434 char *mtpptr_pl, void *priv)
2436 struct mlxsw_sp *mlxsw_sp = priv;
2438 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2441 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2442 u8 local_port, void *priv)
2444 struct mlxsw_sp *mlxsw_sp = priv;
2445 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2446 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2448 if (unlikely(!mlxsw_sp_port)) {
2449 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2454 skb->dev = mlxsw_sp_port->dev;
2456 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2457 u64_stats_update_begin(&pcpu_stats->syncp);
2458 pcpu_stats->rx_packets++;
2459 pcpu_stats->rx_bytes += skb->len;
2460 u64_stats_update_end(&pcpu_stats->syncp);
2462 skb->protocol = eth_type_trans(skb, skb->dev);
2463 netif_receive_skb(skb);
2466 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2469 skb->offload_fwd_mark = 1;
2470 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2473 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2474 u8 local_port, void *priv)
2476 skb->offload_l3_fwd_mark = 1;
2477 skb->offload_fwd_mark = 1;
2478 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2481 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2484 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2487 void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2490 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2491 struct mlxsw_sp_port_sample *sample;
2494 if (unlikely(!mlxsw_sp_port)) {
2495 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2501 sample = rcu_dereference(mlxsw_sp_port->sample);
2504 size = sample->truncate ? sample->trunc_size : skb->len;
2505 psample_sample_packet(sample->psample_group, skb, size,
2506 mlxsw_sp_port->dev->ifindex, 0, sample->rate);
2513 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2514 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2515 _is_ctrl, SP_##_trap_group, DISCARD)
2517 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2518 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2519 _is_ctrl, SP_##_trap_group, DISCARD)
2521 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2522 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
2523 _is_ctrl, SP_##_trap_group, DISCARD)
2525 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2526 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2528 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2530 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2532 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
2534 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
2536 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
2537 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
2539 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
2541 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
2543 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
2545 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
2547 /* Multicast Router Traps */
2548 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
2549 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
2551 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
2554 static const struct mlxsw_listener mlxsw_sp1_listener[] = {
2556 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
2557 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
2560 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2562 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2563 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2564 enum mlxsw_reg_qpcr_ir_units ir_units;
2565 int max_cpu_policers;
2571 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2574 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2576 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2577 for (i = 0; i < max_cpu_policers; i++) {
2580 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2581 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2582 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2590 __set_bit(i, mlxsw_sp->trap->policers_usage);
2591 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2593 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2601 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2603 char htgt_pl[MLXSW_REG_HTGT_LEN];
2604 enum mlxsw_reg_htgt_trap_group i;
2605 int max_cpu_policers;
2606 int max_trap_groups;
2611 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2614 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2615 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2617 for (i = 0; i < max_trap_groups; i++) {
2620 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2621 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2622 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2626 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2627 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2628 tc = MLXSW_REG_HTGT_DEFAULT_TC;
2629 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2635 if (max_cpu_policers <= policer_id &&
2636 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2639 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2640 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2648 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2649 const struct mlxsw_listener listeners[],
2650 size_t listeners_count)
2655 for (i = 0; i < listeners_count; i++) {
2656 err = mlxsw_core_trap_register(mlxsw_sp->core,
2660 goto err_listener_register;
2665 err_listener_register:
2666 for (i--; i >= 0; i--) {
2667 mlxsw_core_trap_unregister(mlxsw_sp->core,
2674 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2675 const struct mlxsw_listener listeners[],
2676 size_t listeners_count)
2680 for (i = 0; i < listeners_count; i++) {
2681 mlxsw_core_trap_unregister(mlxsw_sp->core,
2687 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2689 struct mlxsw_sp_trap *trap;
2693 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
2695 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
2696 trap = kzalloc(struct_size(trap, policers_usage,
2697 BITS_TO_LONGS(max_policers)), GFP_KERNEL);
2700 trap->max_policers = max_policers;
2701 mlxsw_sp->trap = trap;
2703 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2705 goto err_cpu_policers_set;
2707 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2709 goto err_trap_groups_set;
2711 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
2712 ARRAY_SIZE(mlxsw_sp_listener));
2714 goto err_traps_register;
2716 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
2717 mlxsw_sp->listeners_count);
2719 goto err_extra_traps_init;
2723 err_extra_traps_init:
2724 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2725 ARRAY_SIZE(mlxsw_sp_listener));
2727 err_trap_groups_set:
2728 err_cpu_policers_set:
2733 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2735 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
2736 mlxsw_sp->listeners_count);
2737 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2738 ARRAY_SIZE(mlxsw_sp_listener));
2739 kfree(mlxsw_sp->trap);
2742 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2744 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2746 char slcr_pl[MLXSW_REG_SLCR_LEN];
2750 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2751 MLXSW_SP_LAG_SEED_INIT);
2752 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2753 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2754 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2755 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2756 MLXSW_REG_SLCR_LAG_HASH_SIP |
2757 MLXSW_REG_SLCR_LAG_HASH_DIP |
2758 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2759 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2760 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2761 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2765 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2766 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2769 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2770 sizeof(struct mlxsw_sp_upper),
2772 if (!mlxsw_sp->lags)
2778 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2780 kfree(mlxsw_sp->lags);
2783 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2785 char htgt_pl[MLXSW_REG_HTGT_LEN];
2787 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2788 MLXSW_REG_HTGT_INVALID_POLICER,
2789 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2790 MLXSW_REG_HTGT_DEFAULT_TC);
2791 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2794 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
2795 .clock_init = mlxsw_sp1_ptp_clock_init,
2796 .clock_fini = mlxsw_sp1_ptp_clock_fini,
2797 .init = mlxsw_sp1_ptp_init,
2798 .fini = mlxsw_sp1_ptp_fini,
2799 .receive = mlxsw_sp1_ptp_receive,
2800 .transmitted = mlxsw_sp1_ptp_transmitted,
2801 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
2802 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
2803 .shaper_work = mlxsw_sp1_ptp_shaper_work,
2804 .get_ts_info = mlxsw_sp1_ptp_get_ts_info,
2805 .get_stats_count = mlxsw_sp1_get_stats_count,
2806 .get_stats_strings = mlxsw_sp1_get_stats_strings,
2807 .get_stats = mlxsw_sp1_get_stats,
2810 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
2811 .clock_init = mlxsw_sp2_ptp_clock_init,
2812 .clock_fini = mlxsw_sp2_ptp_clock_fini,
2813 .init = mlxsw_sp2_ptp_init,
2814 .fini = mlxsw_sp2_ptp_fini,
2815 .receive = mlxsw_sp2_ptp_receive,
2816 .transmitted = mlxsw_sp2_ptp_transmitted,
2817 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
2818 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
2819 .shaper_work = mlxsw_sp2_ptp_shaper_work,
2820 .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
2821 .get_stats_count = mlxsw_sp2_get_stats_count,
2822 .get_stats_strings = mlxsw_sp2_get_stats_strings,
2823 .get_stats = mlxsw_sp2_get_stats,
2826 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2827 unsigned long event, void *ptr);
2829 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2830 const struct mlxsw_bus_info *mlxsw_bus_info,
2831 struct netlink_ext_ack *extack)
2833 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2836 mlxsw_sp->core = mlxsw_core;
2837 mlxsw_sp->bus_info = mlxsw_bus_info;
2839 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
2843 mlxsw_core_emad_string_tlv_enable(mlxsw_core);
2845 err = mlxsw_sp_base_mac_get(mlxsw_sp);
2847 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2851 err = mlxsw_sp_kvdl_init(mlxsw_sp);
2853 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
2857 err = mlxsw_sp_fids_init(mlxsw_sp);
2859 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
2863 err = mlxsw_sp_policers_init(mlxsw_sp);
2865 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
2866 goto err_policers_init;
2869 err = mlxsw_sp_traps_init(mlxsw_sp);
2871 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
2872 goto err_traps_init;
2875 err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
2877 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
2878 goto err_devlink_traps_init;
2881 err = mlxsw_sp_buffers_init(mlxsw_sp);
2883 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2884 goto err_buffers_init;
2887 err = mlxsw_sp_lag_init(mlxsw_sp);
2889 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2893 /* Initialize SPAN before router and switchdev, so that those components
2894 * can call mlxsw_sp_span_respin().
2896 err = mlxsw_sp_span_init(mlxsw_sp);
2898 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
2902 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2904 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2905 goto err_switchdev_init;
2908 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
2910 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
2911 goto err_counter_pool_init;
2914 err = mlxsw_sp_afa_init(mlxsw_sp);
2916 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
2920 err = mlxsw_sp_nve_init(mlxsw_sp);
2922 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
2926 err = mlxsw_sp_acl_init(mlxsw_sp);
2928 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
2932 err = mlxsw_sp_router_init(mlxsw_sp, extack);
2934 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2935 goto err_router_init;
2938 if (mlxsw_sp->bus_info->read_frc_capable) {
2939 /* NULL is a valid return value from clock_init */
2941 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
2942 mlxsw_sp->bus_info->dev);
2943 if (IS_ERR(mlxsw_sp->clock)) {
2944 err = PTR_ERR(mlxsw_sp->clock);
2945 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
2946 goto err_ptp_clock_init;
2950 if (mlxsw_sp->clock) {
2951 /* NULL is a valid return value from ptp_ops->init */
2952 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
2953 if (IS_ERR(mlxsw_sp->ptp_state)) {
2954 err = PTR_ERR(mlxsw_sp->ptp_state);
2955 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
2960 /* Initialize netdevice notifier after router and SPAN is initialized,
2961 * so that the event handler can use router structures and call SPAN
2964 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
2965 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2966 &mlxsw_sp->netdevice_nb);
2968 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
2969 goto err_netdev_notifier;
2972 err = mlxsw_sp_dpipe_init(mlxsw_sp);
2974 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
2975 goto err_dpipe_init;
2978 err = mlxsw_sp_port_module_info_init(mlxsw_sp);
2980 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
2981 goto err_port_module_info_init;
2984 err = mlxsw_sp_ports_create(mlxsw_sp);
2986 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2987 goto err_ports_create;
2993 mlxsw_sp_port_module_info_fini(mlxsw_sp);
2994 err_port_module_info_init:
2995 mlxsw_sp_dpipe_fini(mlxsw_sp);
2997 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2998 &mlxsw_sp->netdevice_nb);
2999 err_netdev_notifier:
3000 if (mlxsw_sp->clock)
3001 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3003 if (mlxsw_sp->clock)
3004 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3006 mlxsw_sp_router_fini(mlxsw_sp);
3008 mlxsw_sp_acl_fini(mlxsw_sp);
3010 mlxsw_sp_nve_fini(mlxsw_sp);
3012 mlxsw_sp_afa_fini(mlxsw_sp);
3014 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3015 err_counter_pool_init:
3016 mlxsw_sp_switchdev_fini(mlxsw_sp);
3018 mlxsw_sp_span_fini(mlxsw_sp);
3020 mlxsw_sp_lag_fini(mlxsw_sp);
3022 mlxsw_sp_buffers_fini(mlxsw_sp);
3024 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3025 err_devlink_traps_init:
3026 mlxsw_sp_traps_fini(mlxsw_sp);
3028 mlxsw_sp_policers_fini(mlxsw_sp);
3030 mlxsw_sp_fids_fini(mlxsw_sp);
3032 mlxsw_sp_kvdl_fini(mlxsw_sp);
3036 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3037 const struct mlxsw_bus_info *mlxsw_bus_info,
3038 struct netlink_ext_ack *extack)
3040 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3042 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
3043 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
3044 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3045 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3046 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3047 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3048 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
3049 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3050 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
3051 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
3052 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
3053 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
3054 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
3055 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
3056 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
3057 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
3058 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
3059 mlxsw_sp->listeners = mlxsw_sp1_listener;
3060 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
3061 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
3063 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3066 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3067 const struct mlxsw_bus_info *mlxsw_bus_info,
3068 struct netlink_ext_ack *extack)
3070 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3072 mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
3073 mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
3074 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3075 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3076 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3077 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3078 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3079 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3080 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3081 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3082 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
3083 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3084 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3085 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3086 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
3087 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3088 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3089 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
3091 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3094 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
3095 const struct mlxsw_bus_info *mlxsw_bus_info,
3096 struct netlink_ext_ack *extack)
3098 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3100 mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
3101 mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
3102 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3103 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3104 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3105 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3106 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3107 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3108 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3109 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3110 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
3111 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3112 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3113 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3114 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3115 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3116 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3117 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
3119 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3122 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3124 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3126 mlxsw_sp_ports_remove(mlxsw_sp);
3127 mlxsw_sp_port_module_info_fini(mlxsw_sp);
3128 mlxsw_sp_dpipe_fini(mlxsw_sp);
3129 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3130 &mlxsw_sp->netdevice_nb);
3131 if (mlxsw_sp->clock) {
3132 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3133 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3135 mlxsw_sp_router_fini(mlxsw_sp);
3136 mlxsw_sp_acl_fini(mlxsw_sp);
3137 mlxsw_sp_nve_fini(mlxsw_sp);
3138 mlxsw_sp_afa_fini(mlxsw_sp);
3139 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3140 mlxsw_sp_switchdev_fini(mlxsw_sp);
3141 mlxsw_sp_span_fini(mlxsw_sp);
3142 mlxsw_sp_lag_fini(mlxsw_sp);
3143 mlxsw_sp_buffers_fini(mlxsw_sp);
3144 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3145 mlxsw_sp_traps_fini(mlxsw_sp);
3146 mlxsw_sp_policers_fini(mlxsw_sp);
3147 mlxsw_sp_fids_fini(mlxsw_sp);
3148 mlxsw_sp_kvdl_fini(mlxsw_sp);
3151 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
3154 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
3157 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
3159 .max_mid = MLXSW_SP_MID_MAX,
3160 .used_flood_tables = 1,
3161 .used_flood_mode = 1,
3163 .max_fid_flood_tables = 3,
3164 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
3165 .used_max_ib_mc = 1,
3169 .used_kvd_sizes = 1,
3170 .kvd_hash_single_parts = 59,
3171 .kvd_hash_double_parts = 41,
3172 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3176 .type = MLXSW_PORT_SWID_TYPE_ETH,
3181 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
3183 .max_mid = MLXSW_SP_MID_MAX,
3184 .used_flood_tables = 1,
3185 .used_flood_mode = 1,
3187 .max_fid_flood_tables = 3,
3188 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
3189 .used_max_ib_mc = 1,
3196 .type = MLXSW_PORT_SWID_TYPE_ETH,
3202 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3203 struct devlink_resource_size_params *kvd_size_params,
3204 struct devlink_resource_size_params *linear_size_params,
3205 struct devlink_resource_size_params *hash_double_size_params,
3206 struct devlink_resource_size_params *hash_single_size_params)
3208 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3209 KVD_SINGLE_MIN_SIZE);
3210 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3211 KVD_DOUBLE_MIN_SIZE);
3212 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3213 u32 linear_size_min = 0;
3215 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3216 MLXSW_SP_KVD_GRANULARITY,
3217 DEVLINK_RESOURCE_UNIT_ENTRY);
3218 devlink_resource_size_params_init(linear_size_params, linear_size_min,
3219 kvd_size - single_size_min -
3221 MLXSW_SP_KVD_GRANULARITY,
3222 DEVLINK_RESOURCE_UNIT_ENTRY);
3223 devlink_resource_size_params_init(hash_double_size_params,
3225 kvd_size - single_size_min -
3227 MLXSW_SP_KVD_GRANULARITY,
3228 DEVLINK_RESOURCE_UNIT_ENTRY);
3229 devlink_resource_size_params_init(hash_single_size_params,
3231 kvd_size - double_size_min -
3233 MLXSW_SP_KVD_GRANULARITY,
3234 DEVLINK_RESOURCE_UNIT_ENTRY);
3237 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3239 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3240 struct devlink_resource_size_params hash_single_size_params;
3241 struct devlink_resource_size_params hash_double_size_params;
3242 struct devlink_resource_size_params linear_size_params;
3243 struct devlink_resource_size_params kvd_size_params;
3244 u32 kvd_size, single_size, double_size, linear_size;
3245 const struct mlxsw_config_profile *profile;
3248 profile = &mlxsw_sp1_config_profile;
3249 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3252 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3253 &linear_size_params,
3254 &hash_double_size_params,
3255 &hash_single_size_params);
3257 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3258 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3259 kvd_size, MLXSW_SP_RESOURCE_KVD,
3260 DEVLINK_RESOURCE_ID_PARENT_TOP,
3265 linear_size = profile->kvd_linear_size;
3266 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3268 MLXSW_SP_RESOURCE_KVD_LINEAR,
3269 MLXSW_SP_RESOURCE_KVD,
3270 &linear_size_params);
3274 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3278 double_size = kvd_size - linear_size;
3279 double_size *= profile->kvd_hash_double_parts;
3280 double_size /= profile->kvd_hash_double_parts +
3281 profile->kvd_hash_single_parts;
3282 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3283 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3285 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3286 MLXSW_SP_RESOURCE_KVD,
3287 &hash_double_size_params);
3291 single_size = kvd_size - double_size - linear_size;
3292 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3294 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3295 MLXSW_SP_RESOURCE_KVD,
3296 &hash_single_size_params);
3303 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3305 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3306 struct devlink_resource_size_params kvd_size_params;
3309 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3312 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3313 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3314 MLXSW_SP_KVD_GRANULARITY,
3315 DEVLINK_RESOURCE_UNIT_ENTRY);
3317 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3318 kvd_size, MLXSW_SP_RESOURCE_KVD,
3319 DEVLINK_RESOURCE_ID_PARENT_TOP,
3323 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3325 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3326 struct devlink_resource_size_params span_size_params;
3329 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3332 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3333 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3334 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3336 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3337 max_span, MLXSW_SP_RESOURCE_SPAN,
3338 DEVLINK_RESOURCE_ID_PARENT_TOP,
3342 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3346 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3350 err = mlxsw_sp_resources_span_register(mlxsw_core);
3352 goto err_resources_span_register;
3354 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3356 goto err_resources_counter_register;
3358 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3360 goto err_resources_counter_register;
3364 err_resources_counter_register:
3365 err_resources_span_register:
3366 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3370 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3374 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3378 err = mlxsw_sp_resources_span_register(mlxsw_core);
3380 goto err_resources_span_register;
3382 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3384 goto err_resources_counter_register;
3386 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3388 goto err_resources_counter_register;
3392 err_resources_counter_register:
3393 err_resources_span_register:
3394 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3398 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3399 const struct mlxsw_config_profile *profile,
3400 u64 *p_single_size, u64 *p_double_size,
3403 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3407 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3408 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3411 /* The hash part is what left of the kvd without the
3412 * linear part. It is split to the single size and
3413 * double size by the parts ratio from the profile.
3414 * Both sizes must be a multiplications of the
3415 * granularity from the profile. In case the user
3416 * provided the sizes they are obtained via devlink.
3418 err = devlink_resource_size_get(devlink,
3419 MLXSW_SP_RESOURCE_KVD_LINEAR,
3422 *p_linear_size = profile->kvd_linear_size;
3424 err = devlink_resource_size_get(devlink,
3425 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3428 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3430 double_size *= profile->kvd_hash_double_parts;
3431 double_size /= profile->kvd_hash_double_parts +
3432 profile->kvd_hash_single_parts;
3433 *p_double_size = rounddown(double_size,
3434 MLXSW_SP_KVD_GRANULARITY);
3437 err = devlink_resource_size_get(devlink,
3438 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3441 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3442 *p_double_size - *p_linear_size;
3444 /* Check results are legal. */
3445 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3446 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
3447 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
3454 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
3455 union devlink_param_value val,
3456 struct netlink_ext_ack *extack)
3458 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
3459 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
3460 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
3467 static const struct devlink_param mlxsw_sp_devlink_params[] = {
3468 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
3469 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
3471 mlxsw_sp_devlink_param_fw_load_policy_validate),
3474 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
3476 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3477 union devlink_param_value value;
3480 err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
3481 ARRAY_SIZE(mlxsw_sp_devlink_params));
3485 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
3486 devlink_param_driverinit_value_set(devlink,
3487 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
3492 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
3494 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3495 mlxsw_sp_devlink_params,
3496 ARRAY_SIZE(mlxsw_sp_devlink_params));
3500 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3501 struct devlink_param_gset_ctx *ctx)
3503 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3504 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3506 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3511 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3512 struct devlink_param_gset_ctx *ctx)
3514 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3515 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3517 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3520 static const struct devlink_param mlxsw_sp2_devlink_params[] = {
3521 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3522 "acl_region_rehash_interval",
3523 DEVLINK_PARAM_TYPE_U32,
3524 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3525 mlxsw_sp_params_acl_region_rehash_intrvl_get,
3526 mlxsw_sp_params_acl_region_rehash_intrvl_set,
/* Spectrum-2+ parameter registration: first register the common params, then
 * the SP2-specific ones; unwinds the common registration on failure.
 */
3530 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3532 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3533 union devlink_param_value value;
3536 err = mlxsw_sp_params_register(mlxsw_core);
3540 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3541 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3543 goto err_devlink_params_register;
3546 devlink_param_driverinit_value_set(devlink,
3547 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3551 err_devlink_params_register:
3552 mlxsw_sp_params_unregister(mlxsw_core);
/* Tear down in reverse order of mlxsw_sp2_params_register(): SP2-specific
 * params first, then the common ones.
 */
3556 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3558 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3559 mlxsw_sp2_devlink_params,
3560 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3561 mlxsw_sp_params_unregister(mlxsw_core);
/* Core callback for a transmitted PTP skb: strip the mlxsw Tx header and hand
 * the frame to the generation-specific PTP implementation for timestamping.
 */
3564 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3565 struct sk_buff *skb, u8 local_port)
3567 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3569 skb_pull(skb, MLXSW_TXHDR_LEN);
3570 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
/* mlxsw core driver ops for Spectrum-1: SP1-specific init, resource
 * registration, KVD sizing and config profile; the rest of the ops are shared
 * with the other generations.
 */
3573 static struct mlxsw_driver mlxsw_sp1_driver = {
3574 .kind = mlxsw_sp1_driver_name,
3575 .priv_size = sizeof(struct mlxsw_sp),
3576 .init = mlxsw_sp1_init,
3577 .fini = mlxsw_sp_fini,
3578 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3579 .port_split = mlxsw_sp_port_split,
3580 .port_unsplit = mlxsw_sp_port_unsplit,
3581 .sb_pool_get = mlxsw_sp_sb_pool_get,
3582 .sb_pool_set = mlxsw_sp_sb_pool_set,
3583 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3584 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3585 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3586 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3587 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3588 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3589 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3590 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3591 .flash_update = mlxsw_sp_flash_update,
3592 .trap_init = mlxsw_sp_trap_init,
3593 .trap_fini = mlxsw_sp_trap_fini,
3594 .trap_action_set = mlxsw_sp_trap_action_set,
3595 .trap_group_init = mlxsw_sp_trap_group_init,
3596 .trap_group_set = mlxsw_sp_trap_group_set,
3597 .trap_policer_init = mlxsw_sp_trap_policer_init,
3598 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
3599 .trap_policer_set = mlxsw_sp_trap_policer_set,
3600 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
3601 .txhdr_construct = mlxsw_sp_txhdr_construct,
3602 .resources_register = mlxsw_sp1_resources_register,
3603 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
3604 .params_register = mlxsw_sp_params_register,
3605 .params_unregister = mlxsw_sp_params_unregister,
3606 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
3607 .txhdr_len = MLXSW_TXHDR_LEN,
3608 .profile = &mlxsw_sp1_config_profile,
3609 .res_query_enabled = true,
/* mlxsw core driver ops for Spectrum-2: differs from SP1 in init, resource
 * registration, params (adds ACL rehash interval) and config profile; no
 * kvd_sizes_get since SP2+ does not use the SP1 KVD linear layout.
 */
3612 static struct mlxsw_driver mlxsw_sp2_driver = {
3613 .kind = mlxsw_sp2_driver_name,
3614 .priv_size = sizeof(struct mlxsw_sp),
3615 .init = mlxsw_sp2_init,
3616 .fini = mlxsw_sp_fini,
3617 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3618 .port_split = mlxsw_sp_port_split,
3619 .port_unsplit = mlxsw_sp_port_unsplit,
3620 .sb_pool_get = mlxsw_sp_sb_pool_get,
3621 .sb_pool_set = mlxsw_sp_sb_pool_set,
3622 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3623 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3624 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3625 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3626 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3627 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3628 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3629 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3630 .flash_update = mlxsw_sp_flash_update,
3631 .trap_init = mlxsw_sp_trap_init,
3632 .trap_fini = mlxsw_sp_trap_fini,
3633 .trap_action_set = mlxsw_sp_trap_action_set,
3634 .trap_group_init = mlxsw_sp_trap_group_init,
3635 .trap_group_set = mlxsw_sp_trap_group_set,
3636 .trap_policer_init = mlxsw_sp_trap_policer_init,
3637 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
3638 .trap_policer_set = mlxsw_sp_trap_policer_set,
3639 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
3640 .txhdr_construct = mlxsw_sp_txhdr_construct,
3641 .resources_register = mlxsw_sp2_resources_register,
3642 .params_register = mlxsw_sp2_params_register,
3643 .params_unregister = mlxsw_sp2_params_unregister,
3644 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
3645 .txhdr_len = MLXSW_TXHDR_LEN,
3646 .profile = &mlxsw_sp2_config_profile,
3647 .res_query_enabled = true,
/* mlxsw core driver ops for Spectrum-3: identical to the SP2 ops table apart
 * from its own kind string and init callback; reuses SP2 resources, params
 * and config profile.
 */
3650 static struct mlxsw_driver mlxsw_sp3_driver = {
3651 .kind = mlxsw_sp3_driver_name,
3652 .priv_size = sizeof(struct mlxsw_sp),
3653 .init = mlxsw_sp3_init,
3654 .fini = mlxsw_sp_fini,
3655 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3656 .port_split = mlxsw_sp_port_split,
3657 .port_unsplit = mlxsw_sp_port_unsplit,
3658 .sb_pool_get = mlxsw_sp_sb_pool_get,
3659 .sb_pool_set = mlxsw_sp_sb_pool_set,
3660 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3661 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3662 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3663 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3664 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3665 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3666 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3667 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3668 .flash_update = mlxsw_sp_flash_update,
3669 .trap_init = mlxsw_sp_trap_init,
3670 .trap_fini = mlxsw_sp_trap_fini,
3671 .trap_action_set = mlxsw_sp_trap_action_set,
3672 .trap_group_init = mlxsw_sp_trap_group_init,
3673 .trap_group_set = mlxsw_sp_trap_group_set,
3674 .trap_policer_init = mlxsw_sp_trap_policer_init,
3675 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
3676 .trap_policer_set = mlxsw_sp_trap_policer_set,
3677 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
3678 .txhdr_construct = mlxsw_sp_txhdr_construct,
3679 .resources_register = mlxsw_sp2_resources_register,
3680 .params_register = mlxsw_sp2_params_register,
3681 .params_unregister = mlxsw_sp2_params_unregister,
3682 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
3683 .txhdr_len = MLXSW_TXHDR_LEN,
3684 .profile = &mlxsw_sp2_config_profile,
3685 .res_query_enabled = true,
/* Return true iff @dev is an mlxsw_sp front-panel port netdev, identified by
 * its netdev_ops pointer.
 */
3688 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3690 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
/* netdev_walk_all_lower_dev() callback: store the first mlxsw_sp port found
 * among the lower devices into the mlxsw_sp_port pointer passed via @data.
 */
3693 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3695 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3698 if (mlxsw_sp_port_dev_check(lower_dev)) {
3699 *p_mlxsw_sp_port = netdev_priv(lower_dev);
/* Find the mlxsw_sp port underlying @dev: either @dev itself or the first
 * mlxsw_sp port among its lower devices; NULL if none. Caller must hold RTNL
 * (walks the lower-device list non-RCU).
 */
3706 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3708 struct mlxsw_sp_port *mlxsw_sp_port;
3710 if (mlxsw_sp_port_dev_check(dev))
3711 return netdev_priv(dev);
3713 mlxsw_sp_port = NULL;
3714 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3716 return mlxsw_sp_port;
/* Resolve the mlxsw_sp instance that @dev (or one of its lowers) belongs to,
 * or NULL if the device is unrelated to this driver.
 */
3719 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3721 struct mlxsw_sp_port *mlxsw_sp_port;
3723 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3724 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller must be in an RCU
 * read-side critical section.
 */
3727 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3729 struct mlxsw_sp_port *mlxsw_sp_port;
3731 if (mlxsw_sp_port_dev_check(dev))
3732 return netdev_priv(dev);
3734 mlxsw_sp_port = NULL;
3735 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3738 return mlxsw_sp_port;
/* Find the mlxsw_sp port under @dev and take a reference on its netdev so it
 * outlives the RCU section; release with mlxsw_sp_port_dev_put().
 */
3741 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3743 struct mlxsw_sp_port *mlxsw_sp_port;
3746 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3748 dev_hold(mlxsw_sp_port->dev);
3750 return mlxsw_sp_port;
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
3753 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3755 dev_put(mlxsw_sp_port->dev);
/* When a port leaves a LAG, make the port leave any bridge the LAG itself is
 * in, and any bridge reached through the LAG's upper devices (e.g. VLAN
 * uppers enslaved to a bridge).
 */
3759 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
3760 struct net_device *lag_dev)
3762 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
3763 struct net_device *upper_dev;
3764 struct list_head *iter;
3766 if (netif_is_bridge_port(lag_dev))
3767 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
3769 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
3770 if (!netif_is_bridge_port(upper_dev))
3772 br_dev = netdev_master_upper_dev_get(upper_dev);
3773 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
/* Create LAG @lag_id in hardware via the SLDR register. */
3777 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3779 char sldr_pl[MLXSW_REG_SLDR_LEN];
3781 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3782 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
/* Destroy LAG @lag_id in hardware via the SLDR register. */
3785 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3787 char sldr_pl[MLXSW_REG_SLDR_LEN];
3789 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3790 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
/* Add the port to LAG @lag_id's collector at @port_index (SLCOR register);
 * the port is mapped but not yet enabled for traffic distribution.
 */
3793 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3794 u16 lag_id, u8 port_index)
3796 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3797 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3799 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3800 lag_id, port_index);
3801 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
/* Remove the port from the LAG collector (SLCOR register). */
3804 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3807 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3808 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3810 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3812 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
/* Enable collection on the port's LAG membership (SLCOR register). */
3815 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3818 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3819 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3821 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3823 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
/* Disable collection on the port's LAG membership (SLCOR register). */
3826 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3829 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3830 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3832 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3834 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
/* Map @lag_dev to a hardware LAG index: reuse the index already holding this
 * netdev if referenced, otherwise return the first free index. Fails when all
 * MAX_LAG entries are taken by other devices.
 */
3837 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3838 struct net_device *lag_dev,
3841 struct mlxsw_sp_upper *lag;
3842 int free_lag_id = -1;
3846 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3847 for (i = 0; i < max_lag; i++) {
3848 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3849 if (lag->ref_count) {
3850 if (lag->dev == lag_dev) {
3854 } else if (free_lag_id < 0) {
3858 if (free_lag_id < 0)
3860 *p_lag_id = free_lag_id;
/* Validate that @lag_dev can be offloaded: a LAG index must be available and
 * the bond must use hash-based Tx (the only type the ASIC supports); sets a
 * netlink extack message on rejection.
 */
3865 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3866 struct net_device *lag_dev,
3867 struct netdev_lag_upper_info *lag_upper_info,
3868 struct netlink_ext_ack *extack)
3872 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3873 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3876 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3877 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
/* Find a free member slot inside LAG @lag_id, bounded by the device's
 * MAX_LAG_MEMBERS resource.
 */
3883 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3884 u16 lag_id, u8 *p_port_index)
3886 u64 max_lag_members;
3889 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3891 for (i = 0; i < max_lag_members; i++) {
3892 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
/* Enslave the port to @lag_dev in hardware: resolve/allocate the LAG index,
 * create the LAG on first member, add the port to the collector, record the
 * LAG mapping in core and mark the port lagged. A lagged port can no longer
 * serve as a router interface, so its default-VLAN RIF is torn down. The
 * error path destroys the LAG again if this port was its only prospective
 * member.
 */
3900 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3901 struct net_device *lag_dev)
3903 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3904 struct mlxsw_sp_upper *lag;
3909 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3912 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3913 if (!lag->ref_count) {
3914 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3920 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3923 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3925 goto err_col_port_add;
3927 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3928 mlxsw_sp_port->local_port);
3929 mlxsw_sp_port->lag_id = lag_id;
3930 mlxsw_sp_port->lagged = 1;
3933 /* Port is no longer usable as a router interface */
3934 if (mlxsw_sp_port->default_vlan->fid)
3935 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
3940 if (!lag->ref_count)
3941 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
/* Reverse of mlxsw_sp_port_lag_join(): remove the port from the collector,
 * flush its VLANs (they belonged to the LAG context), pull the port out of
 * bridges reached via the LAG, destroy the LAG on last member, clear the core
 * mapping and restore the default PVID so untagged traffic still ingresses.
 * No-op if the port is not lagged.
 */
3945 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3946 struct net_device *lag_dev)
3948 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3949 u16 lag_id = mlxsw_sp_port->lag_id;
3950 struct mlxsw_sp_upper *lag;
3952 if (!mlxsw_sp_port->lagged)
3954 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3955 WARN_ON(lag->ref_count == 0);
3957 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3959 /* Any VLANs configured on the port are no longer valid */
3960 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
3961 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
3962 /* Make the LAG and its directly linked uppers leave bridges they
3965 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
3967 if (lag->ref_count == 1)
3968 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3970 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3971 mlxsw_sp_port->local_port);
3972 mlxsw_sp_port->lagged = 0;
3975 /* Make sure untagged frames are allowed to ingress */
3976 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
/* Add the port to the LAG's Tx distributor list (SLDR register) so egress
 * traffic may be hashed to it.
 */
3979 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3982 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3983 char sldr_pl[MLXSW_REG_SLDR_LEN];
3985 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3986 mlxsw_sp_port->local_port);
3987 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
/* Remove the port from the LAG's Tx distributor list (SLDR register). */
3990 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3993 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3994 char sldr_pl[MLXSW_REG_SLDR_LEN];
3996 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3997 mlxsw_sp_port->local_port);
3998 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
/* Activate the port inside its LAG: enable collection, then add it to the
 * distributor; undoes the collector enable if distribution setup fails.
 */
4002 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4006 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4007 mlxsw_sp_port->lag_id);
4011 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4013 goto err_dist_port_add;
4018 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
/* Deactivate the port inside its LAG in reverse order: remove it from the
 * distributor, then disable collection; re-adds the distributor entry if the
 * collector disable fails.
 */
4023 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4027 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4028 mlxsw_sp_port->lag_id);
4032 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4033 mlxsw_sp_port->lag_id);
4035 goto err_col_port_disable;
4039 err_col_port_disable:
4040 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
/* Reflect the bonding driver's tx_enabled state for this slave into the
 * hardware LAG collector/distributor.
 */
4044 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4045 struct netdev_lag_lower_state_info *info)
4047 if (info->tx_enabled)
4048 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4050 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
/* Set the port's STP state to forwarding or discarding for every VID via the
 * SPMS register; the payload is heap-allocated because SPMS covers all 4K
 * VLANs and is too large for the stack.
 */
4053 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4056 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4057 enum mlxsw_reg_spms_state spms_state;
4062 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4063 MLXSW_REG_SPMS_STATE_DISCARDING;
4065 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4068 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4070 for (vid = 0; vid < VLAN_N_VID; vid++)
4071 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4073 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
/* Prepare the port for Open vSwitch enslavement: switch to virtual-port
 * (per-{port,VID}) mode, force STP forwarding, enable membership in almost
 * the whole VID range and turn off learning per VID (OVS controls forwarding
 * itself). Each error path unwinds the steps already taken.
 */
4078 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4083 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4086 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4088 goto err_port_stp_set;
4089 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4092 goto err_port_vlan_set;
4094 for (; vid <= VLAN_N_VID - 1; vid++) {
4095 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4098 goto err_vid_learning_set;
4103 err_vid_learning_set:
4104 for (vid--; vid >= 1; vid--)
4105 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4107 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4109 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
/* Undo mlxsw_sp_port_ovs_join() in reverse order: re-enable learning per VID,
 * drop the VID-range membership, restore STP/VLAN mode defaults.
 */
4113 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4117 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4118 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4121 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4123 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4124 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
/* Return true when more than one VxLAN device is a lower of @br_dev. */
4127 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4129 unsigned int num_vxlans = 0;
4130 struct net_device *dev;
4131 struct list_head *iter;
4133 netdev_for_each_lower_dev(br_dev, dev, iter) {
4134 if (netif_is_vxlan(dev))
4138 return num_vxlans > 1;
/* For a VLAN-aware bridge, verify no two VxLAN lowers map to the same VLAN
 * (PVID + egress untagged) — the bitmap catches duplicates via
 * test_and_set_bit().
 */
4141 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4143 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4144 struct net_device *dev;
4145 struct list_head *iter;
4147 netdev_for_each_lower_dev(br_dev, dev, iter) {
4151 if (!netif_is_vxlan(dev))
4154 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4158 if (test_and_set_bit(pvid, vlans))
/* Check offload constraints for a bridge with VxLAN lowers: multicast must be
 * disabled, a VLAN-unaware bridge may hold only one VxLAN device, and in a
 * VLAN-aware bridge VxLAN devices must map to distinct VLANs. Sets extack on
 * each violation.
 */
4165 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4166 struct netlink_ext_ack *extack)
4168 if (br_multicast_enabled(br_dev)) {
4169 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4173 if (!br_vlan_enabled(br_dev) &&
4174 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4175 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4179 if (br_vlan_enabled(br_dev) &&
4180 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4181 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
/* Handle PRECHANGEUPPER/CHANGEUPPER for a front-panel port netdev.
 * PRECHANGEUPPER vetoes unsupported topologies (unknown upper types, uppers
 * that already have uppers, non-hash LAGs, VLAN-on-LAG-port, macvlan without
 * a RIF, VLANs on OVS ports/masters) with extack messages. CHANGEUPPER
 * performs the actual join/leave for bridge, LAG, OVS and macvlan uppers, and
 * pulls a VLAN upper out of its bridge when the VLAN link is torn down.
 */
4188 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4189 struct net_device *dev,
4190 unsigned long event, void *ptr)
4192 struct netdev_notifier_changeupper_info *info;
4193 struct mlxsw_sp_port *mlxsw_sp_port;
4194 struct netlink_ext_ack *extack;
4195 struct net_device *upper_dev;
4196 struct mlxsw_sp *mlxsw_sp;
4199 mlxsw_sp_port = netdev_priv(dev);
4200 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4202 extack = netdev_notifier_info_to_extack(&info->info);
4205 case NETDEV_PRECHANGEUPPER:
4206 upper_dev = info->upper_dev;
4207 if (!is_vlan_dev(upper_dev) &&
4208 !netif_is_lag_master(upper_dev) &&
4209 !netif_is_bridge_master(upper_dev) &&
4210 !netif_is_ovs_master(upper_dev) &&
4211 !netif_is_macvlan(upper_dev)) {
4212 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4217 if (netif_is_bridge_master(upper_dev) &&
4218 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4219 mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4220 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4222 if (netdev_has_any_upper_dev(upper_dev) &&
4223 (!netif_is_bridge_master(upper_dev) ||
4224 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4226 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
4229 if (netif_is_lag_master(upper_dev) &&
4230 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4231 info->upper_info, extack))
4233 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
4234 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
4237 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4238 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
4239 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
4242 if (netif_is_macvlan(upper_dev) &&
4243 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
4244 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4247 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
4248 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
4251 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
4252 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4256 case NETDEV_CHANGEUPPER:
4257 upper_dev = info->upper_dev;
4258 if (netif_is_bridge_master(upper_dev)) {
4260 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4265 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4268 } else if (netif_is_lag_master(upper_dev)) {
4269 if (info->linking) {
4270 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4273 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4274 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4277 } else if (netif_is_ovs_master(upper_dev)) {
4279 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4281 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4282 } else if (netif_is_macvlan(upper_dev)) {
4284 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4285 } else if (is_vlan_dev(upper_dev)) {
4286 struct net_device *br_dev;
4288 if (!netif_is_bridge_port(upper_dev))
4292 br_dev = netdev_master_upper_dev_get(upper_dev);
4293 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
/* Handle CHANGELOWERSTATE for a port that is a LAG slave: propagate the
 * bond's lower-state (tx_enabled) change into hardware; log but do not fail
 * the notifier on error.
 */
4302 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4303 unsigned long event, void *ptr)
4305 struct netdev_notifier_changelowerstate_info *info;
4306 struct mlxsw_sp_port *mlxsw_sp_port;
4309 mlxsw_sp_port = netdev_priv(dev);
4313 case NETDEV_CHANGELOWERSTATE:
4314 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4315 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4316 info->lower_state_info);
4318 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
/* Dispatch a netdev event on a port device to the upper-event or
 * lower-state-event handler based on event type.
 */
4326 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4327 struct net_device *port_dev,
4328 unsigned long event, void *ptr)
4331 case NETDEV_PRECHANGEUPPER:
4332 case NETDEV_CHANGEUPPER:
4333 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4335 case NETDEV_CHANGELOWERSTATE:
4336 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
/* Fan out an event on a LAG netdev to every mlxsw_sp port enslaved to it. */
4343 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4344 unsigned long event, void *ptr)
4346 struct net_device *dev;
4347 struct list_head *iter;
4350 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4351 if (mlxsw_sp_port_dev_check(dev)) {
4352 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device whose real device is an
 * mlxsw_sp port: only bridge and macvlan uppers are allowed (macvlan requires
 * an existing RIF on the VLAN device); CHANGEUPPER joins/leaves the bridge or
 * deletes the macvlan's RIF association.
 */
4362 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4363 struct net_device *dev,
4364 unsigned long event, void *ptr,
4367 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4368 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4369 struct netdev_notifier_changeupper_info *info = ptr;
4370 struct netlink_ext_ack *extack;
4371 struct net_device *upper_dev;
4374 extack = netdev_notifier_info_to_extack(&info->info);
4377 case NETDEV_PRECHANGEUPPER:
4378 upper_dev = info->upper_dev;
4379 if (!netif_is_bridge_master(upper_dev) &&
4380 !netif_is_macvlan(upper_dev)) {
4381 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4386 if (netif_is_bridge_master(upper_dev) &&
4387 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4388 mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4389 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4391 if (netdev_has_any_upper_dev(upper_dev) &&
4392 (!netif_is_bridge_master(upper_dev) ||
4393 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4395 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
4398 if (netif_is_macvlan(upper_dev) &&
4399 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4400 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4404 case NETDEV_CHANGEUPPER:
4405 upper_dev = info->upper_dev;
4406 if (netif_is_bridge_master(upper_dev)) {
4408 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4413 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4416 } else if (netif_is_macvlan(upper_dev)) {
4418 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
/* Fan out a VLAN-over-LAG event to every mlxsw_sp port that is a member of
 * the LAG under the VLAN device.
 */
4429 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4430 struct net_device *lag_dev,
4431 unsigned long event,
4434 struct net_device *dev;
4435 struct list_head *iter;
4438 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4439 if (mlxsw_sp_port_dev_check(dev)) {
4440 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
/* Handle upper events for a VLAN device on top of a bridge: only macvlan
 * uppers are supported and only when a RIF exists for the VLAN device;
 * unlinking deletes the macvlan's RIF association.
 */
4451 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4452 struct net_device *br_dev,
4453 unsigned long event, void *ptr,
4456 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4457 struct netdev_notifier_changeupper_info *info = ptr;
4458 struct netlink_ext_ack *extack;
4459 struct net_device *upper_dev;
4464 extack = netdev_notifier_info_to_extack(&info->info);
4467 case NETDEV_PRECHANGEUPPER:
4468 upper_dev = info->upper_dev;
4469 if (!netif_is_macvlan(upper_dev)) {
4470 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4475 if (netif_is_macvlan(upper_dev) &&
4476 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4477 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4481 case NETDEV_CHANGEUPPER:
4482 upper_dev = info->upper_dev;
4485 if (netif_is_macvlan(upper_dev))
4486 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
/* Route a netdev event on a VLAN device to the handler matching its real
 * device: mlxsw_sp port, LAG master or bridge master.
 */
4493 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4494 unsigned long event, void *ptr)
4496 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4497 u16 vid = vlan_dev_vlan_id(vlan_dev);
4499 if (mlxsw_sp_port_dev_check(real_dev))
4500 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4502 else if (netif_is_lag_master(real_dev))
4503 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4506 else if (netif_is_bridge_master(real_dev))
4507 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
/* Handle upper events on a bridge netdev itself: only VLAN and macvlan uppers
 * are supported (macvlan needs a RIF on the bridge); unlinking destroys the
 * VLAN upper's RIF or the macvlan's RIF association.
 */
4513 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4514 unsigned long event, void *ptr)
4516 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4517 struct netdev_notifier_changeupper_info *info = ptr;
4518 struct netlink_ext_ack *extack;
4519 struct net_device *upper_dev;
4524 extack = netdev_notifier_info_to_extack(&info->info);
4527 case NETDEV_PRECHANGEUPPER:
4528 upper_dev = info->upper_dev;
4529 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
4530 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4535 if (netif_is_macvlan(upper_dev) &&
4536 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
4537 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4541 case NETDEV_CHANGEUPPER:
4542 upper_dev = info->upper_dev;
4545 if (is_vlan_dev(upper_dev))
4546 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
4547 if (netif_is_macvlan(upper_dev))
4548 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
/* A macvlan on top of a mlxsw_sp RIF may not itself gain uppers (except VRF,
 * handled elsewhere): veto PRECHANGEUPPER with an extack message.
 */
4555 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4556 unsigned long event, void *ptr)
4558 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4559 struct netdev_notifier_changeupper_info *info = ptr;
4560 struct netlink_ext_ack *extack;
4562 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4565 extack = netdev_notifier_info_to_extack(&info->info);
4567 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4568 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
/* True when the event is a (PRE)CHANGEUPPER whose upper is an L3 master
 * (VRF) device.
 */
4573 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4575 struct netdev_notifier_changeupper_info *info = ptr;
4577 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4579 return netif_is_l3_master(info->upper_dev);
/* Handle netdev events on a VxLAN device bridged to this ASIC. On
 * CHANGEUPPER toward a bridge: validate the topology and join the bridge's
 * VxLAN offload when linking (deferred if the bridge is VLAN-aware, since the
 * VNI->VLAN mapping is not yet known), or leave it when unlinking. The later
 * cases (internal line numbers 4626+) resolve the bridge master from the
 * device itself for join/leave — presumably the up/down event paths; confirm
 * against the full source.
 */
4582 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
4583 struct net_device *dev,
4584 unsigned long event, void *ptr)
4586 struct netdev_notifier_changeupper_info *cu_info;
4587 struct netdev_notifier_info *info = ptr;
4588 struct netlink_ext_ack *extack;
4589 struct net_device *upper_dev;
4591 extack = netdev_notifier_info_to_extack(info);
4594 case NETDEV_CHANGEUPPER:
4595 cu_info = container_of(info,
4596 struct netdev_notifier_changeupper_info,
4598 upper_dev = cu_info->upper_dev;
4599 if (!netif_is_bridge_master(upper_dev))
4601 if (!mlxsw_sp_lower_get(upper_dev))
4603 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4605 if (cu_info->linking) {
4606 if (!netif_running(dev))
4608 /* When the bridge is VLAN-aware, the VNI of the VxLAN
4609 * device needs to be mapped to a VLAN, but at this
4610 * point no VLANs are configured on the VxLAN device
4612 if (br_vlan_enabled(upper_dev))
4614 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
4617 /* VLANs were already flushed, which triggered the
4620 if (br_vlan_enabled(upper_dev))
4622 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
4626 upper_dev = netdev_master_upper_dev_get(dev);
4629 if (!netif_is_bridge_master(upper_dev))
4631 if (!mlxsw_sp_lower_get(upper_dev))
4633 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
4636 upper_dev = netdev_master_upper_dev_get(dev);
4639 if (!netif_is_bridge_master(upper_dev))
4641 if (!mlxsw_sp_lower_get(upper_dev))
4643 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
/* Top-level netdevice notifier for this ASIC instance. On UNREGISTER,
 * invalidate any SPAN entry mirroring to the vanishing device and re-resolve
 * SPAN state. Then dispatch by device role: VxLAN, IP-in-IP overlay/underlay,
 * router port address/MTU changes, VRF enslavement, front-panel port, LAG,
 * VLAN, bridge or macvlan. Returns the handler's errno wrapped for the
 * notifier chain.
 */
4650 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
4651 unsigned long event, void *ptr)
4653 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4654 struct mlxsw_sp_span_entry *span_entry;
4655 struct mlxsw_sp *mlxsw_sp;
4658 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
4659 if (event == NETDEV_UNREGISTER) {
4660 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
4662 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
4664 mlxsw_sp_span_respin(mlxsw_sp);
4666 if (netif_is_vxlan(dev))
4667 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
4668 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
4669 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
4671 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
4672 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
4674 else if (event == NETDEV_PRE_CHANGEADDR ||
4675 event == NETDEV_CHANGEADDR ||
4676 event == NETDEV_CHANGEMTU)
4677 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
4678 else if (mlxsw_sp_is_vrf_event(event, ptr))
4679 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4680 else if (mlxsw_sp_port_dev_check(dev))
4681 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4682 else if (netif_is_lag_master(dev))
4683 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4684 else if (is_vlan_dev(dev))
4685 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4686 else if (netif_is_bridge_master(dev))
4687 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
4688 else if (netif_is_macvlan(dev))
4689 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
4691 return notifier_from_errno(err);
/* Module-wide notifier blocks validating IPv4/IPv6 address addition on
 * devices related to this driver.
 */
4694 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
4695 .notifier_call = mlxsw_sp_inetaddr_valid_event,
4698 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
4699 .notifier_call = mlxsw_sp_inet6addr_valid_event,
/* PCI ID tables and pci_driver declarations, one per Spectrum generation;
 * registered with the mlxsw PCI layer in mlxsw_sp_module_init().
 */
4702 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
4703 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4707 static struct pci_driver mlxsw_sp1_pci_driver = {
4708 .name = mlxsw_sp1_driver_name,
4709 .id_table = mlxsw_sp1_pci_id_table,
4712 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
4713 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
4717 static struct pci_driver mlxsw_sp2_pci_driver = {
4718 .name = mlxsw_sp2_driver_name,
4719 .id_table = mlxsw_sp2_pci_id_table,
4722 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
4723 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
4727 static struct pci_driver mlxsw_sp3_pci_driver = {
4728 .name = mlxsw_sp3_driver_name,
4729 .id_table = mlxsw_sp3_pci_id_table,
/* Module init: register the address-validator notifiers, then the three core
 * drivers (SP1/SP2/SP3) and the three PCI drivers in order; errors unwind
 * everything already registered, in exact reverse order.
 */
4732 static int __init mlxsw_sp_module_init(void)
4736 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4737 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4739 err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
4741 goto err_sp1_core_driver_register;
4743 err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
4745 goto err_sp2_core_driver_register;
4747 err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
4749 goto err_sp3_core_driver_register;
4751 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
4753 goto err_sp1_pci_driver_register;
4755 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
4757 goto err_sp2_pci_driver_register;
4759 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
4761 goto err_sp3_pci_driver_register;
4765 err_sp3_pci_driver_register:
4766 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
4767 err_sp2_pci_driver_register:
4768 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
4769 err_sp1_pci_driver_register:
4770 mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
4771 err_sp3_core_driver_register:
4772 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
4773 err_sp2_core_driver_register:
4774 mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
4775 err_sp1_core_driver_register:
4776 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4777 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4781 static void __exit mlxsw_sp_module_exit(void)
4783 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
4784 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
4785 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
4786 mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
4787 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
4788 mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
4789 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4790 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4793 module_init(mlxsw_sp_module_init);
4794 module_exit(mlxsw_sp_module_exit);
4796 MODULE_LICENSE("Dual BSD/GPL");
4797 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4798 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4799 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
4800 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
4801 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
4802 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
4803 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
4804 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);