mlxsw: spectrum: Remove mlxsw_sp_lag_get()
[linux-block.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
33
34 #include "spectrum.h"
35 #include "pci.h"
36 #include "core.h"
37 #include "core_env.h"
38 #include "reg.h"
39 #include "port.h"
40 #include "trap.h"
41 #include "txheader.h"
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
48
/* Common firmware minor/sub-minor version, shared by all Spectrum
 * generations; only the major number differs per ASIC below.
 */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
/* Minor version from which the Spectrum-1 firmware is considered
 * reset-capable — presumably the oldest minor supporting reset after
 * flashing; confirm against the can_reset_minor consumer.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Minimum firmware version required for Spectrum-1 devices. */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name requested for Spectrum-1, built from the
 * version numbers above.
 */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

/* Minimum firmware version required for Spectrum-2 devices. */
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

/* Firmware image file name requested for Spectrum-2. */
#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

/* Minimum firmware version required for Spectrum-3 devices. */
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

/* Firmware image file name requested for Spectrum-3. */
#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

/* Line card INI bundle file name, versioned by the shared
 * minor/sub-minor numbers.
 */
#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
97
/* Driver names, one per supported ASIC generation. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* Base MAC address masks — presumably the bits of the device base MAC
 * that are fixed, with the low (unmasked) bits varying per port;
 * confirm against the mask consumers.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
109
/* Fields of the Tx header prepended to every packet handed to the
 * device. See mlxsw_sp_txhdr_construct() and
 * mlxsw_sp_txhdr_ptp_data_construct() for how they are populated.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
177
178 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
179                               unsigned int counter_index, u64 *packets,
180                               u64 *bytes)
181 {
182         char mgpc_pl[MLXSW_REG_MGPC_LEN];
183         int err;
184
185         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
186                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
187         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
188         if (err)
189                 return err;
190         if (packets)
191                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
192         if (bytes)
193                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
194         return 0;
195 }
196
/* Zero a flow counter in the device by issuing the MGPC CLEAR opcode.
 * Called right after allocation so the first read starts from zero.
 */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
                                       unsigned int counter_index)
{
        char mgpc_pl[MLXSW_REG_MGPC_LEN];

        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
206
207 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
208                                 unsigned int *p_counter_index)
209 {
210         int err;
211
212         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
213                                      p_counter_index);
214         if (err)
215                 return err;
216         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
217         if (err)
218                 goto err_counter_clear;
219         return 0;
220
221 err_counter_clear:
222         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
223                               *p_counter_index);
224         return err;
225 }
226
227 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
228                                 unsigned int counter_index)
229 {
230          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
231                                counter_index);
232 }
233
/* Prepend a control-type Tx header to @skb. Control packets are
 * directed straight to tx_info->local_port (see the tx_hdr_port_mid
 * field description) using the control traffic class. The caller must
 * ensure MLXSW_TXHDR_LEN bytes of headroom.
 */
void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                              const struct mlxsw_tx_info *tx_info)
{
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

        /* Fields not explicitly set below must remain zero. */
        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_swid_set(txhdr, 0);
        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
249
/* Prepend a data-type Tx header for a PTP packet that must be time
 * stamped. The skb is freed and the port's tx_dropped counter is
 * incremented on any error, so the caller must not touch it afterwards.
 */
int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
                                  struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct sk_buff *skb,
                                  const struct mlxsw_tx_info *tx_info)
{
        char *txhdr;
        u16 max_fid;
        int err;

        /* Make sure there is headroom for the Tx header. */
        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                err = -ENOMEM;
                goto err_skb_cow_head;
        }

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
                err = -EIO;
                goto err_res_valid;
        }
        max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

        txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
        mlxsw_tx_hdr_fid_valid_set(txhdr, true);
        /* FID above the regular range, derived from the local port —
         * presumably a per-port "dummy" FID that steers the packet out
         * of the intended port; confirm against the FID allocation.
         */
        mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
        return 0;

err_res_valid:
err_skb_cow_head:
        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
        dev_kfree_skb_any(skb);
        return err;
}
288
289 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
290 {
291         unsigned int type;
292
293         if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
294                 return false;
295
296         type = ptp_classify_raw(skb);
297         return !!ptp_parse_header(skb, type);
298 }
299
/* Prepend the appropriate Tx header to @skb before it is handed to the
 * core. Consumes the skb on error (frees it and bumps tx_dropped).
 */
static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
                                 struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct sk_buff *skb,
                                 const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
         * need special handling and cannot be transmitted as regular control
         * packets. Defer to the per-ASIC constructor in that case.
         */
        if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
                return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
                                                          mlxsw_sp_port, skb,
                                                          tx_info);

        /* Ensure headroom for the Tx header. */
        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
                return -ENOMEM;
        }

        mlxsw_sp_txhdr_construct(skb, tx_info);
        return 0;
}
325
326 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
327 {
328         switch (state) {
329         case BR_STATE_FORWARDING:
330                 return MLXSW_REG_SPMS_STATE_FORWARDING;
331         case BR_STATE_LEARNING:
332                 return MLXSW_REG_SPMS_STATE_LEARNING;
333         case BR_STATE_LISTENING:
334         case BR_STATE_DISABLED:
335         case BR_STATE_BLOCKING:
336                 return MLXSW_REG_SPMS_STATE_DISCARDING;
337         default:
338                 BUG();
339         }
340 }
341
342 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
343                               u8 state)
344 {
345         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
346         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
347         char *spms_pl;
348         int err;
349
350         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
351         if (!spms_pl)
352                 return -ENOMEM;
353         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
354         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
355
356         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
357         kfree(spms_pl);
358         return err;
359 }
360
/* Query the device base MAC address (SPAD register) and cache it in
 * mlxsw_sp->base_mac; per-port addresses are later derived from it in
 * mlxsw_sp_port_dev_addr_init().
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
        int err;

        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
        if (err)
                return err;
        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
        return 0;
}
372
/* Set the administrative state of a port (PAOS register): up when
 * @is_up is true, down otherwise.
 */
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                   bool is_up)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char paos_pl[MLXSW_REG_PAOS_LEN];

        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
                            MLXSW_PORT_ADMIN_STATUS_DOWN);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
384
/* Program @addr as the port's MAC address in the device (PPAD
 * register).
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      const unsigned char *addr)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ppad_pl[MLXSW_REG_PPAD_LEN];

        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
395
/* Derive the netdev's MAC address from the cached device base MAC and
 * the local port number, then program it into the device.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
                        mlxsw_sp_port->local_port);
        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
                                          mlxsw_sp_port->dev->dev_addr);
}
405
/* Query the port's maximum supported MTU from the device (PMTU
 * register) and return it via @p_max_mtu.
 */
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];
        int err;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
        if (err)
                return err;

        *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
        return 0;
}
420
/* Set the port MTU in the device (PMTU register). The device MTU also
 * covers the Tx header and the Ethernet header, hence the adjustment
 * before the range check against the cached maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];

        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
        if (mtu > mlxsw_sp_port->max_mtu)
                return -EINVAL;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
433
/* Assign a local port to a switch partition (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
                                  u16 local_port, u8 swid)
{
        char pspa_pl[MLXSW_REG_PSPA_LEN];

        mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
442
/* Enable/disable virtual-port (802.1D / per-{port,VID}) mode on the
 * port via the SVPE register.
 */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svpe_pl[MLXSW_REG_SVPE_LEN];

        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
451
/* Enable/disable MAC learning for @vid on the port (SPVMLR register).
 * The same VID is passed as both ends of the register's VID range, so
 * exactly one VID is affected. The payload is heap-allocated.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                                   bool learn_enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvmlr_pl;
        int err;

        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
        if (!spvmlr_pl)
                return -ENOMEM;
        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
                              learn_enable);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
        kfree(spvmlr_pl);
        return err;
}
468
/* Enable/disable security checks on the port (SPFSR register). The
 * requested state is cached in mlxsw_sp_port->security and the
 * register write is skipped when the state is unchanged.
 */
int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spfsr_pl[MLXSW_REG_SPFSR_LEN];
        int err;

        if (mlxsw_sp_port->security == enable)
                return 0;

        mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
        if (err)
                return err;

        /* Only cache the new state after the device accepted it. */
        mlxsw_sp_port->security = enable;
        return 0;
}
486
487 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
488 {
489         switch (ethtype) {
490         case ETH_P_8021Q:
491                 *p_sver_type = 0;
492                 break;
493         case ETH_P_8021AD:
494                 *p_sver_type = 1;
495                 break;
496         default:
497                 return -EINVAL;
498         }
499
500         return 0;
501 }
502
/* Set the Ethernet protocol used for VLAN tags pushed on egress
 * (SPEVET register), after translating @ethtype to the SVER encoding.
 */
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 ethtype)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spevet_pl[MLXSW_REG_SPEVET_LEN];
        u8 sver_type;
        int err;

        err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
        if (err)
                return err;

        mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}
518
/* Program the port's PVID and tagging protocol in the device (SPVID
 * register). Does not touch the allow-untagged setting or the cached
 * pvid — see mlxsw_sp_port_pvid_set() for the full sequence.
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid, u16 ethtype)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spvid_pl[MLXSW_REG_SPVID_LEN];
        u8 sver_type;
        int err;

        err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
        if (err)
                return err;

        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
                             sver_type);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
536
/* Allow or disallow untagged frames on the port (SPAFT register). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            bool allow)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spaft_pl[MLXSW_REG_SPAFT_LEN];

        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
546
547 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
548                            u16 ethtype)
549 {
550         int err;
551
552         if (!vid) {
553                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
554                 if (err)
555                         return err;
556         } else {
557                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
558                 if (err)
559                         return err;
560                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
561                 if (err)
562                         goto err_port_allow_untagged_set;
563         }
564
565         mlxsw_sp_port->pvid = vid;
566         return 0;
567
568 err_port_allow_untagged_set:
569         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
570         return err;
571 }
572
/* Create the system-port to local-port mapping for the port (SSPR
 * register).
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sspr_pl[MLXSW_REG_SSPR_LEN];

        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
582
/* Parse a queried PMLP payload into @port_mapping, validating that the
 * configuration is one the driver supports: a power-of-two width, all
 * lanes on the same module and slot, identical Rx/Tx lane numbers and
 * sequential lanes starting at the first one. Returns -EINVAL (with a
 * log message) for anything else.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
                                u16 local_port, char *pmlp_pl,
                                struct mlxsw_sp_port_mapping *port_mapping)
{
        bool separate_rxtx;
        u8 first_lane;
        u8 slot_index;
        u8 module;
        u8 width;
        int i;

        /* Lane 0 defines the expected module/slot/first lane for the
         * consistency checks below.
         */
        module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
        width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
        first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

        /* A width of zero (unmapped port) is allowed; any non-zero
         * width must be a power of two.
         */
        if (width && !is_power_of_2(width)) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
                        local_port);
                return -EINVAL;
        }

        for (i = 0; i < width; i++) {
                if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
                                local_port);
                        return -EINVAL;
                }
                if (separate_rxtx &&
                    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
                    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
                                local_port);
                        return -EINVAL;
                }
        }

        port_mapping->module = module;
        port_mapping->slot_index = slot_index;
        port_mapping->width = width;
        port_mapping->module_width = width;
        port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}
639
/* Query the PMLP register for @local_port and parse the result into
 * @port_mapping (see mlxsw_sp_port_module_info_parse()).
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                              struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
        return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
                                               pmlp_pl, port_mapping);
}
654
/* Map a local port to its module lanes via the PMLP register. The
 * module is first registered with the environment code; on PMLP write
 * failure that registration is undone.
 */
static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                         const struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i, err;

        mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
                                  port_mapping->module);

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
        /* Assign consecutive lanes starting at the mapping's first
         * lane; setting the Tx lane covers Rx as well.
         */
        for (i = 0; i < port_mapping->width; i++) {
                mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
                                              port_mapping->slot_index);
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
        }

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                goto err_pmlp_write;
        return 0;

err_pmlp_write:
        mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
                                    port_mapping->module);
        return err;
}
684
/* Unmap a local port from its module: zero the PMLP width and release
 * the module reference in the environment code.
 * NOTE(review): the PMLP write status is deliberately ignored —
 * best-effort teardown with no way to recover on failure.
 */
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                       u8 slot_index, u8 module)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
695
/* ndo_open handler: notify the environment code that the module port
 * is up, set the port administratively up and start the Tx queue.
 * The module-port-up notification is rolled back if the admin state
 * cannot be set.
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        err = mlxsw_env_module_port_up(mlxsw_sp->core,
                                       mlxsw_sp_port->mapping.slot_index,
                                       mlxsw_sp_port->mapping.module);
        if (err)
                return err;
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_admin_status_set;
        netif_start_queue(dev);
        return 0;

err_port_admin_status_set:
        mlxsw_env_module_port_down(mlxsw_sp->core,
                                   mlxsw_sp_port->mapping.slot_index,
                                   mlxsw_sp_port->mapping.module);
        return err;
}
719
/* ndo_stop handler: mirror of mlxsw_sp_port_open() in reverse order —
 * stop the Tx queue, set the port administratively down, then notify
 * the environment code that the module port is down.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        netif_stop_queue(dev);
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        mlxsw_env_module_port_down(mlxsw_sp->core,
                                   mlxsw_sp_port->mapping.slot_index,
                                   mlxsw_sp_port->mapping.module);
        return 0;
}
732
/* ndo_start_xmit handler. Prepends the Tx header and hands the skb to
 * the core. On any error the packet is dropped (never requeued), so
 * NETDEV_TX_OK is returned in all cases except a busy transmit path.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        /* Clear the driver-private control block before the core uses it. */
        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        /* Pad short frames to the minimum Ethernet size; eth_skb_pad()
         * is expected to free the skb itself on failure.
         */
        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        /* mlxsw_sp_txhdr_handle() consumes the skb on error. */
        err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
                                    &tx_info);
        if (err)
                return NETDEV_TX_OK;

        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
783
/* ndo_set_rx_mode handler — intentionally empty: the driver performs
 * no per-netdev Rx filtering configuration here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
787
788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
789 {
790         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791         struct sockaddr *addr = p;
792         int err;
793
794         if (!is_valid_ether_addr(addr->sa_data))
795                 return -EADDRNOTAVAIL;
796
797         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
798         if (err)
799                 return err;
800         eth_hw_addr_set(dev, addr->sa_data);
801         return 0;
802 }
803
/* ndo_change_mtu handler. The port's buffer headroom must match the
 * MTU, so the headroom is reconfigured first and rolled back if
 * programming the MTU into the device fails.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_hdroom orig_hdroom;
        struct mlxsw_sp_hdroom hdroom;
        int err;

        /* Keep a copy of the current headroom for rollback. */
        orig_hdroom = *mlxsw_sp_port->hdroom;

        hdroom = orig_hdroom;
        hdroom.mtu = mtu;
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
        if (err) {
                netdev_err(dev, "Failed to configure port's headroom\n");
                return err;
        }

        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
        if (err)
                goto err_port_mtu_set;
        dev->mtu = mtu;
        return 0;

err_port_mtu_set:
        /* Restore the previous headroom configuration. */
        mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
        return err;
}
833
/* Aggregate the driver-maintained per-CPU software counters into
 * @stats. These count the traffic that traversed the CPU and are
 * reported as IFLA_OFFLOAD_XSTATS_CPU_HIT.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                /* Retry until a consistent snapshot of the 64-bit
                 * counters is read (matters on 32-bit architectures).
                 */
                do {
                        start = u64_stats_fetch_begin(&p->syncp);
                        rx_packets      = p->rx_packets;
                        rx_bytes        = p->rx_bytes;
                        tx_packets      = p->tx_packets;
                        tx_bytes        = p->tx_bytes;
                } while (u64_stats_fetch_retry(&p->syncp, start));

                stats->rx_packets       += rx_packets;
                stats->rx_bytes         += rx_bytes;
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped      += p->tx_dropped;
        }
        stats->tx_dropped       = tx_dropped;
        return 0;
}
865
866 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
867 {
868         switch (attr_id) {
869         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
870                 return true;
871         }
872
873         return false;
874 }
875
876 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
877                                            void *sp)
878 {
879         switch (attr_id) {
880         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
881                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
882         }
883
884         return -EINVAL;
885 }
886
887 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
888                                 int prio, char *ppcnt_pl)
889 {
890         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
891         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
892
893         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
894         return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
895 }
896
897 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
898                                       struct rtnl_link_stats64 *stats)
899 {
900         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
901         int err;
902
903         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
904                                           0, ppcnt_pl);
905         if (err)
906                 goto out;
907
908         stats->tx_packets =
909                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
910         stats->rx_packets =
911                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
912         stats->tx_bytes =
913                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
914         stats->rx_bytes =
915                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
916         stats->multicast =
917                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
918
919         stats->rx_crc_errors =
920                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
921         stats->rx_frame_errors =
922                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
923
924         stats->rx_length_errors = (
925                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
926                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
927                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
928
929         stats->rx_errors = (stats->rx_crc_errors +
930                 stats->rx_frame_errors + stats->rx_length_errors);
931
932 out:
933         return err;
934 }
935
/* Collect extended per-port statistics (port ECN marks, per-TC WRED and
 * tail drops, per-TC backlog and per-priority TX counters) into @xstats.
 *
 * Every PPCNT query is best-effort: on error the corresponding fields
 * are left untouched and the remaining groups are still read.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	/* Extended counter group: ECN-marked packets for the whole port. */
	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		/* Per-TC congestion counters: WRED drops and per-TC ECN. */
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt; /* still attempt the plain TC counters */

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		/* Per-TC counters: transmit queue backlog and tail drops. */
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority counters: TX frames and octets per priority. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
981
982 static void update_stats_cache(struct work_struct *work)
983 {
984         struct mlxsw_sp_port *mlxsw_sp_port =
985                 container_of(work, struct mlxsw_sp_port,
986                              periodic_hw_stats.update_dw.work);
987
988         if (!netif_carrier_ok(mlxsw_sp_port->dev))
989                 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
990                  * necessary when port goes down.
991                  */
992                 goto out;
993
994         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
995                                    &mlxsw_sp_port->periodic_hw_stats.stats);
996         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
997                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
998
999 out:
1000         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1001                                MLXSW_HW_STATS_UPDATE_TIME);
1002 }
1003
1004 /* Return the stats from a cache that is updated periodically,
1005  * as this function might get called in an atomic context.
1006  */
1007 static void
1008 mlxsw_sp_port_get_stats64(struct net_device *dev,
1009                           struct rtnl_link_stats64 *stats)
1010 {
1011         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1012
1013         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1014 }
1015
1016 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1017                                     u16 vid_begin, u16 vid_end,
1018                                     bool is_member, bool untagged)
1019 {
1020         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1021         char *spvm_pl;
1022         int err;
1023
1024         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1025         if (!spvm_pl)
1026                 return -ENOMEM;
1027
1028         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1029                             vid_end, is_member, untagged);
1030         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1031         kfree(spvm_pl);
1032         return err;
1033 }
1034
1035 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1036                            u16 vid_end, bool is_member, bool untagged)
1037 {
1038         u16 vid, vid_e;
1039         int err;
1040
1041         for (vid = vid_begin; vid <= vid_end;
1042              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1043                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1044                             vid_end);
1045
1046                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1047                                                is_member, untagged);
1048                 if (err)
1049                         return err;
1050         }
1051
1052         return 0;
1053 }
1054
1055 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1056                                      bool flush_default)
1057 {
1058         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1059
1060         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1061                                  &mlxsw_sp_port->vlans_list, list) {
1062                 if (!flush_default &&
1063                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1064                         continue;
1065                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1066         }
1067 }
1068
1069 static void
1070 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1071 {
1072         if (mlxsw_sp_port_vlan->bridge_port)
1073                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1074         else if (mlxsw_sp_port_vlan->fid)
1075                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1076 }
1077
1078 struct mlxsw_sp_port_vlan *
1079 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1080 {
1081         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1082         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1083         int err;
1084
1085         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1086         if (mlxsw_sp_port_vlan)
1087                 return ERR_PTR(-EEXIST);
1088
1089         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1090         if (err)
1091                 return ERR_PTR(err);
1092
1093         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1094         if (!mlxsw_sp_port_vlan) {
1095                 err = -ENOMEM;
1096                 goto err_port_vlan_alloc;
1097         }
1098
1099         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1100         mlxsw_sp_port_vlan->vid = vid;
1101         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1102
1103         return mlxsw_sp_port_vlan;
1104
1105 err_port_vlan_alloc:
1106         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1107         return ERR_PTR(err);
1108 }
1109
1110 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1111 {
1112         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1113         u16 vid = mlxsw_sp_port_vlan->vid;
1114
1115         mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
1116         list_del(&mlxsw_sp_port_vlan->list);
1117         kfree(mlxsw_sp_port_vlan);
1118         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1119 }
1120
1121 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1122                                  __be16 __always_unused proto, u16 vid)
1123 {
1124         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1125
1126         /* VLAN 0 is added to HW filter when device goes up, but it is
1127          * reserved in our case, so simply return.
1128          */
1129         if (!vid)
1130                 return 0;
1131
1132         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1133 }
1134
1135 int mlxsw_sp_port_kill_vid(struct net_device *dev,
1136                            __be16 __always_unused proto, u16 vid)
1137 {
1138         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1139         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1140
1141         /* VLAN 0 is removed from HW filter when device goes down, but
1142          * it is reserved in our case, so simply return.
1143          */
1144         if (!vid)
1145                 return 0;
1146
1147         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1148         if (!mlxsw_sp_port_vlan)
1149                 return 0;
1150         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1151
1152         return 0;
1153 }
1154
1155 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1156                                    struct flow_block_offload *f)
1157 {
1158         switch (f->binder_type) {
1159         case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1160                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1161         case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1162                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1163         case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1164                 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1165         case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
1166                 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
1167         default:
1168                 return -EOPNOTSUPP;
1169         }
1170 }
1171
1172 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1173                              void *type_data)
1174 {
1175         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1176
1177         switch (type) {
1178         case TC_SETUP_BLOCK:
1179                 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1180         case TC_SETUP_QDISC_RED:
1181                 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1182         case TC_SETUP_QDISC_PRIO:
1183                 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1184         case TC_SETUP_QDISC_ETS:
1185                 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1186         case TC_SETUP_QDISC_TBF:
1187                 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1188         case TC_SETUP_QDISC_FIFO:
1189                 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1190         default:
1191                 return -EOPNOTSUPP;
1192         }
1193 }
1194
1195 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1196 {
1197         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1198
1199         if (!enable) {
1200                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1201                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1202                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1203                         return -EINVAL;
1204                 }
1205                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1206                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1207         } else {
1208                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1209                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1210         }
1211         return 0;
1212 }
1213
1214 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1215 {
1216         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1217         char pplr_pl[MLXSW_REG_PPLR_LEN];
1218         int err;
1219
1220         if (netif_running(dev))
1221                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1222
1223         mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1224         err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1225                               pplr_pl);
1226
1227         if (netif_running(dev))
1228                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1229
1230         return err;
1231 }
1232
1233 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1234
1235 static int mlxsw_sp_handle_feature(struct net_device *dev,
1236                                    netdev_features_t wanted_features,
1237                                    netdev_features_t feature,
1238                                    mlxsw_sp_feature_handler feature_handler)
1239 {
1240         netdev_features_t changes = wanted_features ^ dev->features;
1241         bool enable = !!(wanted_features & feature);
1242         int err;
1243
1244         if (!(changes & feature))
1245                 return 0;
1246
1247         err = feature_handler(dev, enable);
1248         if (err) {
1249                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1250                            enable ? "Enable" : "Disable", &feature, err);
1251                 return err;
1252         }
1253
1254         if (enable)
1255                 dev->features |= feature;
1256         else
1257                 dev->features &= ~feature;
1258
1259         return 0;
1260 }
1261 static int mlxsw_sp_set_features(struct net_device *dev,
1262                                  netdev_features_t features)
1263 {
1264         netdev_features_t oper_features = dev->features;
1265         int err = 0;
1266
1267         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1268                                        mlxsw_sp_feature_hw_tc);
1269         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1270                                        mlxsw_sp_feature_loopback);
1271
1272         if (err) {
1273                 dev->features = oper_features;
1274                 return -EINVAL;
1275         }
1276
1277         return 0;
1278 }
1279
1280 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1281                                       struct ifreq *ifr)
1282 {
1283         struct hwtstamp_config config;
1284         int err;
1285
1286         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1287                 return -EFAULT;
1288
1289         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1290                                                              &config);
1291         if (err)
1292                 return err;
1293
1294         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1295                 return -EFAULT;
1296
1297         return 0;
1298 }
1299
1300 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1301                                       struct ifreq *ifr)
1302 {
1303         struct hwtstamp_config config;
1304         int err;
1305
1306         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1307                                                              &config);
1308         if (err)
1309                 return err;
1310
1311         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1312                 return -EFAULT;
1313
1314         return 0;
1315 }
1316
1317 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1318 {
1319         struct hwtstamp_config config = {0};
1320
1321         mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1322 }
1323
1324 static int
1325 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1326 {
1327         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1328
1329         switch (cmd) {
1330         case SIOCSHWTSTAMP:
1331                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1332         case SIOCGHWTSTAMP:
1333                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1334         default:
1335                 return -EOPNOTSUPP;
1336         }
1337 }
1338
/* Netdevice callbacks for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
1355
/* Restrict the port's advertised speeds to the intersection of what the
 * device reports as capable (via PTYS) and what the driver supports.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	/* Mask the capability bits down to the driver-supported set and
	 * write them back as the advertised set, honouring the port's
	 * autoneg setting.
	 */
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1385
1386 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1387 {
1388         const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1389         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1390         char ptys_pl[MLXSW_REG_PTYS_LEN];
1391         u32 eth_proto_oper;
1392         int err;
1393
1394         port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1395         port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1396                                                mlxsw_sp_port->local_port, 0,
1397                                                false);
1398         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1399         if (err)
1400                 return err;
1401         port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1402                                                  &eth_proto_oper);
1403         *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1404         return 0;
1405 }
1406
1407 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1408                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1409                           bool dwrr, u8 dwrr_weight)
1410 {
1411         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1412         char qeec_pl[MLXSW_REG_QEEC_LEN];
1413
1414         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1415                             next_index);
1416         mlxsw_reg_qeec_de_set(qeec_pl, true);
1417         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1418         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1419         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1420 }
1421
1422 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1423                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1424                                   u8 next_index, u32 maxrate, u8 burst_size)
1425 {
1426         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1427         char qeec_pl[MLXSW_REG_QEEC_LEN];
1428
1429         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1430                             next_index);
1431         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1432         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1433         mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1434         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1435 }
1436
1437 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1438                                     enum mlxsw_reg_qeec_hr hr, u8 index,
1439                                     u8 next_index, u32 minrate)
1440 {
1441         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1442         char qeec_pl[MLXSW_REG_QEEC_LEN];
1443
1444         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1445                             next_index);
1446         mlxsw_reg_qeec_mise_set(qeec_pl, true);
1447         mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1448
1449         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1450 }
1451
1452 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1453                               u8 switch_prio, u8 tclass)
1454 {
1455         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1456         char qtct_pl[MLXSW_REG_QTCT_LEN];
1457
1458         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1459                             tclass);
1460         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1461 }
1462
/* Build the port's default egress scheduling configuration: element
 * hierarchy linkage, shapers disabled everywhere they exist, min
 * shapers for TCs 8..15, and all priorities mapped to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	/* TCs i + 8 share subgroup i with TC i; see the min shaper loop
	 * for the multicast TCs below.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1548
1549 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1550                                         bool enable)
1551 {
1552         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1553         char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1554
1555         mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1556         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1557 }
1558
1559 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1560 {
1561         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1562         u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1563         u8 module = mlxsw_sp_port->mapping.module;
1564         u64 overheat_counter;
1565         int err;
1566
1567         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1568                                                     module, &overheat_counter);
1569         if (err)
1570                 return err;
1571
1572         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1573         return 0;
1574 }
1575
1576 int
1577 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1578                                       bool is_8021ad_tagged,
1579                                       bool is_8021q_tagged)
1580 {
1581         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1582         char spvc_pl[MLXSW_REG_SPVC_LEN];
1583
1584         mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1585                             is_8021ad_tagged, is_8021q_tagged);
1586         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1587 }
1588
1589 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
1590                                         u16 local_port, u8 *port_number,
1591                                         u8 *split_port_subnumber,
1592                                         u8 *slot_index)
1593 {
1594         char pllp_pl[MLXSW_REG_PLLP_LEN];
1595         int err;
1596
1597         mlxsw_reg_pllp_pack(pllp_pl, local_port);
1598         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
1599         if (err)
1600                 return err;
1601         mlxsw_reg_pllp_unpack(pllp_pl, port_number,
1602                               split_port_subnumber, slot_index);
1603         return 0;
1604 }
1605
/* Create a single switch port: map its module, initialize the core port,
 * allocate and configure the netdev and all per-port sub-systems (buffers,
 * ETS, DCB, FIDs, qdiscs, NVE, VLANs), and finally register the netdev.
 * The error unwind mirrors the init sequence in exact reverse order, so
 * any reordering here must keep the goto labels in sync.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A port is splittable when it has more than one lane and is not
	 * itself the product of an earlier split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* Publish the port in the ports array before register_netdev() so
	 * event handlers can look it up by local port.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	/* Restore default tag classification (both 802.1ad and 802.1q). */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1898
/* Tear down a port created by mlxsw_sp_port_create(), undoing every init
 * step in exact reverse order. Caller must ensure the port exists
 * (see mlxsw_sp_port_created()).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* Cache mapping fields; the netdev (and with it mlxsw_sp_port) is
	 * freed below, before the module is unmapped.
	 */
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been destroyed by the flush above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1926
1927 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1928 {
1929         struct mlxsw_sp_port *mlxsw_sp_port;
1930         int err;
1931
1932         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1933         if (!mlxsw_sp_port)
1934                 return -ENOMEM;
1935
1936         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1937         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1938
1939         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1940                                        mlxsw_sp_port,
1941                                        mlxsw_sp->base_mac,
1942                                        sizeof(mlxsw_sp->base_mac));
1943         if (err) {
1944                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1945                 goto err_core_cpu_port_init;
1946         }
1947
1948         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1949         return 0;
1950
1951 err_core_cpu_port_init:
1952         kfree(mlxsw_sp_port);
1953         return err;
1954 }
1955
1956 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1957 {
1958         struct mlxsw_sp_port *mlxsw_sp_port =
1959                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1960
1961         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1962         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1963         kfree(mlxsw_sp_port);
1964 }
1965
1966 static bool mlxsw_sp_local_port_valid(u16 local_port)
1967 {
1968         return local_port != MLXSW_PORT_CPU_PORT;
1969 }
1970
1971 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1972 {
1973         if (!mlxsw_sp_local_port_valid(local_port))
1974                 return false;
1975         return mlxsw_sp->ports[local_port] != NULL;
1976 }
1977
1978 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1979                                            u16 local_port, bool enable)
1980 {
1981         char pmecr_pl[MLXSW_REG_PMECR_LEN];
1982
1983         mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1984                              enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1985                                       MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1986         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1987 }
1988
/* One queued port-mapping event. pmlp_pl is a raw copy of the PMLP
 * register payload taken in the trap handler, to be parsed later in
 * process context by the events work item.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};
1993
/* Process queued port-mapping events in process context: for each event,
 * parse the copied PMLP payload and create the corresponding port under
 * the devlink instance lock.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Detach the whole pending queue under the lock so the trap handler
	 * can keep appending new events while we process this batch.
	 */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		/* A mapping event is only expected for a port that does not
		 * exist yet; an event for an existing port is a bug.
		 */
		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		/* Each event is freed whether it was handled or skipped. */
		kfree(event);
	}
}
2039
/* Trap handler for PMLP port-mapping events. Runs in atomic context, so
 * it only copies the register payload, queues it, and defers the real
 * work to mlxsw_sp_port_mapping_events_work().
 */
static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	/* GFP_ATOMIC: we may be called from interrupt context. On allocation
	 * failure the event is silently dropped.
	 */
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}
2063
2064 static void
2065 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2066 {
2067         struct mlxsw_sp_port_mapping_event *event, *next_event;
2068         struct mlxsw_sp_port_mapping_events *events;
2069
2070         events = &mlxsw_sp->port_mapping_events;
2071
2072         /* Caller needs to make sure that no new event is going to appear. */
2073         cancel_work_sync(&events->work);
2074         list_for_each_entry_safe(event, next_event, &events->queue, list) {
2075                 list_del(&event->list);
2076                 kfree(event);
2077         }
2078 }
2079
2080 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2081 {
2082         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2083         int i;
2084
2085         for (i = 1; i < max_ports; i++)
2086                 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2087         /* Make sure all scheduled events are processed */
2088         __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2089
2090         for (i = 1; i < max_ports; i++)
2091                 if (mlxsw_sp_port_created(mlxsw_sp, i))
2092                         mlxsw_sp_port_remove(mlxsw_sp, i);
2093         mlxsw_sp_cpu_port_remove(mlxsw_sp);
2094         kfree(mlxsw_sp->ports);
2095         mlxsw_sp->ports = NULL;
2096 }
2097
2098 static void
2099 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2100                                bool (*selector)(void *priv, u16 local_port),
2101                                void *priv)
2102 {
2103         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2104         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2105         int i;
2106
2107         for (i = 1; i < max_ports; i++)
2108                 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2109                         mlxsw_sp_port_remove(mlxsw_sp, i);
2110 }
2111
2112 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2113 {
2114         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2115         struct mlxsw_sp_port_mapping_events *events;
2116         struct mlxsw_sp_port_mapping *port_mapping;
2117         size_t alloc_size;
2118         int i;
2119         int err;
2120
2121         alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2122         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2123         if (!mlxsw_sp->ports)
2124                 return -ENOMEM;
2125
2126         events = &mlxsw_sp->port_mapping_events;
2127         INIT_LIST_HEAD(&events->queue);
2128         spin_lock_init(&events->queue_lock);
2129         INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
2130
2131         for (i = 1; i < max_ports; i++) {
2132                 err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
2133                 if (err)
2134                         goto err_event_enable;
2135         }
2136
2137         err = mlxsw_sp_cpu_port_create(mlxsw_sp);
2138         if (err)
2139                 goto err_cpu_port_create;
2140
2141         for (i = 1; i < max_ports; i++) {
2142                 port_mapping = &mlxsw_sp->port_mapping[i];
2143                 if (!port_mapping->width)
2144                         continue;
2145                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
2146                 if (err)
2147                         goto err_port_create;
2148         }
2149         return 0;
2150
2151 err_port_create:
2152         for (i--; i >= 1; i--)
2153                 if (mlxsw_sp_port_created(mlxsw_sp, i))
2154                         mlxsw_sp_port_remove(mlxsw_sp, i);
2155         i = max_ports;
2156         mlxsw_sp_cpu_port_remove(mlxsw_sp);
2157 err_cpu_port_create:
2158 err_event_enable:
2159         for (i--; i >= 1; i--)
2160                 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2161         /* Make sure all scheduled events are processed */
2162         __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2163         kfree(mlxsw_sp->ports);
2164         mlxsw_sp->ports = NULL;
2165         return err;
2166 }
2167
2168 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2169 {
2170         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2171         struct mlxsw_sp_port_mapping *port_mapping;
2172         int i;
2173         int err;
2174
2175         mlxsw_sp->port_mapping = kcalloc(max_ports,
2176                                          sizeof(struct mlxsw_sp_port_mapping),
2177                                          GFP_KERNEL);
2178         if (!mlxsw_sp->port_mapping)
2179                 return -ENOMEM;
2180
2181         for (i = 1; i < max_ports; i++) {
2182                 port_mapping = &mlxsw_sp->port_mapping[i];
2183                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2184                 if (err)
2185                         goto err_port_module_info_get;
2186         }
2187         return 0;
2188
2189 err_port_module_info_get:
2190         kfree(mlxsw_sp->port_mapping);
2191         return err;
2192 }
2193
/* Free the per-port module mapping array allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2198
/* Create the split sub-ports resulting from splitting a port into
 * "count" parts. Each sub-port inherits the base mapping with 1/count of
 * the width, with lanes assigned consecutively. On failure, sub-ports
 * created so far are removed again.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Advance to the lanes of the next sub-port. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2233
2234 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2235                                          unsigned int count,
2236                                          const char *pmtdb_pl)
2237 {
2238         struct mlxsw_sp_port_mapping *port_mapping;
2239         int i;
2240
2241         /* Go over original unsplit ports in the gap and recreate them. */
2242         for (i = 0; i < count; i++) {
2243                 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2244
2245                 port_mapping = &mlxsw_sp->port_mapping[local_port];
2246                 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2247                         continue;
2248                 mlxsw_sp_port_create(mlxsw_sp, local_port,
2249                                      false, port_mapping);
2250         }
2251 }
2252
2253 static struct mlxsw_sp_port *
2254 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2255 {
2256         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2257                 return mlxsw_sp->ports[local_port];
2258         return NULL;
2259 }
2260
/* devlink port-split operation: validate the request against PMTDB,
 * remove every port occupying the target lanes, and create the split
 * sub-ports. On failure, the removed unsplit ports are recreated.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* An already-split port cannot be split again. */
	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Ask firmware (PMTDB) which local ports the split would occupy and
	 * whether the configuration is supported.
	 */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port (and its mapping) is removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2325
/* devlink port-unsplit operation: remove all sub-ports belonging to the
 * split and recreate the original unsplit port(s) from the saved
 * mappings.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Derive the split factor from how much of the module's width this
	 * sub-port was given.
	 */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2373
2374 static void
2375 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2376 {
2377         int i;
2378
2379         for (i = 0; i < TC_MAX_QUEUE; i++)
2380                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2381 }
2382
2383 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2384                                      char *pude_pl, void *priv)
2385 {
2386         struct mlxsw_sp *mlxsw_sp = priv;
2387         struct mlxsw_sp_port *mlxsw_sp_port;
2388         enum mlxsw_reg_pude_oper_status status;
2389         u16 local_port;
2390
2391         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2392
2393         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2394                 return;
2395         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2396         if (!mlxsw_sp_port)
2397                 return;
2398
2399         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2400         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2401                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2402                 netif_carrier_on(mlxsw_sp_port->dev);
2403                 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2404         } else {
2405                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2406                 netif_carrier_off(mlxsw_sp_port->dev);
2407                 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2408         }
2409 }
2410
/* Drain one MTPPTR event: unpack every timestamp record it carries and feed
 * it to the Spectrum-1 PTP state machine for the given direction.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		/* Each record identifies the PTP packet it belongs to by
		 * (message type, domain, sequence id).
		 */
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2434
2435 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2436                                               char *mtpptr_pl, void *priv)
2437 {
2438         struct mlxsw_sp *mlxsw_sp = priv;
2439
2440         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2441 }
2442
2443 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2444                                               char *mtpptr_pl, void *priv)
2445 {
2446         struct mlxsw_sp *mlxsw_sp = priv;
2447
2448         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2449 }
2450
/* Common RX listener: attribute a trapped packet to its ingress port, update
 * per-CPU RX statistics and inject the packet into the network stack.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	/* NOTE(review): on this early return the skb is not freed here -
	 * confirm skb ownership on the error path.
	 */
	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* syncp makes the 64-bit counter update safe against readers. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2475
2476 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2477                                            void *priv)
2478 {
2479         skb->offload_fwd_mark = 1;
2480         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2481 }
2482
2483 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2484                                               u16 local_port, void *priv)
2485 {
2486         skb->offload_l3_fwd_mark = 1;
2487         skb->offload_fwd_mark = 1;
2488         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2489 }
2490
/* Dispatch a trapped PTP packet to the ASIC-generation-specific handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2496
/* Convenience wrappers around MLXSW_RXL()/MLXSW_EVENTL() for the listener
 * tables below. Each selects one of the three RX listener functions above
 * and prefixes the trap group name with SP_.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2511
/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2537
/* Spectrum-1 only: PTP timestamp FIFO events (MTPPTR-based). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2543
/* Spectrum-2 and later: port module mapping change events (PMLPE). */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2548
/* Configure CPU policers (via QPCR) for the trap groups this file owns;
 * all other policer indices are left untouched. Used policers are recorded
 * in the trap bitmap so the devlink trap code can avoid them.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		/* Packet-based accounting unless a case below overrides. */
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			/* Policer index not managed here. */
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2589
/* Bind the trap groups this file owns to a priority, traffic class and
 * policer (via HTGT). By default a group is policed by the policer whose
 * index equals the group index; event groups are not policed at all.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			/* Group not managed here. */
			continue;
		}

		/* A real policer id must exist as a CPU policer. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2636
/* Allocate trap bookkeeping, program CPU policers and trap groups, then
 * register the common and per-ASIC listener tables. Unwinds in reverse
 * order on failure via the goto chain.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* The trap struct carries a flexible bitmap sized by the number of
	 * CPU policers the device exposes.
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* Per-ASIC listener table (set up by the variant-specific init). */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2684
/* Tear down in reverse order of mlxsw_sp_traps_init(): per-ASIC listeners,
 * common listeners, then the trap bookkeeping.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2694
/* In SW LAG mode, reserve a PGT region for the LAG table and tell the
 * device its base address (via SGCR). A no-op in FW LAG mode.
 */
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	u16 max_lag;
	int err;

	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return 0;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	/* In DDD mode, which we by default use, each LAG entry is 8 PGT
	 * entries. The LAG table address needs to be 8-aligned, but that ought
	 * to be the case, since the LAG table is allocated first.
	 */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
					   max_lag * 8);
	if (err)
		return err;
	if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
		err = -EINVAL;
		goto err_mid_alloc_range;
	}

	mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
	if (err)
		goto err_mid_alloc_range;

	return 0;

err_mid_alloc_range:
	/* Release the range reserved above. */
	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    max_lag * 8);
	return err;
}
2734
2735 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
2736 {
2737         u16 max_lag;
2738         int err;
2739
2740         if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2741             MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2742                 return;
2743
2744         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2745         if (err)
2746                 return;
2747
2748         mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2749                                     max_lag * 8);
2750 }
2751
/* Seed for the LAG hash key derived from the base MAC (see SLCR packing). */
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* One entry of the mlxsw_sp->lags array, indexed by LAG id. */
struct mlxsw_sp_lag {
	struct net_device *dev;		/* upper LAG device, if bound */
	unsigned int ref_count;		/* references held on this entry */
};
2758
/* Configure the LAG hash (SLCR), reserve the LAG PGT region and allocate
 * the per-LAG bookkeeping array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	/* Derive a per-device hash seed from the base MAC. */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_lag),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags) {
		err = -ENOMEM;
		goto err_kcalloc;
	}

	return 0;

err_kcalloc:
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	return err;
}
2805
2806 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2807 {
2808         mlxsw_sp_lag_pgt_fini(mlxsw_sp);
2809         kfree(mlxsw_sp->lags);
2810 }
2811
/* PTP callbacks for Spectrum-1. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2828
/* PTP callbacks for Spectrum-2/3. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};
2845
/* PTP callbacks for Spectrum-4: identical to the Spectrum-2 ops except
 * txhdr_construct, which uses the generic mlxsw_sp_ptp_txhdr_construct().
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2862
/* Hash table node binding a sampling trigger to its parameters. */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* lookup key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;	/* for kfree_rcu() on removal */
	refcount_t refcount;
};
2870
/* The whole mlxsw_sp_sample_trigger struct is the (memcmp'd) key. */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2877
/* Build a canonical lookup key from a trigger: only type and local port
 * participate; the memset also zeroes any padding, which matters because
 * the full struct is hashed/compared by the rhashtable (key_len above).
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2886
2887 /* RCU read lock must be held */
2888 struct mlxsw_sp_sample_params *
2889 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2890                                       const struct mlxsw_sp_sample_trigger *trigger)
2891 {
2892         struct mlxsw_sp_sample_trigger_node *trigger_node;
2893         struct mlxsw_sp_sample_trigger key;
2894
2895         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2896         trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2897                                          mlxsw_sp_sample_trigger_ht_params);
2898         if (!trigger_node)
2899                 return NULL;
2900
2901         return &trigger_node->params;
2902 }
2903
/* Allocate a trigger node for (trigger, params) with an initial reference
 * and insert it into the hash table.
 */
static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}
2932
/* Remove a trigger node from the hash table and free it after a grace
 * period (RCU readers may still hold a pointer to its params).
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2942
/* Associate sampling parameters with a trigger. If the trigger already has
 * a node, the parameters must match exactly and a reference is taken;
 * otherwise a new node is created. Serialized by RTNL.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* An existing node bound to a specific port cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	/* Sharing is only allowed with identical parameters. */
	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2980
2981 void
2982 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2983                                      const struct mlxsw_sp_sample_trigger *trigger)
2984 {
2985         struct mlxsw_sp_sample_trigger_node *trigger_node;
2986         struct mlxsw_sp_sample_trigger key;
2987
2988         ASSERT_RTNL();
2989
2990         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2991
2992         trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2993                                               &key,
2994                                               mlxsw_sp_sample_trigger_ht_params);
2995         if (!trigger_node)
2996                 return;
2997
2998         if (!refcount_dec_and_test(&trigger_node->refcount))
2999                 return;
3000
3001         mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
3002 }
3003
3004 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3005                                     unsigned long event, void *ptr);
3006
/* Default and increased parsing depth (presumably bytes of header the
 * device parses - confirm against PRM), and the IANA-assigned VXLAN UDP
 * destination port.
 */
#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
3010
3011 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
3012 {
3013         refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
3014         mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
3015         mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
3016         mutex_init(&mlxsw_sp->parsing.lock);
3017 }
3018
/* Destroy the parsing lock; warn if increased-depth references leaked. */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
3024
/* Reference-counted mapping of an IPv6 address to its KVDL entry. */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash table key */
	struct rhash_head ht_node;
	u32 kvdl_index;		/* KVDL entry holding the address */
	refcount_t refcount;
};
3031
/* The raw in6_addr is the key; lookups pass &in6_addr directly. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
3038
/* Allocate a KVDL entry for @addr6, program it via RIPS and track it in
 * the hash table with an initial reference. On success *p_kvdl_index holds
 * the new entry's index.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	/* Write the address into the allocated KVDL entry. */
	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	/* The RIPS write needs no explicit undo; releasing the KVDL entry
	 * is sufficient for all three failure points above.
	 */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
3084
/* Unlink and free an address node, then release its KVDL entry. */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	/* Cache the index: the node is freed before the KVDL release. */
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
3096
/* Get (or create) the KVDL index for @addr6, taking a reference. Pair each
 * successful call with mlxsw_sp_ipv6_addr_put().
 */
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		/* Address already programmed - just take a reference. */
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}
3119
3120 void
3121 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
3122 {
3123         struct mlxsw_sp_ipv6_addr_node *node;
3124
3125         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3126         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3127                                       mlxsw_sp_ipv6_addr_ht_params);
3128         if (WARN_ON(!node))
3129                 goto out_unlock;
3130
3131         if (!refcount_dec_and_test(&node->refcount))
3132                 goto out_unlock;
3133
3134         mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
3135
3136 out_unlock:
3137         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3138 }
3139
3140 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3141 {
3142         int err;
3143
3144         err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3145                               &mlxsw_sp_ipv6_addr_ht_params);
3146         if (err)
3147                 return err;
3148
3149         mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3150         return 0;
3151 }
3152
/* Tear down the IPv6 address hash table, reverse of the init above. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3158
/* Common initialization shared by all Spectrum generations. The per-ASIC
 * init functions (mlxsw_sp1_init() etc.) fill in the ops tables first and
 * then call here. Sub-systems are brought up in dependency order; on
 * failure, the error ladder at the bottom unwinds them in exact reverse
 * order of initialization.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);

	/* NOTE(review): the two early returns below skip
	 * mlxsw_sp_parsing_fini(); presumably parsing state needs no real
	 * teardown at this point - verify.
	 */
	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	/* Initialize before FIDs so that the LAG table is at the start of PGT
	 * and 8-aligned without overallocation.
	 */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fid_core_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP support is optional: the clock is only created when the bus can
	 * read it, and PTP state is only created when a clock exists.
	 */
	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	/* Ports are created last, once every sub-system they depend on is
	 * in place.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in exact reverse order of the initialization above. */
err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
err_fid_core_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
3397
3398 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3399                           const struct mlxsw_bus_info *mlxsw_bus_info,
3400                           struct netlink_ext_ack *extack)
3401 {
3402         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3403
3404         mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
3405         mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3406         mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3407         mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3408         mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3409         mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
3410         mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3411         mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
3412         mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
3413         mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
3414         mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
3415         mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
3416         mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
3417         mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
3418         mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
3419         mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
3420         mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
3421         mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
3422         mlxsw_sp->listeners = mlxsw_sp1_listener;
3423         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
3424         mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
3425         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
3426         mlxsw_sp->pgt_smpe_index_valid = true;
3427
3428         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3429 }
3430
3431 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3432                           const struct mlxsw_bus_info *mlxsw_bus_info,
3433                           struct netlink_ext_ack *extack)
3434 {
3435         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3436
3437         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3438         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3439         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3440         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3441         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3442         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3443         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3444         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3445         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3446         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3447         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3448         mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
3449         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3450         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3451         mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
3452         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3453         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3454         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3455         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3456         mlxsw_sp->listeners = mlxsw_sp2_listener;
3457         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3458         mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3459         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
3460         mlxsw_sp->pgt_smpe_index_valid = false;
3461
3462         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3463 }
3464
3465 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
3466                           const struct mlxsw_bus_info *mlxsw_bus_info,
3467                           struct netlink_ext_ack *extack)
3468 {
3469         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3470
3471         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3472         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3473         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3474         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3475         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3476         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3477         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3478         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3479         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3480         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3481         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3482         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3483         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3484         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3485         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3486         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3487         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3488         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3489         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3490         mlxsw_sp->listeners = mlxsw_sp2_listener;
3491         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3492         mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3493         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
3494         mlxsw_sp->pgt_smpe_index_valid = false;
3495
3496         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3497 }
3498
3499 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
3500                           const struct mlxsw_bus_info *mlxsw_bus_info,
3501                           struct netlink_ext_ack *extack)
3502 {
3503         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3504
3505         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3506         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3507         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3508         mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
3509         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3510         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3511         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3512         mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
3513         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3514         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3515         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3516         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3517         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3518         mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
3519         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3520         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3521         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3522         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3523         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3524         mlxsw_sp->listeners = mlxsw_sp2_listener;
3525         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3526         mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3527         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
3528         mlxsw_sp->pgt_smpe_index_valid = false;
3529
3530         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3531 }
3532
/* Tear down everything mlxsw_sp_init() brought up, in exact reverse
 * order of initialization. Must be kept in sync with the error ladder of
 * mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state and clock only exist when a clock was created in init. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3566
/* Device configuration profile handed to the core for Spectrum-1. Unlike
 * the SP2/SP4 profiles below, it explicitly partitions the KVD into
 * linear/hash-single/hash-double areas.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	/* Hash area split: 59 parts single vs. 41 parts double entries. */
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3587
/* Device configuration profile handed to the core for Spectrum-2. No KVD
 * partitioning here; the KVD is registered as a single resource (see
 * mlxsw_sp2_resources_kvd_register()).
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3608
3609 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
3610  * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
3611  * table.
3612  */
3613 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
3614
/* Device configuration profile handed to the core for Spectrum-4. Same as
 * the Spectrum-2 profile, plus a LAG table cap (rationale in the comment
 * above MLXSW_SP4_CONFIG_PROFILE_MAX_LAG).
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3637
3638 static void
3639 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3640                                       struct devlink_resource_size_params *kvd_size_params,
3641                                       struct devlink_resource_size_params *linear_size_params,
3642                                       struct devlink_resource_size_params *hash_double_size_params,
3643                                       struct devlink_resource_size_params *hash_single_size_params)
3644 {
3645         u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3646                                                  KVD_SINGLE_MIN_SIZE);
3647         u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3648                                                  KVD_DOUBLE_MIN_SIZE);
3649         u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3650         u32 linear_size_min = 0;
3651
3652         devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3653                                           MLXSW_SP_KVD_GRANULARITY,
3654                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3655         devlink_resource_size_params_init(linear_size_params, linear_size_min,
3656                                           kvd_size - single_size_min -
3657                                           double_size_min,
3658                                           MLXSW_SP_KVD_GRANULARITY,
3659                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3660         devlink_resource_size_params_init(hash_double_size_params,
3661                                           double_size_min,
3662                                           kvd_size - single_size_min -
3663                                           linear_size_min,
3664                                           MLXSW_SP_KVD_GRANULARITY,
3665                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3666         devlink_resource_size_params_init(hash_single_size_params,
3667                                           single_size_min,
3668                                           kvd_size - double_size_min -
3669                                           linear_size_min,
3670                                           MLXSW_SP_KVD_GRANULARITY,
3671                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3672 }
3673
/* Register the Spectrum-1 KVD resource hierarchy with devlink: the KVD
 * itself as a top-level resource, with linear, hash-double and
 * hash-single partitions as its children. The partitions are sized from
 * the SP1 config profile. Registration order matters: a parent must be
 * registered before its children.
 *
 * NOTE(review): a failure partway through returns without unregistering
 * the resources registered so far; presumably the caller/core unwinds
 * on failed init - verify.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split what is left after the linear partition between hash-double
	 * and hash-single according to the profile's parts ratio;
	 * hash-double is rounded down to the allocation granularity and
	 * hash-single takes the remainder.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3739
3740 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3741 {
3742         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3743         struct devlink_resource_size_params kvd_size_params;
3744         u32 kvd_size;
3745
3746         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3747                 return -EIO;
3748
3749         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3750         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3751                                           MLXSW_SP_KVD_GRANULARITY,
3752                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3753
3754         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3755                                       kvd_size, MLXSW_SP_RESOURCE_KVD,
3756                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3757                                       &kvd_size_params);
3758 }
3759
3760 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3761 {
3762         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3763         struct devlink_resource_size_params span_size_params;
3764         u32 max_span;
3765
3766         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3767                 return -EIO;
3768
3769         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3770         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3771                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3772
3773         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3774                                       max_span, MLXSW_SP_RESOURCE_SPAN,
3775                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3776                                       &span_size_params);
3777 }
3778
3779 static int
3780 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3781 {
3782         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3783         struct devlink_resource_size_params size_params;
3784         u8 max_rif_mac_profiles;
3785
3786         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3787                 max_rif_mac_profiles = 1;
3788         else
3789                 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3790                                                           MAX_RIF_MAC_PROFILES);
3791         devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3792                                           max_rif_mac_profiles, 1,
3793                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3794
3795         return devl_resource_register(devlink,
3796                                       "rif_mac_profiles",
3797                                       max_rif_mac_profiles,
3798                                       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3799                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3800                                       &size_params);
3801 }
3802
3803 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3804 {
3805         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3806         struct devlink_resource_size_params size_params;
3807         u64 max_rifs;
3808
3809         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3810                 return -EIO;
3811
3812         max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3813         devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3814                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3815
3816         return devl_resource_register(devlink, "rifs", max_rifs,
3817                                       MLXSW_SP_RESOURCE_RIFS,
3818                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3819                                       &size_params);
3820 }
3821
3822 static int
3823 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
3824 {
3825         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3826         struct devlink_resource_size_params size_params;
3827         u64 max;
3828
3829         if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
3830                 return -EIO;
3831
3832         max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
3833         devlink_resource_size_params_init(&size_params, max, max, 1,
3834                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3835
3836         return devl_resource_register(devlink, "port_range_registers", max,
3837                                       MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
3838                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3839                                       &size_params);
3840 }
3841
/* Register all Spectrum-1 devlink resources: KVD partitions, SPAN,
 * counters, policers, RIF MAC profiles, RIFs and L4 port ranges.
 * On any failure everything registered so far is unregistered.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	/* Removes every resource registered above, KVD included. */
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3885
/* Register all Spectrum-2 (and later) devlink resources: KVD, SPAN,
 * counters, policers, RIF MAC profiles, RIFs and L4 port ranges.
 * On any failure everything registered so far is unregistered.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	/* Removes every resource registered above, KVD included. */
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3929
/* Compute the sizes of the KVD linear, hash-single and hash-double
 * partitions. Sizes configured by the user via devlink resources take
 * precedence; otherwise defaults are derived from the config profile.
 * Returns 0 on success, -EIO if the required device resources are
 * missing or the computed sizes violate the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplication of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		/* Default: profile ratio of the non-linear remainder,
		 * rounded down to the KVD granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		/* Single gets whatever the linear and double parts left. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3984
3985 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3986                                      struct sk_buff *skb, u16 local_port)
3987 {
3988         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3989
3990         skb_pull(skb, MLXSW_TXHDR_LEN);
3991         mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
3992 }
3993
/* mlxsw driver instance for Spectrum-1 devices. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind                           = mlxsw_sp1_driver_name,
	.priv_size                      = sizeof(struct mlxsw_sp),
	.fw_req_rev                     = &mlxsw_sp1_fw_rev,
	.fw_filename                    = MLXSW_SP1_FW_FILENAME,
	.init                           = mlxsw_sp1_init,
	.fini                           = mlxsw_sp_fini,
	.port_split                     = mlxsw_sp_port_split,
	.port_unsplit                   = mlxsw_sp_port_unsplit,
	.sb_pool_get                    = mlxsw_sp_sb_pool_get,
	.sb_pool_set                    = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init                      = mlxsw_sp_trap_init,
	.trap_fini                      = mlxsw_sp_trap_fini,
	.trap_action_set                = mlxsw_sp_trap_action_set,
	.trap_group_init                = mlxsw_sp_trap_group_init,
	.trap_group_set                 = mlxsw_sp_trap_group_set,
	.trap_policer_init              = mlxsw_sp_trap_policer_init,
	.trap_policer_fini              = mlxsw_sp_trap_policer_fini,
	.trap_policer_set               = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct                = mlxsw_sp_txhdr_construct,
	.resources_register             = mlxsw_sp1_resources_register,
	/* Only SP1 provides kvd_sizes_get; later ASICs don't set it. */
	.kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted                = mlxsw_sp_ptp_transmitted,
	.txhdr_len                      = MLXSW_TXHDR_LEN,
	.profile                        = &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2            = false,
};
4030
/* mlxsw driver instance for Spectrum-2 devices. */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind                           = mlxsw_sp2_driver_name,
	.priv_size                      = sizeof(struct mlxsw_sp),
	.fw_req_rev                     = &mlxsw_sp2_fw_rev,
	.fw_filename                    = MLXSW_SP2_FW_FILENAME,
	.init                           = mlxsw_sp2_init,
	.fini                           = mlxsw_sp_fini,
	.port_split                     = mlxsw_sp_port_split,
	.port_unsplit                   = mlxsw_sp_port_unsplit,
	.ports_remove_selected          = mlxsw_sp_ports_remove_selected,
	.sb_pool_get                    = mlxsw_sp_sb_pool_get,
	.sb_pool_set                    = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init                      = mlxsw_sp_trap_init,
	.trap_fini                      = mlxsw_sp_trap_fini,
	.trap_action_set                = mlxsw_sp_trap_action_set,
	.trap_group_init                = mlxsw_sp_trap_group_init,
	.trap_group_set                 = mlxsw_sp_trap_group_set,
	.trap_policer_init              = mlxsw_sp_trap_policer_init,
	.trap_policer_fini              = mlxsw_sp_trap_policer_fini,
	.trap_policer_set               = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct                = mlxsw_sp_txhdr_construct,
	.resources_register             = mlxsw_sp2_resources_register,
	.ptp_transmitted                = mlxsw_sp_ptp_transmitted,
	.txhdr_len                      = MLXSW_TXHDR_LEN,
	.profile                        = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2            = true,
};
4067
/* mlxsw driver instance for Spectrum-3 devices. Reuses the Spectrum-2
 * resource registration and config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind                           = mlxsw_sp3_driver_name,
	.priv_size                      = sizeof(struct mlxsw_sp),
	.fw_req_rev                     = &mlxsw_sp3_fw_rev,
	.fw_filename                    = MLXSW_SP3_FW_FILENAME,
	.init                           = mlxsw_sp3_init,
	.fini                           = mlxsw_sp_fini,
	.port_split                     = mlxsw_sp_port_split,
	.port_unsplit                   = mlxsw_sp_port_unsplit,
	.ports_remove_selected          = mlxsw_sp_ports_remove_selected,
	.sb_pool_get                    = mlxsw_sp_sb_pool_get,
	.sb_pool_set                    = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init                      = mlxsw_sp_trap_init,
	.trap_fini                      = mlxsw_sp_trap_fini,
	.trap_action_set                = mlxsw_sp_trap_action_set,
	.trap_group_init                = mlxsw_sp_trap_group_init,
	.trap_group_set                 = mlxsw_sp_trap_group_set,
	.trap_policer_init              = mlxsw_sp_trap_policer_init,
	.trap_policer_fini              = mlxsw_sp_trap_policer_fini,
	.trap_policer_set               = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct                = mlxsw_sp_txhdr_construct,
	.resources_register             = mlxsw_sp2_resources_register,
	.ptp_transmitted                = mlxsw_sp_ptp_transmitted,
	.txhdr_len                      = MLXSW_TXHDR_LEN,
	.profile                        = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2            = true,
};
4104
/* mlxsw driver instance for Spectrum-4 devices. Reuses the Spectrum-2
 * resource registration.
 * NOTE(review): unlike SP1-SP3, no .fw_req_rev/.fw_filename are set
 * here - presumably firmware validation/flashing is handled differently
 * for SP4; confirm against the core probe path.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind                           = mlxsw_sp4_driver_name,
	.priv_size                      = sizeof(struct mlxsw_sp),
	.init                           = mlxsw_sp4_init,
	.fini                           = mlxsw_sp_fini,
	.port_split                     = mlxsw_sp_port_split,
	.port_unsplit                   = mlxsw_sp_port_unsplit,
	.ports_remove_selected          = mlxsw_sp_ports_remove_selected,
	.sb_pool_get                    = mlxsw_sp_sb_pool_get,
	.sb_pool_set                    = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init                      = mlxsw_sp_trap_init,
	.trap_fini                      = mlxsw_sp_trap_fini,
	.trap_action_set                = mlxsw_sp_trap_action_set,
	.trap_group_init                = mlxsw_sp_trap_group_init,
	.trap_group_set                 = mlxsw_sp_trap_group_set,
	.trap_policer_init              = mlxsw_sp_trap_policer_init,
	.trap_policer_fini              = mlxsw_sp_trap_policer_fini,
	.trap_policer_set               = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct                = mlxsw_sp_txhdr_construct,
	.resources_register             = mlxsw_sp2_resources_register,
	.ptp_transmitted                = mlxsw_sp_ptp_transmitted,
	.txhdr_len                      = MLXSW_TXHDR_LEN,
	.profile                        = &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2            = true,
};
4139
/* Return true if @dev is an mlxsw_sp port netdev, identified by its
 * netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4144
4145 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4146                                    struct netdev_nested_priv *priv)
4147 {
4148         int ret = 0;
4149
4150         if (mlxsw_sp_port_dev_check(lower_dev)) {
4151                 priv->data = (void *)netdev_priv(lower_dev);
4152                 ret = 1;
4153         }
4154
4155         return ret;
4156 }
4157
4158 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4159 {
4160         struct netdev_nested_priv priv = {
4161                 .data = NULL,
4162         };
4163
4164         if (mlxsw_sp_port_dev_check(dev))
4165                 return netdev_priv(dev);
4166
4167         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4168
4169         return (struct mlxsw_sp_port *)priv.data;
4170 }
4171
4172 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4173 {
4174         struct mlxsw_sp_port *mlxsw_sp_port;
4175
4176         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4177         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4178 }
4179
4180 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4181 {
4182         struct netdev_nested_priv priv = {
4183                 .data = NULL,
4184         };
4185
4186         if (mlxsw_sp_port_dev_check(dev))
4187                 return netdev_priv(dev);
4188
4189         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4190                                       &priv);
4191
4192         return (struct mlxsw_sp_port *)priv.data;
4193 }
4194
/* Take a reference on the increased parsing depth. The MPRS register is
 * only written on the 0 -> 1 transition; subsequent callers just bump
 * the refcount. Serialized by parsing.lock. Returns 0 or a negative
 * errno from the register write.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Depth already increased - just take another reference. */
	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	/* Cache the new depth and publish the first reference. */
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4218
/* Drop a reference on the increased parsing depth taken by
 * mlxsw_sp_parsing_depth_inc(). The default depth is restored only when
 * the last reference goes away. The register write result is
 * intentionally ignored - there is no way to report it to callers.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Other users still need the increased depth. */
	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
4236
/* Program the VXLAN UDP destination port used for parsing, preserving
 * the currently configured parsing depth. The cached value is updated
 * only after the register write succeeds. Returns 0 or a negative
 * errno.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	/* Cache in host byte order for later MPRS writes. */
	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4257
/* Make @mlxsw_sp_port leave any bridge that @lag_dev, or one of the
 * LAG's upper devices, is a port of. Used when the port leaves the LAG
 * and its bridge memberships through the LAG become invalid.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG itself may be enslaved to a bridge. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* So may any of the LAG's direct uppers. */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
4276
4277 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4278 {
4279         char sldr_pl[MLXSW_REG_SLDR_LEN];
4280
4281         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4282         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4283 }
4284
4285 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4286 {
4287         char sldr_pl[MLXSW_REG_SLDR_LEN];
4288
4289         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4290         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4291 }
4292
4293 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4294                                      u16 lag_id, u8 port_index)
4295 {
4296         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4297         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4298
4299         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4300                                       lag_id, port_index);
4301         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4302 }
4303
4304 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4305                                         u16 lag_id)
4306 {
4307         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4308         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4309
4310         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4311                                          lag_id);
4312         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4313 }
4314
4315 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4316                                         u16 lag_id)
4317 {
4318         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4319         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4320
4321         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4322                                         lag_id);
4323         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4324 }
4325
4326 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4327                                          u16 lag_id)
4328 {
4329         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4330         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4331
4332         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4333                                          lag_id);
4334         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4335 }
4336
/* Resolve the LAG ID for @lag_dev. If the netdev is already associated
 * with a LAG entry, its index is returned; otherwise the first free
 * index is. Returns -EBUSY when all LAG entries are taken, or an error
 * from querying the maximum LAG count.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_lag *lag;
	int free_lag_id = -1;
	u16 max_lag;
	int err, i;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	/* Full scan: an existing entry for lag_dev always wins over a
	 * free slot, so don't stop at the first free index.
	 */
	for (i = 0; i < max_lag; i++) {
		lag = &mlxsw_sp->lags[i];
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
4366
4367 static bool
4368 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4369                           struct net_device *lag_dev,
4370                           struct netdev_lag_upper_info *lag_upper_info,
4371                           struct netlink_ext_ack *extack)
4372 {
4373         u16 lag_id;
4374
4375         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4376                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4377                 return false;
4378         }
4379         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4380                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4381                 return false;
4382         }
4383         return true;
4384 }
4385
4386 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4387                                        u16 lag_id, u8 *p_port_index)
4388 {
4389         u64 max_lag_members;
4390         int i;
4391
4392         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4393                                              MAX_LAG_MEMBERS);
4394         for (i = 0; i < max_lag_members; i++) {
4395                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4396                         *p_port_index = i;
4397                         return 0;
4398                 }
4399         }
4400         return -EBUSY;
4401 }
4402
/* Have @mlxsw_sp_port join every bridge that @lag_dev, or one of the
 * LAG's VLAN uppers, is enslaved to. On failure, already-performed
 * joins are rolled back. Returns 0 or a negative errno.
 */
static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *lag_dev,
					   struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;
	int done = 0;	/* number of VLAN-upper bridge joins performed */
	int err;

	/* The LAG itself may be enslaved to a bridge. */
	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master)) {
		err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
						extack);
		if (err)
			return err;
	}

	/* Join bridges above the LAG's VLAN upper devices. */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (master && netif_is_bridge_master(master)) {
			err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
							upper_dev, master,
							extack);
			if (err)
				goto err_port_bridge_join;
		}

		++done;
	}

	return 0;

err_port_bridge_join:
	/* Undo exactly the first 'done' VLAN-upper joins; iteration
	 * order is the same as in the join loop above.
	 */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master || !netif_is_bridge_master(master))
			continue;

		if (!done--)
			break;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	/* Finally undo the join through the LAG device itself. */
	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);

	return err;
}
4460
/* Reverse of mlxsw_sp_lag_uppers_bridge_join(): have @mlxsw_sp_port
 * leave the bridges reachable through @lag_dev and its VLAN uppers.
 */
static void
mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master)
			continue;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	/* Leave the bridge above the LAG itself, if any. */
	master = netdev_master_upper_dev_get(lag_dev);
	if (master)
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
}
4484
/* Enslave @mlxsw_sp_port to @lag_dev: resolve (or create) the device
 * LAG entry, join the LAG's bridges, add the port as a collector
 * member, and set up FID/router state. Fully rolled back on failure.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_lag *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = &mlxsw_sp->lags[lag_id];
	/* First port in this LAG - create the device LAG entry. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;

	err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
					      extack);
	if (err)
		goto err_lag_uppers_bridge_join;

	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
	if (err)
		goto err_fid_port_join_lag;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
					    extack);
	if (err)
		goto err_router_join;

	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
	if (err)
		goto err_replay;

	return 0;

	/* Rollback in exact reverse order of the setup above. */
err_replay:
	mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
	mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
err_fid_port_join_lag:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
	/* Destroy the device LAG entry if we were its only user. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
4562
/* Undo mlxsw_sp_port_lag_join(): detach the port from the LAG in hardware,
 * drop the LAG's reference count and destroy the LAG when this was the last
 * member port. No-op if the port is not currently lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_lag *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = &mlxsw_sp->lags[lag_id];
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);

	/* Last member leaving: tear down the hardware LAG before clearing
	 * the port mapping below.
	 */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
4599
4600 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4601                                       u16 lag_id)
4602 {
4603         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4604         char sldr_pl[MLXSW_REG_SLDR_LEN];
4605
4606         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4607                                          mlxsw_sp_port->local_port);
4608         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4609 }
4610
4611 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4612                                          u16 lag_id)
4613 {
4614         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4615         char sldr_pl[MLXSW_REG_SLDR_LEN];
4616
4617         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4618                                             mlxsw_sp_port->local_port);
4619         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4620 }
4621
4622 static int
4623 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4624 {
4625         int err;
4626
4627         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4628                                            mlxsw_sp_port->lag_id);
4629         if (err)
4630                 return err;
4631
4632         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4633         if (err)
4634                 goto err_dist_port_add;
4635
4636         return 0;
4637
4638 err_dist_port_add:
4639         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4640         return err;
4641 }
4642
4643 static int
4644 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4645 {
4646         int err;
4647
4648         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4649                                             mlxsw_sp_port->lag_id);
4650         if (err)
4651                 return err;
4652
4653         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4654                                             mlxsw_sp_port->lag_id);
4655         if (err)
4656                 goto err_col_port_disable;
4657
4658         return 0;
4659
4660 err_col_port_disable:
4661         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4662         return err;
4663 }
4664
4665 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4666                                      struct netdev_lag_lower_state_info *info)
4667 {
4668         if (info->tx_enabled)
4669                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4670         else
4671                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4672 }
4673
4674 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4675                                  bool enable)
4676 {
4677         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4678         enum mlxsw_reg_spms_state spms_state;
4679         char *spms_pl;
4680         u16 vid;
4681         int err;
4682
4683         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4684                               MLXSW_REG_SPMS_STATE_DISCARDING;
4685
4686         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4687         if (!spms_pl)
4688                 return -ENOMEM;
4689         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4690
4691         for (vid = 0; vid < VLAN_N_VID; vid++)
4692                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4693
4694         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4695         kfree(spms_pl);
4696         return err;
4697 }
4698
/* Prepare the port for enslavement to an OVS master: switch it to Virtual
 * Port mode, open STP on all VLANs, make VLANs 1..4094 members and disable
 * learning on VLANs 1..4095 (OVS does its own learning in software).
 * Rolls back everything already done on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only on the VIDs that were already disabled;
	 * 'vid' is the one that failed, so start one below it.
	 */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4733
4734 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4735 {
4736         u16 vid;
4737
4738         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4739                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4740                                                vid, true);
4741
4742         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4743                                false, false);
4744         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4745         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4746 }
4747
4748 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4749 {
4750         unsigned int num_vxlans = 0;
4751         struct net_device *dev;
4752         struct list_head *iter;
4753
4754         netdev_for_each_lower_dev(br_dev, dev, iter) {
4755                 if (netif_is_vxlan(dev))
4756                         num_vxlans++;
4757         }
4758
4759         return num_vxlans > 1;
4760 }
4761
4762 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4763 {
4764         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4765         struct net_device *dev;
4766         struct list_head *iter;
4767
4768         netdev_for_each_lower_dev(br_dev, dev, iter) {
4769                 u16 pvid;
4770                 int err;
4771
4772                 if (!netif_is_vxlan(dev))
4773                         continue;
4774
4775                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4776                 if (err || !pvid)
4777                         continue;
4778
4779                 if (test_and_set_bit(pvid, vlans))
4780                         return false;
4781         }
4782
4783         return true;
4784 }
4785
4786 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4787                                            struct netlink_ext_ack *extack)
4788 {
4789         if (br_multicast_enabled(br_dev)) {
4790                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4791                 return false;
4792         }
4793
4794         if (!br_vlan_enabled(br_dev) &&
4795             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4796                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4797                 return false;
4798         }
4799
4800         if (br_vlan_enabled(br_dev) &&
4801             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4802                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4803                 return false;
4804         }
4805
4806         return true;
4807 }
4808
4809 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4810                                       struct net_device *dev)
4811 {
4812         return upper_dev == netdev_master_upper_dev_get(dev);
4813 }
4814
4815 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
4816                                       unsigned long event, void *ptr,
4817                                       bool process_foreign);
4818
/* Recursively validate the topology above 'dev' as if each upper were being
 * linked right now: synthesize a PRECHANGEUPPER notification for every
 * existing upper and feed it through the driver's own event handler. Used
 * when replaying enslavements to reject topologies the driver would have
 * vetoed had it seen them form.
 */
static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
					      struct net_device *dev,
					      struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
		struct netdev_notifier_changeupper_info info = {
			.info = {
				.dev = dev,
				.extack = extack,
			},
			.master = mlxsw_sp_netdev_is_master(upper_dev, dev),
			.upper_dev = upper_dev,
			.linking = true,

			/* upper_info is relevant for LAG devices. But we would
			 * only need this if LAG were a valid upper above
			 * another upper (e.g. a bridge that is a member of a
			 * LAG), and that is never a valid configuration. So we
			 * can keep this as NULL.
			 */
			.upper_info = NULL,
		};

		err = __mlxsw_sp_netdevice_event(mlxsw_sp,
						 NETDEV_PRECHANGEUPPER,
						 &info, true);
		if (err)
			return err;

		/* Validate the uppers of this upper as well. */
		err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
							 extack);
		if (err)
			return err;
	}

	return 0;
}
4860
/* Handle [PRE]CHANGEUPPER events for a front-panel port 'dev'.
 * 'lower_dev' is the device that is actually being linked/unlinked (the port
 * itself, or a LAG above it when called per-LAG-lower). On PRECHANGEUPPER
 * the requested topology is validated and vetoed with an extack message if
 * unsupported; on CHANGEUPPER the join/leave is reflected to the device.
 * 'replay_deslavement' controls whether deslavement is replayed here or left
 * to per-LAG post-processing.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr,
					       bool replay_deslavement)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper device types can be offloaded. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* The checks below only apply when linking. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Validate the pre-existing topology above the new upper,
		 * unless it is a bridge already offloaded by this driver.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			/* Only 802.1Q and 802.1AD VLAN-aware bridges are
			 * supported, and 802.1AD with further restrictions.
			 */
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      lower_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only a VLAN upper being unlinked from a bridge
			 * needs handling here.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
5016
5017 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5018                                                unsigned long event, void *ptr)
5019 {
5020         struct netdev_notifier_changelowerstate_info *info;
5021         struct mlxsw_sp_port *mlxsw_sp_port;
5022         int err;
5023
5024         mlxsw_sp_port = netdev_priv(dev);
5025         info = ptr;
5026
5027         switch (event) {
5028         case NETDEV_CHANGELOWERSTATE:
5029                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5030                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5031                                                         info->lower_state_info);
5032                         if (err)
5033                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5034                 }
5035                 break;
5036         }
5037
5038         return 0;
5039 }
5040
5041 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5042                                          struct net_device *port_dev,
5043                                          unsigned long event, void *ptr,
5044                                          bool replay_deslavement)
5045 {
5046         switch (event) {
5047         case NETDEV_PRECHANGEUPPER:
5048         case NETDEV_CHANGEUPPER:
5049                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5050                                                            event, ptr,
5051                                                            replay_deslavement);
5052         case NETDEV_CHANGELOWERSTATE:
5053                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5054                                                            ptr);
5055         }
5056
5057         return 0;
5058 }
5059
5060 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
5061  * to do any per-LAG / per-LAG-upper processing.
5062  */
5063 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
5064                                              unsigned long event,
5065                                              void *ptr)
5066 {
5067         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
5068         struct netdev_notifier_changeupper_info *info = ptr;
5069
5070         if (!mlxsw_sp)
5071                 return 0;
5072
5073         switch (event) {
5074         case NETDEV_CHANGEUPPER:
5075                 if (info->linking)
5076                         break;
5077                 if (netif_is_bridge_master(info->upper_dev))
5078                         mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
5079                 break;
5080         }
5081         return 0;
5082 }
5083
5084 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5085                                         unsigned long event, void *ptr)
5086 {
5087         struct net_device *dev;
5088         struct list_head *iter;
5089         int ret;
5090
5091         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5092                 if (mlxsw_sp_port_dev_check(dev)) {
5093                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5094                                                             ptr, false);
5095                         if (ret)
5096                                 return ret;
5097                 }
5098         }
5099
5100         return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
5101 }
5102
/* Handle [PRE]CHANGEUPPER events for a VLAN device 'vlan_dev' whose real
 * device is the port 'dev' (VID 'vid'). Validates requested uppers of the
 * VLAN device on PRECHANGEUPPER and reflects bridge join/leave and macvlan
 * removal on CHANGEUPPER. 'replay_deslavement' controls whether deslavement
 * is replayed here or deferred to per-LAG post-processing.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid, bool replay_deslavement)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper types are supported above a port VLAN. */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Validate the pre-existing topology above the new upper,
		 * unless it is a bridge already offloaded by this driver.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      vlan_dev);
			}
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
5170
5171 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5172                                                   struct net_device *lag_dev,
5173                                                   unsigned long event,
5174                                                   void *ptr, u16 vid)
5175 {
5176         struct net_device *dev;
5177         struct list_head *iter;
5178         int ret;
5179
5180         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5181                 if (mlxsw_sp_port_dev_check(dev)) {
5182                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5183                                                                  event, ptr,
5184                                                                  vid, false);
5185                         if (ret)
5186                                 return ret;
5187                 }
5188         }
5189
5190         return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5191 }
5192
5193 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5194                                                 struct net_device *vlan_dev,
5195                                                 struct net_device *br_dev,
5196                                                 unsigned long event, void *ptr,
5197                                                 u16 vid, bool process_foreign)
5198 {
5199         struct netdev_notifier_changeupper_info *info = ptr;
5200         struct netlink_ext_ack *extack;
5201         struct net_device *upper_dev;
5202
5203         if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5204                 return 0;
5205
5206         extack = netdev_notifier_info_to_extack(&info->info);
5207
5208         switch (event) {
5209         case NETDEV_PRECHANGEUPPER:
5210                 upper_dev = info->upper_dev;
5211                 if (!netif_is_macvlan(upper_dev) &&
5212                     !netif_is_l3_master(upper_dev)) {
5213                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5214                         return -EOPNOTSUPP;
5215                 }
5216                 break;
5217         case NETDEV_CHANGEUPPER:
5218                 upper_dev = info->upper_dev;
5219                 if (info->linking)
5220                         break;
5221                 if (netif_is_macvlan(upper_dev))
5222                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5223                 break;
5224         }
5225
5226         return 0;
5227 }
5228
5229 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5230                                          struct net_device *vlan_dev,
5231                                          unsigned long event, void *ptr,
5232                                          bool process_foreign)
5233 {
5234         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5235         u16 vid = vlan_dev_vlan_id(vlan_dev);
5236
5237         if (mlxsw_sp_port_dev_check(real_dev))
5238                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5239                                                           event, ptr, vid,
5240                                                           true);
5241         else if (netif_is_lag_master(real_dev))
5242                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5243                                                               real_dev, event,
5244                                                               ptr, vid);
5245         else if (netif_is_bridge_master(real_dev))
5246                 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5247                                                             real_dev, event,
5248                                                             ptr, vid,
5249                                                             process_foreign);
5250
5251         return 0;
5252 }
5253
/* Handle netdev notifier events for a bridge device.
 *
 * NETDEV_PRECHANGEUPPER: validate a prospective upper device of the bridge
 * and veto unsupported configurations via extack.
 * NETDEV_CHANGEUPPER: on unlinking, tear down state associated with the
 * former upper device (RIF for a VLAN upper, macvlan entry for a macvlan
 * upper).
 */
static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *br_dev,
					   unsigned long event, void *ptr,
					   bool process_foreign)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Unless the caller asked to process foreign bridges as well, skip
	 * bridges that have no mlxsw lower device.
	 */
	if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, macvlan and L3 master uppers are supported on
		 * top of a bridge.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* No uppers may be linked on top of an 802.1ad bridge. */
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		/* VLAN uppers of the bridge must use the 802.1q protocol. */
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Unlinking: clean up whatever state the upper device had. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5306
5307 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5308                                             unsigned long event, void *ptr)
5309 {
5310         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5311         struct netdev_notifier_changeupper_info *info = ptr;
5312         struct netlink_ext_ack *extack;
5313         struct net_device *upper_dev;
5314
5315         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5316                 return 0;
5317
5318         extack = netdev_notifier_info_to_extack(&info->info);
5319         upper_dev = info->upper_dev;
5320
5321         if (!netif_is_l3_master(upper_dev)) {
5322                 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5323                 return -EOPNOTSUPP;
5324         }
5325
5326         return 0;
5327 }
5328
/* Handle netdev notifier events for a VxLAN device.
 *
 * Joins the VxLAN device to its bridge master when it is linked to a
 * relevant bridge (NETDEV_CHANGEUPPER) or brought up while already enslaved
 * (NETDEV_PRE_UP), and leaves it on unlinking or NETDEV_DOWN.  Bridges with
 * no mlxsw lower device are ignored.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* A device that is not running is joined later, when
			 * it is brought up (NETDEV_PRE_UP case below).
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Only act if the device is already enslaved to a relevant
		 * bridge master.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
5396
5397 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5398                                       unsigned long event, void *ptr,
5399                                       bool process_foreign)
5400 {
5401         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5402         struct mlxsw_sp_span_entry *span_entry;
5403         int err = 0;
5404
5405         if (event == NETDEV_UNREGISTER) {
5406                 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5407                 if (span_entry)
5408                         mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5409         }
5410
5411         if (netif_is_vxlan(dev))
5412                 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5413         else if (mlxsw_sp_port_dev_check(dev))
5414                 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5415         else if (netif_is_lag_master(dev))
5416                 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5417         else if (is_vlan_dev(dev))
5418                 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5419                                                     process_foreign);
5420         else if (netif_is_bridge_master(dev))
5421                 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5422                                                       process_foreign);
5423         else if (netif_is_macvlan(dev))
5424                 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5425
5426         return err;
5427 }
5428
5429 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5430                                     unsigned long event, void *ptr)
5431 {
5432         struct mlxsw_sp *mlxsw_sp;
5433         int err;
5434
5435         mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5436         mlxsw_sp_span_respin(mlxsw_sp);
5437         err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5438
5439         return notifier_from_errno(err);
5440 }
5441
/* PCI device ID tables and PCI driver definitions, one pair per Spectrum
 * ASIC generation.  The drivers are registered/unregistered through the
 * mlxsw PCI helpers in mlxsw_sp_module_init()/mlxsw_sp_module_exit().
 */

/* Spectrum-1 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

/* Spectrum-2 */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

/* Spectrum-3 */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Spectrum-4 */
static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5481
/* Module init: register the four Spectrum core drivers, then the four PCI
 * drivers.  On failure, the goto chain unwinds every registration done so
 * far, in reverse order.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		return err;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

	/* Error unwind: each label undoes the registrations that succeeded
	 * before the failing step.
	 */
err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	return err;
}
5536
/* Module exit: unregister everything in the reverse order of
 * mlxsw_sp_module_init() — PCI drivers first, then core drivers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
}
5548
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: the PCI device tables enable autoloading on device
 * discovery, and the MODULE_FIRMWARE declarations record the firmware
 * images the driver may request.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);