/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __LINUX_NET_DSA_H
#define __LINUX_NET_DSA_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_data/dsa.h>
#include <linux/phylink.h>
#include <net/devlink.h>
#include <net/switchdev.h>

struct tc_action;
struct phy_device;
struct fixed_phy_status;
struct phylink_link_state;

#define DSA_TAG_PROTO_NONE_VALUE 0
#define DSA_TAG_PROTO_BRCM_VALUE 1
#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE 2
#define DSA_TAG_PROTO_DSA_VALUE 3
#define DSA_TAG_PROTO_EDSA_VALUE 4
#define DSA_TAG_PROTO_GSWIP_VALUE 5
#define DSA_TAG_PROTO_KSZ9477_VALUE 6
#define DSA_TAG_PROTO_KSZ9893_VALUE 7
#define DSA_TAG_PROTO_LAN9303_VALUE 8
#define DSA_TAG_PROTO_MTK_VALUE 9
#define DSA_TAG_PROTO_QCA_VALUE 10
#define DSA_TAG_PROTO_TRAILER_VALUE 11
#define DSA_TAG_PROTO_8021Q_VALUE 12
#define DSA_TAG_PROTO_SJA1105_VALUE 13
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
#define DSA_TAG_PROTO_OCELOT_VALUE 15
#define DSA_TAG_PROTO_AR9331_VALUE 16
#define DSA_TAG_PROTO_RTL4_A_VALUE 17
#define DSA_TAG_PROTO_HELLCREEK_VALUE 18
#define DSA_TAG_PROTO_XRS700X_VALUE 19
#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
#define DSA_TAG_PROTO_SJA1110_VALUE 23
#define DSA_TAG_PROTO_RTL8_4_VALUE 24

enum dsa_tag_protocol {
        DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
        DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
        DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
        DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
        DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
        DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
        DSA_TAG_PROTO_GSWIP = DSA_TAG_PROTO_GSWIP_VALUE,
        DSA_TAG_PROTO_KSZ9477 = DSA_TAG_PROTO_KSZ9477_VALUE,
        DSA_TAG_PROTO_KSZ9893 = DSA_TAG_PROTO_KSZ9893_VALUE,
        DSA_TAG_PROTO_LAN9303 = DSA_TAG_PROTO_LAN9303_VALUE,
        DSA_TAG_PROTO_MTK = DSA_TAG_PROTO_MTK_VALUE,
        DSA_TAG_PROTO_QCA = DSA_TAG_PROTO_QCA_VALUE,
        DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE,
        DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
        DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
        DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
        DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
        DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
        DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
        DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
        DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
        DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
        DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
        DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
        DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
};

struct dsa_switch;

struct dsa_device_ops {
        struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
        struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
        void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
                        int *offset);
        int (*connect)(struct dsa_switch *ds);
        void (*disconnect)(struct dsa_switch *ds);
        unsigned int needed_headroom;
        unsigned int needed_tailroom;
        const char *name;
        enum dsa_tag_protocol proto;
        /* Some tagging protocols either mangle or shift the destination MAC
         * address, in which case the DSA master would drop packets on ingress
         * if what it understands out of the destination MAC address is not in
         * its RX filter.
         */
        bool promisc_on_master;
};

/* This structure defines the control interfaces that are overlaid by the
 * DSA layer on top of the DSA CPU/management net_device instance. This is
 * used by the core net_device layer while calling various net_device_ops
 * function pointers.
 */
struct dsa_netdevice_ops {
        int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr,
                        int cmd);
};

#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto)                            \
        MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))

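/* Sketch (illustrative, not part of this header): a tagging protocol driver
 * typically fills in a dsa_device_ops and registers it with the
 * module_dsa_tag_driver() helper defined at the end of this file. All
 * identifiers prefixed with "foo" are hypothetical.
 *
 *        static const struct dsa_device_ops foo_netdev_ops = {
 *                .name             = "foo",
 *                .proto            = DSA_TAG_PROTO_NONE,
 *                .xmit             = foo_tag_xmit,
 *                .rcv              = foo_tag_rcv,
 *                .needed_headroom  = 4,
 *        };
 *
 *        MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_NONE);
 */
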
struct dsa_lag {
        struct net_device *dev;
        unsigned int id;
        struct mutex fdb_lock;
        struct list_head fdbs;
        refcount_t refcount;
};

struct dsa_switch_tree {
        struct list_head list;

        /* List of switch ports */
        struct list_head ports;

        /* Notifier chain for switch-wide events */
        struct raw_notifier_head nh;

        /* Tree identifier */
        unsigned int index;

        /* Number of switches attached to this tree */
        struct kref refcount;

        /* Maps offloaded LAG netdevs to a zero-based linear ID for
         * drivers that need it.
         */
        struct dsa_lag **lags;

        /* Tagging protocol operations */
        const struct dsa_device_ops *tag_ops;

        /* Default tagging protocol preferred by the switches in this
         * tree.
         */
        enum dsa_tag_protocol default_proto;

        /* Has this tree been applied to the hardware? */
        bool setup;

        /*
         * Configuration data for the platform device that owns
         * this dsa switch tree instance.
         */
        struct dsa_platform_data *pd;

        /* List of DSA links composing the routing table */
        struct list_head rtable;

        /* Length of "lags" array */
        unsigned int lags_len;

        /* Track the largest switch index within a tree */
        unsigned int last_switch;
};

/* LAG IDs are one-based, the dst->lags array is zero-based */
#define dsa_lags_foreach_id(_id, _dst)                          \
        for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++)     \
                if ((_dst)->lags[(_id) - 1])

#define dsa_lag_foreach_port(_dp, _dst, _lag)                   \
        list_for_each_entry((_dp), &(_dst)->ports, list)        \
                if (dsa_port_offloads_lag((_dp), (_lag)))

#define dsa_hsr_foreach_port(_dp, _ds, _hsr)                    \
        list_for_each_entry((_dp), &(_ds)->dst->ports, list)    \
                if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))

static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
                unsigned int id)
{
        /* DSA LAG IDs are one-based, dst->lags is zero-based */
        return dst->lags[id - 1];
}

static inline int dsa_lag_id(struct dsa_switch_tree *dst,
                struct net_device *lag_dev)
{
        unsigned int id;

        dsa_lags_foreach_id(id, dst) {
                struct dsa_lag *lag = dsa_lag_by_id(dst, id);

                if (lag->dev == lag_dev)
                        return lag->id;
        }

        return -ENODEV;
}

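/* Sketch (hypothetical driver code, not part of this header): a driver's
 * .port_lag_join handler can walk the ports already offloading a LAG and
 * use the one-based linear LAG ID carried in struct dsa_lag. foo_set_trunk()
 * is an assumed driver-private helper.
 *
 *        static int foo_port_lag_join(struct dsa_switch *ds, int port,
 *                                     struct dsa_lag lag,
 *                                     struct netdev_lag_upper_info *info)
 *        {
 *                struct dsa_port *dp;
 *                int members = 0;
 *
 *                dsa_lag_foreach_port(dp, ds->dst, &lag)
 *                        members++;
 *
 *                return foo_set_trunk(ds, port, lag.id, members);
 *        }
 */
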
/* TC matchall action types */
enum dsa_port_mall_action_type {
        DSA_PORT_MALL_MIRROR,
        DSA_PORT_MALL_POLICER,
};

/* TC mirroring entry */
struct dsa_mall_mirror_tc_entry {
        u8 to_local_port;
        bool ingress;
};

/* TC port policer entry */
struct dsa_mall_policer_tc_entry {
        u32 burst;
        u64 rate_bytes_per_sec;
};

/* TC matchall entry */
struct dsa_mall_tc_entry {
        struct list_head list;
        unsigned long cookie;
        enum dsa_port_mall_action_type type;
        union {
                struct dsa_mall_mirror_tc_entry mirror;
                struct dsa_mall_policer_tc_entry policer;
        };
};

struct dsa_bridge {
        struct net_device *dev;
        unsigned int num;
        bool tx_fwd_offload;
        refcount_t refcount;
};

struct dsa_port {
        /* A CPU port is physically connected to a master device.
         * A user port exposed to userspace has a slave device.
         */
        union {
                struct net_device *master;
                struct net_device *slave;
        };

        /* Copy of the tagging protocol operations, for quicker access
         * in the data path. Valid only for the CPU ports.
         */
        const struct dsa_device_ops *tag_ops;

        /* Copies for faster access in master receive hot path */
        struct dsa_switch_tree *dst;
        struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);

        struct dsa_switch *ds;

        unsigned int index;

        enum {
                DSA_PORT_TYPE_UNUSED = 0,
                DSA_PORT_TYPE_CPU,
                DSA_PORT_TYPE_DSA,
                DSA_PORT_TYPE_USER,
        } type;

        const char *name;
        struct dsa_port *cpu_dp;
        u8 mac[ETH_ALEN];

        u8 stp_state;

        /* Warning: the following bit fields are not atomic, and updating them
         * can only be done from code paths where concurrency is not possible
         * (probe time or under rtnl_lock).
         */
        u8 vlan_filtering:1;

        /* Managed by DSA on user ports and by drivers on CPU and DSA ports */
        u8 learning:1;

        u8 lag_tx_enabled:1;

        u8 devlink_port_setup:1;

        /* Master state bits, valid only on CPU ports */
        u8 master_admin_up:1;
        u8 master_oper_up:1;

        u8 setup:1;

        struct device_node *dn;
        unsigned int ageing_time;

        struct dsa_bridge *bridge;
        struct devlink_port devlink_port;
        struct phylink *pl;
        struct phylink_config pl_config;
        struct dsa_lag *lag;
        struct net_device *hsr_dev;

        struct list_head list;

        /*
         * Original copy of the master netdev ethtool_ops
         */
        const struct ethtool_ops *orig_ethtool_ops;

        /*
         * Original copy of the master netdev net_device_ops
         */
        const struct dsa_netdevice_ops *netdev_ops;

        /* List of MAC addresses that must be forwarded on this port.
         * These are only valid on CPU ports and DSA links.
         */
        struct mutex addr_lists_lock;
        struct list_head fdbs;
        struct list_head mdbs;

        /* List of VLANs that CPU and DSA ports are members of. */
        struct mutex vlans_lock;
        struct list_head vlans;
};

/* TODO: ideally DSA ports would have a single dp->link_dp member,
 * and no dst->rtable nor this struct dsa_link would be needed,
 * but this would require some more complex tree walking,
 * so keep it stupid at the moment and list them all.
 */
struct dsa_link {
        struct dsa_port *dp;
        struct dsa_port *link_dp;
        struct list_head list;
};

enum dsa_db_type {
        DSA_DB_PORT,
        DSA_DB_LAG,
        DSA_DB_BRIDGE,
};

struct dsa_db {
        enum dsa_db_type type;

        union {
                const struct dsa_port *dp;
                struct dsa_lag lag;
                struct dsa_bridge bridge;
        };
};

struct dsa_mac_addr {
        unsigned char addr[ETH_ALEN];
        u16 vid;
        refcount_t refcount;
        struct list_head list;
        struct dsa_db db;
};

struct dsa_vlan {
        u16 vid;
        refcount_t refcount;
        struct list_head list;
};

struct dsa_switch {
        struct device *dev;

        /*
         * Parent switch tree, and switch index.
         */
        struct dsa_switch_tree *dst;
        unsigned int index;

        /* Warning: the following bit fields are not atomic, and updating them
         * can only be done from code paths where concurrency is not possible
         * (probe time or under rtnl_lock).
         */
        u32 setup:1;

        /* Disallow bridge core from requesting different VLAN awareness
         * settings on ports if not hardware-supported
         */
        u32 vlan_filtering_is_global:1;

        /* Keep VLAN filtering enabled on ports not offloading any upper */
        u32 needs_standalone_vlan_filtering:1;

        /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
         * that have vlan_filtering=0. All drivers should ideally set this (and
         * then the option would get removed), but it is unknown whether this
         * would break things or not.
         */
        u32 configure_vlan_while_not_filtering:1;

        /* If the switch driver always programs the CPU port as egress tagged
         * despite the VLAN configuration indicating otherwise, then setting
         * @untag_bridge_pvid will force the DSA receive path to pop the
         * bridge's default_pvid VLAN tagged frames to offer a consistent
         * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
         * device.
         */
        u32 untag_bridge_pvid:1;

        /* Let DSA manage the FDB entries towards the
         * CPU, based on the software bridge database.
         */
        u32 assisted_learning_on_cpu_port:1;

        /* In case vlan_filtering_is_global is set, the VLAN awareness state
         * should be retrieved from here and not from the per-port settings.
         */
        u32 vlan_filtering:1;

        /* For switches that only have the MRU configurable. To ensure the
         * configured MTU is not exceeded, normalization of MRU on all bridged
         * interfaces is needed.
         */
        u32 mtu_enforcement_ingress:1;

        /* Drivers that isolate the FDBs of multiple bridges must set this
         * to true to receive the bridge as an argument in .port_fdb_{add,del}
         * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
         * passed as zero.
         */
        u32 fdb_isolation:1;

        /* Listener for switch fabric events */
        struct notifier_block nb;

        /*
         * Give the switch driver somewhere to hang its private data
         * structure.
         */
        void *priv;

        void *tagger_data;

        /*
         * Configuration data for this switch.
         */
        struct dsa_chip_data *cd;

        /*
         * The switch operations.
         */
        const struct dsa_switch_ops *ops;

        /*
         * Slave mii_bus and devices for the individual ports.
         */
        u32 phys_mii_mask;
        struct mii_bus *slave_mii_bus;

        /* Ageing Time limits in msecs */
        unsigned int ageing_time_min;
        unsigned int ageing_time_max;

        /* Storage for drivers using tag_8021q */
        struct dsa_8021q_context *tag_8021q_ctx;

        /* devlink used to represent this switch device */
        struct devlink *devlink;

        /* Number of switch port queues */
        unsigned int num_tx_queues;

        /* Drivers that benefit from having an ID associated with each
         * offloaded LAG should set this to the maximum number of
         * supported IDs. DSA will then maintain a mapping of _at
         * least_ these many IDs, accessible to drivers via
         * dsa_lag_id().
         */
        unsigned int num_lag_ids;

        /* Drivers that support bridge forwarding offload or FDB isolation
         * should set this to the maximum number of bridges spanning the same
         * switch tree (or all trees, in the case of cross-tree bridging
         * support) that can be offloaded.
         */
        unsigned int max_num_bridges;

        unsigned int num_ports;
};

static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds == ds && dp->index == p)
                        return dp;

        return NULL;
}

static inline bool dsa_port_is_dsa(struct dsa_port *port)
{
        return port->type == DSA_PORT_TYPE_DSA;
}

static inline bool dsa_port_is_cpu(struct dsa_port *port)
{
        return port->type == DSA_PORT_TYPE_CPU;
}

static inline bool dsa_port_is_user(struct dsa_port *dp)
{
        return dp->type == DSA_PORT_TYPE_USER;
}

static inline bool dsa_port_is_unused(struct dsa_port *dp)
{
        return dp->type == DSA_PORT_TYPE_UNUSED;
}

static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
{
        return dsa_port_is_cpu(dp) && dp->master_admin_up &&
               dp->master_oper_up;
}

static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
}

static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
}

static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
{
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
}

static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
{
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}

#define dsa_tree_for_each_user_port(_dp, _dst)                  \
        list_for_each_entry((_dp), &(_dst)->ports, list)        \
                if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_port(_dp, _ds)                      \
        list_for_each_entry((_dp), &(_ds)->dst->ports, list)    \
                if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_safe(_dp, _next, _ds)          \
        list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
                if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_continue_reverse(_dp, _ds)     \
        list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
                if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_available_port(_dp, _ds)            \
        dsa_switch_for_each_port((_dp), (_ds))                  \
                if (!dsa_port_is_unused((_dp)))

#define dsa_switch_for_each_user_port(_dp, _ds)                 \
        dsa_switch_for_each_port((_dp), (_ds))                  \
                if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_cpu_port(_dp, _ds)                  \
        dsa_switch_for_each_port((_dp), (_ds))                  \
                if (dsa_port_is_cpu((_dp)))

static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
        struct dsa_port *dp;
        u32 mask = 0;

        dsa_switch_for_each_user_port(dp, ds)
                mask |= BIT(dp->index);

        return mask;
}

/* Return the local port used to reach an arbitrary switch device */
static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_link *dl;

        list_for_each_entry(dl, &dst->rtable, list)
                if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
                        return dl->dp->index;

        return ds->num_ports;
}

/* Return the local port used to reach an arbitrary switch port */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
                int port)
{
        if (device == ds->index)
                return port;
        else
                return dsa_routing_port(ds, device);
}

/* Return the local port used to reach the dedicated CPU port */
static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
{
        const struct dsa_port *dp = dsa_to_port(ds, port);
        const struct dsa_port *cpu_dp = dp->cpu_dp;

        if (!cpu_dp)
                return port;

        return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}

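/* Worked example (hypothetical topology): suppose switch 1 has no CPU port of
 * its own and reaches the host through switch 0. If the DSA link on switch 1
 * towards switch 0 is port 5, and switch 0's dedicated CPU port is port 4,
 * then dsa_routing_port(ds1, 0) and dsa_upstream_port(ds1, port) both return
 * 5 for any port of switch 1, while dsa_upstream_port(ds0, port) returns 4.
 */
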
/* Return true if this is the local port used to reach the CPU port */
static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
{
        if (dsa_is_unused_port(ds, port))
                return false;

        return port == dsa_upstream_port(ds, port);
}

/* Return true if this is a DSA port leading away from the CPU */
static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
{
        return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
}

/* Return the local port used to reach the CPU port */
static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
{
        struct dsa_port *dp;

        dsa_switch_for_each_available_port(dp, ds) {
                return dsa_upstream_port(ds, dp->index);
        }

        return ds->num_ports;
}

/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
 * that the routing port from @downstream_ds to @upstream_ds is also the port
 * which @downstream_ds uses to reach its dedicated CPU.
 */
static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
                struct dsa_switch *downstream_ds)
{
        int routing_port;

        if (upstream_ds == downstream_ds)
                return true;

        routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);

        return dsa_is_upstream_port(downstream_ds, routing_port);
}

static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
{
        const struct dsa_switch *ds = dp->ds;

        if (ds->vlan_filtering_is_global)
                return ds->vlan_filtering;
        else
                return dp->vlan_filtering;
}

static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
{
        return dp->lag ? dp->lag->id : 0;
}

static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
{
        return dp->lag ? dp->lag->dev : NULL;
}

static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
                const struct dsa_lag *lag)
{
        return dsa_port_lag_dev_get(dp) == lag->dev;
}

static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
        if (!dp->bridge)
                return NULL;

        if (dp->lag)
                return dp->lag->dev;
        else if (dp->hsr_dev)
                return dp->hsr_dev;

        return dp->slave;
}

static inline struct net_device *
dsa_port_bridge_dev_get(const struct dsa_port *dp)
{
        return dp->bridge ? dp->bridge->dev : NULL;
}

static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
{
        return dp->bridge ? dp->bridge->num : 0;
}

static inline bool dsa_port_bridge_same(const struct dsa_port *a,
                const struct dsa_port *b)
{
        struct net_device *br_a = dsa_port_bridge_dev_get(a);
        struct net_device *br_b = dsa_port_bridge_dev_get(b);

        /* Standalone ports are not in the same bridge with one another */
        return (!br_a || !br_b) ? false : (br_a == br_b);
}

static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
                const struct net_device *dev)
{
        return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool
dsa_port_offloads_bridge_dev(struct dsa_port *dp,
                const struct net_device *bridge_dev)
{
        /* DSA ports connected to a bridge, and event was emitted
         * for the bridge.
         */
        return dsa_port_bridge_dev_get(dp) == bridge_dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
                const struct dsa_bridge *bridge)
{
        return dsa_port_bridge_dev_get(dp) == bridge->dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
                const struct net_device *dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_offloads_bridge_port(dp, dev))
                        return true;

        return false;
}

/* Returns true if any port of this tree offloads the given bridge */
static inline bool
dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
                const struct net_device *bridge_dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
                        return true;

        return false;
}

typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
                bool is_static, void *data);
struct dsa_switch_ops {
        /*
         * Tagging protocol helpers called for the CPU ports and DSA links.
         * @get_tag_protocol retrieves the initial tagging protocol and is
         * mandatory. Switches which can operate using multiple tagging
         * protocols should implement @change_tag_protocol and report in
         * @get_tag_protocol the tagger in current use.
         */
        enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
                        int port, enum dsa_tag_protocol mprot);
        int (*change_tag_protocol)(struct dsa_switch *ds, int port,
                        enum dsa_tag_protocol proto);
        /*
         * Method for switch drivers to connect to the tagging protocol driver
         * in current use. The switch driver can provide handlers for certain
         * types of packets for switch management.
         */
        int (*connect_tag_protocol)(struct dsa_switch *ds,
                        enum dsa_tag_protocol proto);

        /* Optional switch-wide initialization and destruction methods */
        int (*setup)(struct dsa_switch *ds);
        void (*teardown)(struct dsa_switch *ds);

        /* Per-port initialization and destruction methods. Mandatory if the
         * driver registers devlink port regions, optional otherwise.
         */
        int (*port_setup)(struct dsa_switch *ds, int port);
        void (*port_teardown)(struct dsa_switch *ds, int port);

        u32 (*get_phy_flags)(struct dsa_switch *ds, int port);

        /*
         * Access to the switch's PHY registers.
         */
        int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
        int (*phy_write)(struct dsa_switch *ds, int port,
                        int regnum, u16 val);

        /*
         * Link state adjustment (called from libphy)
         */
        void (*adjust_link)(struct dsa_switch *ds, int port,
                        struct phy_device *phydev);
        void (*fixed_link_update)(struct dsa_switch *ds, int port,
                        struct fixed_phy_status *st);

        /*
         * PHYLINK integration
         */
        void (*phylink_get_caps)(struct dsa_switch *ds, int port,
                        struct phylink_config *config);
        void (*phylink_validate)(struct dsa_switch *ds, int port,
                        unsigned long *supported,
                        struct phylink_link_state *state);
        struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
                        int port, phy_interface_t iface);
        int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
                        struct phylink_link_state *state);
        void (*phylink_mac_config)(struct dsa_switch *ds, int port,
                        unsigned int mode,
                        const struct phylink_link_state *state);
        void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
        void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
                        unsigned int mode,
                        phy_interface_t interface);
        void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
                        unsigned int mode,
                        phy_interface_t interface,
                        struct phy_device *phydev,
                        int speed, int duplex,
                        bool tx_pause, bool rx_pause);
        void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
                        struct phylink_link_state *state);
        /*
         * Port statistics counters.
         */
        void (*get_strings)(struct dsa_switch *ds, int port,
                        u32 stringset, uint8_t *data);
        void (*get_ethtool_stats)(struct dsa_switch *ds,
                        int port, uint64_t *data);
        int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
        void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
                        int port, uint64_t *data);
        void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
                        struct ethtool_eth_phy_stats *phy_stats);
        void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
                        struct ethtool_eth_mac_stats *mac_stats);
        void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
                        struct ethtool_eth_ctrl_stats *ctrl_stats);
        void (*get_stats64)(struct dsa_switch *ds, int port,
                        struct rtnl_link_stats64 *s);
        void (*self_test)(struct dsa_switch *ds, int port,
                        struct ethtool_test *etest, u64 *data);

        /*
         * ethtool Wake-on-LAN
         */
        void (*get_wol)(struct dsa_switch *ds, int port,
                        struct ethtool_wolinfo *w);
        int (*set_wol)(struct dsa_switch *ds, int port,
                        struct ethtool_wolinfo *w);

        /*
         * ethtool timestamp info
         */
        int (*get_ts_info)(struct dsa_switch *ds, int port,
                        struct ethtool_ts_info *ts);

        /*
         * Suspend and resume
         */
        int (*suspend)(struct dsa_switch *ds);
        int (*resume)(struct dsa_switch *ds);

        /*
         * Port enable/disable
         */
        int (*port_enable)(struct dsa_switch *ds, int port,
                        struct phy_device *phy);
        void (*port_disable)(struct dsa_switch *ds, int port);

        /*
         * Port's MAC EEE settings
         */
        int (*set_mac_eee)(struct dsa_switch *ds, int port,
                        struct ethtool_eee *e);
        int (*get_mac_eee)(struct dsa_switch *ds, int port,
                        struct ethtool_eee *e);

        /* EEPROM access */
        int (*get_eeprom_len)(struct dsa_switch *ds);
        int (*get_eeprom)(struct dsa_switch *ds,
                        struct ethtool_eeprom *eeprom, u8 *data);
        int (*set_eeprom)(struct dsa_switch *ds,
                        struct ethtool_eeprom *eeprom, u8 *data);

        /*
         * Register access.
         */
        int (*get_regs_len)(struct dsa_switch *ds, int port);
        void (*get_regs)(struct dsa_switch *ds, int port,
                        struct ethtool_regs *regs, void *p);

        /*
         * Upper device tracking.
         */
        int (*port_prechangeupper)(struct dsa_switch *ds, int port,
                        struct netdev_notifier_changeupper_info *info);

        /*
         * Bridge integration
         */
        int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
        int (*port_bridge_join)(struct dsa_switch *ds, int port,
                        struct dsa_bridge bridge,
                        bool *tx_fwd_offload,
                        struct netlink_ext_ack *extack);
        void (*port_bridge_leave)(struct dsa_switch *ds, int port,
                        struct dsa_bridge bridge);
        void (*port_stp_state_set)(struct dsa_switch *ds, int port,
                        u8 state);
        void (*port_fast_age)(struct dsa_switch *ds, int port);
        int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
                        struct switchdev_brport_flags flags,
                        struct netlink_ext_ack *extack);
        int (*port_bridge_flags)(struct dsa_switch *ds, int port,
                        struct switchdev_brport_flags flags,
                        struct netlink_ext_ack *extack);

        /*
         * VLAN support
         */
        int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
                        bool vlan_filtering,
                        struct netlink_ext_ack *extack);
        int (*port_vlan_add)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_port_vlan *vlan,
                        struct netlink_ext_ack *extack);
        int (*port_vlan_del)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_port_vlan *vlan);
        /*
         * Forwarding database
         */
        int (*port_fdb_add)(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid,
                        struct dsa_db db);
        int (*port_fdb_del)(struct dsa_switch *ds, int port,
                        const unsigned char *addr, u16 vid,
                        struct dsa_db db);
        int (*port_fdb_dump)(struct dsa_switch *ds, int port,
                        dsa_fdb_dump_cb_t *cb, void *data);
        int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
                        const unsigned char *addr, u16 vid,
                        struct dsa_db db);
        int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
                        const unsigned char *addr, u16 vid,
                        struct dsa_db db);

        /*
         * Multicast database
         */
        int (*port_mdb_add)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_port_mdb *mdb,
                        struct dsa_db db);
        int (*port_mdb_del)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_port_mdb *mdb,
                        struct dsa_db db);
        /*
         * RXNFC
         */
        int (*get_rxnfc)(struct dsa_switch *ds, int port,
                        struct ethtool_rxnfc *nfc, u32 *rule_locs);
        int (*set_rxnfc)(struct dsa_switch *ds, int port,
                        struct ethtool_rxnfc *nfc);

        /*
         * TC integration
         */
        int (*cls_flower_add)(struct dsa_switch *ds, int port,
                        struct flow_cls_offload *cls, bool ingress);
        int (*cls_flower_del)(struct dsa_switch *ds, int port,
                        struct flow_cls_offload *cls, bool ingress);
        int (*cls_flower_stats)(struct dsa_switch *ds, int port,
                        struct flow_cls_offload *cls, bool ingress);
        int (*port_mirror_add)(struct dsa_switch *ds, int port,
                        struct dsa_mall_mirror_tc_entry *mirror,
                        bool ingress);
        void (*port_mirror_del)(struct dsa_switch *ds, int port,
                        struct dsa_mall_mirror_tc_entry *mirror);
        int (*port_policer_add)(struct dsa_switch *ds, int port,
                        struct dsa_mall_policer_tc_entry *policer);
        void (*port_policer_del)(struct dsa_switch *ds, int port);
        int (*port_setup_tc)(struct dsa_switch *ds, int port,
                        enum tc_setup_type type, void *type_data);

        /*
         * Cross-chip operations
         */
        int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
                        int sw_index, int port,
                        struct dsa_bridge bridge,
                        struct netlink_ext_ack *extack);
        void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
                        int sw_index, int port,
                        struct dsa_bridge bridge);
        int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
                        int port);
        int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
                        int port, struct dsa_lag lag,
                        struct netdev_lag_upper_info *info);
        int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
                        int port, struct dsa_lag lag);

        /*
         * PTP functionality
         */
        int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
                        struct ifreq *ifr);
        int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
                        struct ifreq *ifr);
        void (*port_txtstamp)(struct dsa_switch *ds, int port,
                        struct sk_buff *skb);
        bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
                        struct sk_buff *skb, unsigned int type);

        /* Devlink parameters, etc */
        int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
                        struct devlink_param_gset_ctx *ctx);
        int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
                        struct devlink_param_gset_ctx *ctx);
        int (*devlink_info_get)(struct dsa_switch *ds,
                        struct devlink_info_req *req,
                        struct netlink_ext_ack *extack);
        int (*devlink_sb_pool_get)(struct dsa_switch *ds,
                        unsigned int sb_index, u16 pool_index,
                        struct devlink_sb_pool_info *pool_info);
        int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
                        u16 pool_index, u32 size,
                        enum devlink_sb_threshold_type threshold_type,
                        struct netlink_ext_ack *extack);
        int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
                        unsigned int sb_index, u16 pool_index,
                        u32 *p_threshold);
        int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
                        unsigned int sb_index, u16 pool_index,
                        u32 threshold,
                        struct netlink_ext_ack *extack);
        int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
                        unsigned int sb_index, u16 tc_index,
                        enum devlink_sb_pool_type pool_type,
                        u16 *p_pool_index, u32 *p_threshold);
        int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
                        unsigned int sb_index, u16 tc_index,
                        enum devlink_sb_pool_type pool_type,
                        u16 pool_index, u32 threshold,
                        struct netlink_ext_ack *extack);
        int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
                        unsigned int sb_index);
        int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
                        unsigned int sb_index);
        int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
                        unsigned int sb_index, u16 pool_index,
                        u32 *p_cur, u32 *p_max);
        int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
                        unsigned int sb_index, u16 tc_index,
                        enum devlink_sb_pool_type pool_type,
                        u32 *p_cur, u32 *p_max);

        /*
         * MTU change functionality. Switches can also adjust their MRU through
         * this method. By MTU, one understands the SDU (L2 payload) length.
         * If the switch needs to account for the DSA tag on the CPU port, this
         * method needs to do so privately.
         */
        int (*port_change_mtu)(struct dsa_switch *ds, int port,
                        int new_mtu);
        int (*port_max_mtu)(struct dsa_switch *ds, int port);

        /*
         * LAG integration
         */
        int (*port_lag_change)(struct dsa_switch *ds, int port);
        int (*port_lag_join)(struct dsa_switch *ds, int port,
                        struct dsa_lag lag,
                        struct netdev_lag_upper_info *info);
        int (*port_lag_leave)(struct dsa_switch *ds, int port,
                        struct dsa_lag lag);

        /*
         * HSR integration
         */
        int (*port_hsr_join)(struct dsa_switch *ds, int port,
                        struct net_device *hsr);
        int (*port_hsr_leave)(struct dsa_switch *ds, int port,
                        struct net_device *hsr);

        /*
         * MRP integration
         */
        int (*port_mrp_add)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_mrp *mrp);
        int (*port_mrp_del)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_mrp *mrp);
        int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_ring_role_mrp *mrp);
        int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
                        const struct switchdev_obj_ring_role_mrp *mrp);

        /*
         * tag_8021q operations
         */
        int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
                        u16 flags);
        int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);

        /*
         * DSA master tracking operations
         */
        void (*master_state_change)(struct dsa_switch *ds,
                        const struct net_device *master,
                        bool operational);
};

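/* Sketch (hypothetical, not part of this header): a switch driver must
 * provide .get_tag_protocol; most also implement .setup and whatever port
 * operations their hardware supports. Every "foo" identifier is illustrative.
 *
 *        static enum dsa_tag_protocol
 *        foo_get_tag_protocol(struct dsa_switch *ds, int port,
 *                             enum dsa_tag_protocol mprot)
 *        {
 *                return DSA_TAG_PROTO_NONE;
 *        }
 *
 *        static const struct dsa_switch_ops foo_switch_ops = {
 *                .get_tag_protocol = foo_get_tag_protocol,
 *                .setup            = foo_setup,
 *                .port_enable      = foo_port_enable,
 *                .port_disable     = foo_port_disable,
 *        };
 */
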
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)            \
        DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,                \
                        dsa_devlink_param_get, dsa_devlink_param_set, NULL)

int dsa_devlink_param_get(struct devlink *dl, u32 id,
                struct devlink_param_gset_ctx *ctx);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
                struct devlink_param_gset_ctx *ctx);
int dsa_devlink_params_register(struct dsa_switch *ds,
                const struct devlink_param *params,
                size_t params_count);
void dsa_devlink_params_unregister(struct dsa_switch *ds,
                const struct devlink_param *params,
                size_t params_count);

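/* Sketch (hypothetical): a driver exposing a devlink parameter would define
 * it with the wrapper above and register the array, typically from its
 * .setup method. FOO_DEVLINK_PARAM_ID_BAR and the "bar" name are assumed for
 * illustration; the values themselves are then served through the driver's
 * .devlink_param_get and .devlink_param_set ops.
 *
 *        static const struct devlink_param foo_devlink_params[] = {
 *                DSA_DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_BAR, "bar",
 *                                         DEVLINK_PARAM_TYPE_BOOL,
 *                                         BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
 *        };
 *
 *        err = dsa_devlink_params_register(ds, foo_devlink_params,
 *                                          ARRAY_SIZE(foo_devlink_params));
 */
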
int dsa_devlink_resource_register(struct dsa_switch *ds,
                const char *resource_name,
                u64 resource_size,
                u64 resource_id,
                u64 parent_resource_id,
                const struct devlink_resource_size_params *size_params);

void dsa_devlink_resources_unregister(struct dsa_switch *ds);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
                u64 resource_id,
                devlink_resource_occ_get_t *occ_get,
                void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
                u64 resource_id);
struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
                const struct devlink_region_ops *ops,
                u32 region_max_snapshots, u64 region_size);
struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
                int port,
                const struct devlink_port_region_ops *ops,
                u32 region_max_snapshots, u64 region_size);
void dsa_devlink_region_destroy(struct devlink_region *region);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);

struct dsa_devlink_priv {
        struct dsa_switch *ds;
};

static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
{
        struct dsa_devlink_priv *dl_priv = devlink_priv(dl);

        return dl_priv->ds;
}

static inline
struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
{
        struct devlink *dl = port->devlink;
        struct dsa_devlink_priv *dl_priv = devlink_priv(dl);

        return dl_priv->ds;
}

static inline int dsa_devlink_port_to_port(struct devlink_port *port)
{
        return port->index;
}

struct dsa_switch_driver {
        struct list_head list;
        const struct dsa_switch_ops *ops;
};

struct net_device *dsa_dev_to_net_device(struct device *dev);

typedef int dsa_fdb_walk_cb_t(struct dsa_switch *ds, int port,
                const unsigned char *addr, u16 vid,
                struct dsa_db db);

int dsa_port_walk_fdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb);
int dsa_port_walk_mdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb);

/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
        return dev->dsa_ptr && dev->dsa_ptr->rcv;
#endif
        return false;
}

/* All DSA tags that push the EtherType to the right (basically all except tail
 * tags, which don't break dissection) can be treated the same from the
 * perspective of the flow dissector.
 *
 * We need to return:
 * - offset: the (B - A) difference between:
 *   A. the position of the real EtherType and
 *   B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
 *      after the normal EtherType was supposed to be)
 *   The offset in bytes is exactly equal to the tagger overhead (and half of
 *   that, in __be16 shorts).
 *
 * - proto: the value of the real EtherType.
 */
static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
                __be16 *proto, int *offset)
{
#if IS_ENABLED(CONFIG_NET_DSA)
        const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
        int tag_len = ops->needed_headroom;

        *offset = tag_len;
        *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
#endif
}

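/* Worked example: a tagger with needed_headroom of 8 bytes reports
 * *offset = 8, and the real EtherType is read from the __be16 word at
 * index (8 / 2) - 1 = 3, i.e. 6 bytes past the current skb->data.
 */
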
#if IS_ENABLED(CONFIG_NET_DSA)
static inline int __dsa_netdevice_ops_check(struct net_device *dev)
{
        int err = -EOPNOTSUPP;

        if (!dev->dsa_ptr)
                return err;

        if (!dev->dsa_ptr->netdev_ops)
                return err;

        return 0;
}

static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
                int cmd)
{
        const struct dsa_netdevice_ops *ops;
        int err;

        err = __dsa_netdevice_ops_check(dev);
        if (err)
                return err;

        ops = dev->dsa_ptr->netdev_ops;

        return ops->ndo_eth_ioctl(dev, ifr, cmd);
}
#else
static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
                int cmd)
{
        return -EOPNOTSUPP;
}
#endif

void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
void dsa_flush_workqueue(void);

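/* Sketch (hypothetical driver probe, not part of this header): a switch
 * driver typically allocates its dsa_switch in probe, points it at its ops
 * and private data, and hands it to DSA. The matching teardown is
 * dsa_unregister_switch() from .remove and dsa_switch_shutdown() from
 * .shutdown. All "foo" names are illustrative.
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                struct foo_priv *priv;
 *                struct dsa_switch *ds;
 *
 *                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *                if (!priv)
 *                        return -ENOMEM;
 *
 *                ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
 *                if (!ds)
 *                        return -ENOMEM;
 *
 *                ds->dev = &pdev->dev;
 *                ds->num_ports = FOO_NUM_PORTS;
 *                ds->ops = &foo_switch_ops;
 *                ds->priv = priv;
 *
 *                return dsa_register_switch(ds);
 *        }
 */
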
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
#else
static inline int dsa_switch_suspend(struct dsa_switch *ds)
{
        return 0;
}
static inline int dsa_switch_resume(struct dsa_switch *ds)
{
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if IS_ENABLED(CONFIG_NET_DSA)
bool dsa_slave_dev_check(const struct net_device *dev);
#else
static inline bool dsa_slave_dev_check(const struct net_device *dev)
{
        return false;
}
#endif

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);

struct dsa_tag_driver {
        const struct dsa_device_ops *ops;
        struct list_head list;
        struct module *owner;
};

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
                unsigned int count,
                struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
                unsigned int count);

#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
static int __init dsa_tag_driver_module_init(void)                     \
{                                                                       \
        dsa_tag_drivers_register(__dsa_tag_drivers_array, __count,     \
                        THIS_MODULE);                                  \
        return 0;                                                       \
}                                                                       \
module_init(dsa_tag_driver_module_init);                                \
                                                                        \
static void __exit dsa_tag_driver_module_exit(void)                    \
{                                                                       \
        dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count);  \
}                                                                       \
module_exit(dsa_tag_driver_module_exit)

/**
 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
 * drivers
 * @__ops_array: Array of tag driver structures
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_drivers(__ops_array)                             \
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))

#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops

/* Create a static structure we can build a linked list of dsa_tag
 * drivers
 */
#define DSA_TAG_DRIVER(__ops)                                           \
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = {             \
        .ops = &__ops,                                                  \
}

/**
 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
 * driver
 * @__ops: Single tag driver structure
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_driver(__ops)                                    \
DSA_TAG_DRIVER(__ops);                                                  \
                                                                        \
static struct dsa_tag_driver *dsa_tag_driver_array[] = {                \
        &DSA_TAG_DRIVER_NAME(__ops)                                     \
};                                                                      \
module_dsa_tag_drivers(dsa_tag_driver_array)

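/* Sketch (hypothetical): a module providing a single tagger, such as the
 * foo_netdev_ops example near the top of this file, registers it with:
 *
 *        module_dsa_tag_driver(foo_netdev_ops);
 *
 * which expands to a DSA_TAG_DRIVER() instance, a one-element
 * dsa_tag_driver_array[] and the module_init()/module_exit() pair from
 * module_dsa_tag_drivers().
 */
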
#endif /* __LINUX_NET_DSA_H */