net: dsa: keep the bridge_dev and bridge_num as part of the same structure
[linux-2.6-block.git] / net / dsa / dsa_priv.h
CommitLineData
2874c5fd 1/* SPDX-License-Identifier: GPL-2.0-or-later */
91da11f8
LB
2/*
3 * net/dsa/dsa_priv.h - Hardware switch handling
e84665c9 4 * Copyright (c) 2008-2009 Marvell Semiconductor
91da11f8
LB
5 */
6
7#ifndef __DSA_PRIV_H
8#define __DSA_PRIV_H
9
412a1526 10#include <linux/if_bridge.h>
91da11f8 11#include <linux/phy.h>
5075314e 12#include <linux/netdevice.h>
04ff53f9 13#include <linux/netpoll.h>
ea5dd34b 14#include <net/dsa.h>
e131a563 15#include <net/gro_cells.h>
5075314e 16
123abc06
VO
17#define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
18
52c96f9d 19enum {
1faabf74 20 DSA_NOTIFIER_AGEING_TIME,
52c96f9d
VD
21 DSA_NOTIFIER_BRIDGE_JOIN,
22 DSA_NOTIFIER_BRIDGE_LEAVE,
685fb6a4
VD
23 DSA_NOTIFIER_FDB_ADD,
24 DSA_NOTIFIER_FDB_DEL,
3dc80afc
VO
25 DSA_NOTIFIER_HOST_FDB_ADD,
26 DSA_NOTIFIER_HOST_FDB_DEL,
18596f50
GM
27 DSA_NOTIFIER_HSR_JOIN,
28 DSA_NOTIFIER_HSR_LEAVE,
058102a6
TW
29 DSA_NOTIFIER_LAG_CHANGE,
30 DSA_NOTIFIER_LAG_JOIN,
31 DSA_NOTIFIER_LAG_LEAVE,
8ae5bcdc
VD
32 DSA_NOTIFIER_MDB_ADD,
33 DSA_NOTIFIER_MDB_DEL,
b8e997c4
VO
34 DSA_NOTIFIER_HOST_MDB_ADD,
35 DSA_NOTIFIER_HOST_MDB_DEL,
d0c627b8
VD
36 DSA_NOTIFIER_VLAN_ADD,
37 DSA_NOTIFIER_VLAN_DEL,
bfcb8132 38 DSA_NOTIFIER_MTU,
53da0eba 39 DSA_NOTIFIER_TAG_PROTO,
c595c433
HV
40 DSA_NOTIFIER_MRP_ADD,
41 DSA_NOTIFIER_MRP_DEL,
42 DSA_NOTIFIER_MRP_ADD_RING_ROLE,
43 DSA_NOTIFIER_MRP_DEL_RING_ROLE,
c64b9c05
VO
44 DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
45 DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
52c96f9d
VD
46};
47
1faabf74
VD
/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;	/* new FDB ageing time to apply */
};
52
52c96f9d
VD
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	struct dsa_bridge bridge;	/* bridge being joined or left */
	int tree_index;			/* switch tree of the affected port */
	int sw_index;			/* switch index within that tree */
	int port;			/* port index within that switch */
};
60
685fb6a4
VD
/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
	const unsigned char *addr;	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN ID of the FDB entry */
};
68
8ae5bcdc
VD
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct switchdev_obj_port_mdb *mdb;	/* multicast DB entry */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
};
75
058102a6
TW
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	struct net_device *lag;		/* LAG net device being joined/left */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */

	/* NOTE(review): upper info as received from the netdev LAG event;
	 * presumably only valid for LAG_JOIN — confirm against callers.
	 */
	struct netdev_lag_upper_info *info;
};
84
d0c627b8
VD
/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct switchdev_obj_port_vlan *vlan;	/* VLAN being added/deleted */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
	struct netlink_ext_ack *extack;	/* for reporting errors to user space */
};
92
bfcb8132
VO
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	/* NOTE(review): appears to restrict the notifier to the exact port
	 * identified below rather than matching related ports (see
	 * dsa_port_mtu_change()) — confirm against switch.c handler.
	 */
	bool targeted_match;
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
	int mtu;			/* new MTU value */
};
100
53da0eba
VO
/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;	/* tagging protocol to switch to */
};
105
c595c433
HV
/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
	const struct switchdev_obj_mrp *mrp;	/* MRP instance being added/deleted */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
};
112
/* DSA_NOTIFIER_MRP_ADD_RING_ROLE / DSA_NOTIFIER_MRP_DEL_RING_ROLE */
struct dsa_notifier_mrp_ring_role_info {
	const struct switchdev_obj_ring_role_mrp *mrp;	/* ring role object */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
};
119
c64b9c05
VO
/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
	int tree_index;			/* switch tree of the affected port */
	int sw_index;			/* switch index within that tree */
	int port;			/* port index within that switch */
	u16 vid;			/* tag_8021q VLAN ID being added/deleted */
};
127
c4bb76a9
VO
/* Deferred work context used to handle switchdev FDB events outside of
 * atomic notifier context.
 */
struct dsa_switchdev_event_work {
	struct dsa_switch *ds;		/* switch the event targets */
	int port;			/* port index on that switch */
	struct net_device *dev;		/* originating net device of the event */
	struct work_struct work;	/* deferred work item itself */
	unsigned long event;		/* SWITCHDEV_FDB_*_TO_DEVICE event code */
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];	/* MAC address from the FDB event */
	u16 vid;			/* VLAN ID from the FDB event */
	/* NOTE(review): presumably true when the address is a host address
	 * to be installed on the CPU port (DSA_NOTIFIER_HOST_FDB_*) — confirm.
	 */
	bool host_addr;
};
141
18596f50
GM
/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
	struct net_device *hsr;		/* HSR device being joined/left */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within that switch */
};
148
/* Per-netdev private data (netdev_priv()) of a DSA user/slave interface. */
struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff * (*xmit)(struct sk_buff *skb,
				 struct net_device *dev);

	/* GRO cells for RX aggregation on this interface */
	struct gro_cells gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *netpoll;
#endif

	/* TC context */
	struct list_head mall_tc_list;
};
166
91da11f8 167/* dsa.c */
c39e2a1d 168const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
4dad81ee 169void dsa_tag_driver_put(const struct dsa_device_ops *ops);
53da0eba 170const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
c39e2a1d 171
c9eb3e0f 172bool dsa_schedule_work(struct work_struct *work);
a57d8c21 173void dsa_flush_workqueue(void);
98cdb480 174const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
91da11f8 175
4e500251
VO
/* Total number of extra bytes a tagging protocol adds around a frame
 * (headroom plus tailroom), for MTU and skb room accounting.
 */
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}
180
f2f23566 181/* master.c */
17a22fcf
VD
182int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
183void dsa_master_teardown(struct net_device *dev);
f2f23566 184
2231c43b
VD
185static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
186 int device, int port)
3775b1b7 187{
2f657a60
VD
188 struct dsa_port *cpu_dp = dev->dsa_ptr;
189 struct dsa_switch_tree *dst = cpu_dp->dst;
7b9a2f4b 190 struct dsa_port *dp;
3775b1b7 191
7b9a2f4b
VD
192 list_for_each_entry(dp, &dst->ports, list)
193 if (dp->ds->index == device && dp->index == port &&
194 dp->type == DSA_PORT_TYPE_USER)
195 return dp->slave;
3775b1b7 196
7b9a2f4b 197 return NULL;
3775b1b7
VD
198}
199
a40c175b 200/* port.c */
53da0eba
VO
201void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
202 const struct dsa_device_ops *tag_ops);
39f32101 203int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
8640f8dc 204int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
fb8a6a2b 205int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
8640f8dc 206void dsa_port_disable_rt(struct dsa_port *dp);
75104db0 207void dsa_port_disable(struct dsa_port *dp);
2afc526a
VO
208int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
209 struct netlink_ext_ack *extack);
4e51bf44 210void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
cfbed329 211void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
058102a6
TW
212int dsa_port_lag_change(struct dsa_port *dp,
213 struct netdev_lag_lower_state_info *linfo);
214int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
2afc526a
VO
215 struct netdev_lag_upper_info *uinfo,
216 struct netlink_ext_ack *extack);
4e51bf44 217void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
058102a6 218void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
89153ed6
VO
219int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
220 struct netlink_ext_ack *extack);
54a0ed0d 221bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
bae33f2b 222int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
bfcb8132 223int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
88faba20 224 bool targeted_match);
2acf4e6a
AS
225int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
226 u16 vid);
227int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
228 u16 vid);
3dc80afc
VO
229int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
230 u16 vid);
231int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
232 u16 vid);
de40fc5d 233int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
bb9f6031 234int dsa_port_mdb_add(const struct dsa_port *dp,
ffb68fc5 235 const struct switchdev_obj_port_mdb *mdb);
bb9f6031 236int dsa_port_mdb_del(const struct dsa_port *dp,
3a9afea3 237 const struct switchdev_obj_port_mdb *mdb);
b8e997c4
VO
238int dsa_port_host_mdb_add(const struct dsa_port *dp,
239 const struct switchdev_obj_port_mdb *mdb);
240int dsa_port_host_mdb_del(const struct dsa_port *dp,
241 const struct switchdev_obj_port_mdb *mdb);
e18f4c18 242int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
a8b659e7
VO
243 struct switchdev_brport_flags flags,
244 struct netlink_ext_ack *extack);
045c45d1 245int dsa_port_bridge_flags(struct dsa_port *dp,
a8b659e7
VO
246 struct switchdev_brport_flags flags,
247 struct netlink_ext_ack *extack);
076e7133 248int dsa_port_vlan_add(struct dsa_port *dp,
31046a5f
VO
249 const struct switchdev_obj_port_vlan *vlan,
250 struct netlink_ext_ack *extack);
076e7133
VD
251int dsa_port_vlan_del(struct dsa_port *dp,
252 const struct switchdev_obj_port_vlan *vlan);
c595c433
HV
253int dsa_port_mrp_add(const struct dsa_port *dp,
254 const struct switchdev_obj_mrp *mrp);
255int dsa_port_mrp_del(const struct dsa_port *dp,
256 const struct switchdev_obj_mrp *mrp);
257int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
258 const struct switchdev_obj_ring_role_mrp *mrp);
259int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
260 const struct switchdev_obj_ring_role_mrp *mrp);
21bd64bd 261int dsa_port_phylink_create(struct dsa_port *dp);
33615367
SR
262int dsa_port_link_register_of(struct dsa_port *dp);
263void dsa_port_link_unregister_of(struct dsa_port *dp);
18596f50
GM
264int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
265void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
724395f4
VO
266int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
267void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
57ab1ca2 268
91da11f8 269/* slave.c */
5075314e 270extern const struct dsa_device_ops notag_netdev_ops;
010e269f
VO
271extern struct notifier_block dsa_slave_switchdev_notifier;
272extern struct notifier_block dsa_slave_switchdev_blocking_notifier;
273
91da11f8 274void dsa_slave_mii_bus_init(struct dsa_switch *ds);
951259aa 275int dsa_slave_create(struct dsa_port *dp);
cda5c15b 276void dsa_slave_destroy(struct net_device *slave_dev);
24462549
FF
277int dsa_slave_suspend(struct net_device *slave_dev);
278int dsa_slave_resume(struct net_device *slave_dev);
88e4f0ca
VD
279int dsa_slave_register_notifier(void);
280void dsa_slave_unregister_notifier(void);
53da0eba
VO
281void dsa_slave_setup_tagger(struct net_device *slave);
282int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
06cfb2df
VO
283int dsa_slave_manage_vlan_filtering(struct net_device *dev,
284 bool vlan_filtering);
91da11f8 285
d945097b
VD
286static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
287{
288 struct dsa_slave_priv *p = netdev_priv(dev);
289
290 return p->dp;
291}
292
d0006b00
VD
293static inline struct net_device *
294dsa_slave_to_master(const struct net_device *dev)
295{
296 struct dsa_port *dp = dsa_slave_to_port(dev);
297
f8b8b1cd 298 return dp->cpu_dp->master;
d0006b00
VD
299}
300
412a1526
VO
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 * Returns the (possibly modified) skb, or NULL if skb_vlan_untag() failed
 * and the skb was consumed.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	/* Nothing to do unless we are under a bridge with VLAN filtering
	 * turned off (a vlan_filtering bridge untags by itself).
	 */
	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	/* Only the bridge port's pvid is a candidate for untagging */
	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
359
884be12f
VO
/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 * Returns the first eligible bridged user port on which @vid is
 * configured, or NULL if none qualifies. Runs under RCU (uses
 * br_vlan_get_info_rcu()).
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		/* Only ports offloading a bridge can terminate for it */
		if (!dp->bridge)
			continue;

		/* Skip ports whose STP state would drop the packet anyway */
		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		/* The VLAN must actually be configured on this bridge port */
		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}
402
bea79078
VO
403/* If the ingress port offloads the bridge, we mark the frame as autonomously
404 * forwarded by hardware, so the software bridge doesn't forward in twice, back
405 * to us, because we already did. However, if we're in fallback mode and we do
d3eed0e5 406 * software bridging, we are not offloading it, therefore the dp->bridge
bea79078
VO
407 * pointer is not populated, and flooding needs to be done by software (we are
408 * effectively operating in standalone ports mode).
409 */
410static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
411{
412 struct dsa_port *dp = dsa_slave_to_port(skb->dev);
413
d3eed0e5 414 skb->offload_fwd_mark = !!(dp->bridge);
bea79078
VO
415}
416
f1dacd7a
VO
/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 * skb->data
 * |
 * v
 * | | | | | | | | | | | | | | | | | | |
 * +-----------------------+-----------------------+---------------+-------+
 * | Destination MAC | Source MAC | DSA header | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * | |
 * <----- len -----> <----- len ----->
 * |
 * >>>>>>> v
 * >>>>>>> | | | | | | | | | | | | | | |
 * >>>>>>> +-----------------------+-----------------------+-------+
 * >>>>>>> | Destination MAC | Source MAC | EType |
 * +-----------------------+-----------------------+-------+
 * ^
 * |
 * skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	/* Slide the MAC addresses over the DSA header; memmove because the
	 * source and destination regions overlap.
	 */
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
442
6bef794d
VO
/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 * <<<<<<< | | | | | | | | | | | | | | |
 * ^ <<<<<<< +-----------------------+-----------------------+-------+
 * | <<<<<<< | Destination MAC | Source MAC | EType |
 * | +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * | | | | | | | | | | | | | | | | | | |
 * +-----------------------+-----------------------+---------------+-------+
 * | Destination MAC | Source MAC | DSA header | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^ | |
 * | <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	/* Slide the MAC addresses down into the pushed headroom, opening a
	 * len-byte gap before the EtherType; regions overlap, so memmove.
	 */
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
471
5d928ff4
VO
/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA master perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}
485
a72808b6
VO
/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	/* Skip destination + source MAC (2 * ETH_ALEN) to land on EtherType */
	return skb->data + 2 * ETH_ALEN;
}
494
f515f192
VD
495/* switch.c */
496int dsa_switch_register_notifier(struct dsa_switch *ds);
497void dsa_switch_unregister_notifier(struct dsa_switch *ds);
bff33f7e
VO
498
499/* dsa2.c */
058102a6
TW
500void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
501void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
886f8e26
VO
502int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
503int dsa_broadcast(unsigned long e, void *v);
53da0eba
VO
504int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
505 struct net_device *master,
506 const struct dsa_device_ops *tag_ops,
507 const struct dsa_device_ops *old_tag_ops);
3f9bb030
VO
508unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
509void dsa_bridge_num_put(const struct net_device *bridge_dev,
510 unsigned int bridge_num);
d3eed0e5
VO
511struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
512 const struct net_device *br);
058102a6 513
e19cc13c
VO
514/* tag_8021q.c */
515int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
516 struct dsa_notifier_bridge_info *info);
517int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
518 struct dsa_notifier_bridge_info *info);
c64b9c05
VO
519int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
520 struct dsa_notifier_tag_8021q_vlan_info *info);
521int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
522 struct dsa_notifier_tag_8021q_vlan_info *info);
e19cc13c 523
bff33f7e
VO
524extern struct list_head dsa_tree_list;
525
91da11f8 526#endif