net: dsa: Validate hardware support for MST
[linux-2.6-block.git] / net / dsa / port.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *      Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13
14 #include "dsa_priv.h"
15
16 /**
17  * dsa_port_notify - Notify the switching fabric of changes to a port
18  * @dp: port on which change occurred
19  * @e: event, must be of type DSA_NOTIFIER_*
20  * @v: event-specific value.
21  *
22  * Notify all switches in the DSA tree that this port's switch belongs to,
23  * including this switch itself, of an event. Allows the other switches to
24  * reconfigure themselves for cross-chip operations. Can also be used to
25  * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26  * a user port's state changes.
27  */
28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29 {
30         return dsa_tree_notify(dp->ds->dst, e, v);
31 }
32
33 static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
34 {
35         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
36         struct switchdev_notifier_fdb_info info = {
37                 /* flush all VLANs */
38                 .vid = 0,
39         };
40
41         /* When the port becomes standalone it has already left the bridge.
42          * Don't notify the bridge in that case.
43          */
44         if (!brport_dev)
45                 return;
46
47         call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
48                                  brport_dev, &info.info, NULL);
49 }
50
51 static void dsa_port_fast_age(const struct dsa_port *dp)
52 {
53         struct dsa_switch *ds = dp->ds;
54
55         if (!ds->ops->port_fast_age)
56                 return;
57
58         ds->ops->port_fast_age(ds, dp->index);
59
60         dsa_port_notify_bridge_fdb_flush(dp);
61 }
62
63 static bool dsa_port_can_configure_learning(struct dsa_port *dp)
64 {
65         struct switchdev_brport_flags flags = {
66                 .mask = BR_LEARNING,
67         };
68         struct dsa_switch *ds = dp->ds;
69         int err;
70
71         if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
72                 return false;
73
74         err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
75         return !err;
76 }
77
78 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
79 {
80         struct dsa_switch *ds = dp->ds;
81         int port = dp->index;
82
83         if (!ds->ops->port_stp_state_set)
84                 return -EOPNOTSUPP;
85
86         ds->ops->port_stp_state_set(ds, port, state);
87
88         if (!dsa_port_can_configure_learning(dp) ||
89             (do_fast_age && dp->learning)) {
90                 /* Fast age FDB entries or flush appropriate forwarding database
91                  * for the given port, if we are moving it from Learning or
92                  * Forwarding state, to Disabled or Blocking or Listening state.
93                  * Ports that were standalone before the STP state change don't
94                  * need to fast age the FDB, since address learning is off in
95                  * standalone mode.
96                  */
97
98                 if ((dp->stp_state == BR_STATE_LEARNING ||
99                      dp->stp_state == BR_STATE_FORWARDING) &&
100                     (state == BR_STATE_DISABLED ||
101                      state == BR_STATE_BLOCKING ||
102                      state == BR_STATE_LISTENING))
103                         dsa_port_fast_age(dp);
104         }
105
106         dp->stp_state = state;
107
108         return 0;
109 }
110
111 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
112                                    bool do_fast_age)
113 {
114         int err;
115
116         err = dsa_port_set_state(dp, state, do_fast_age);
117         if (err)
118                 pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
119 }
120
121 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
122 {
123         struct dsa_switch *ds = dp->ds;
124         int port = dp->index;
125         int err;
126
127         if (ds->ops->port_enable) {
128                 err = ds->ops->port_enable(ds, port, phy);
129                 if (err)
130                         return err;
131         }
132
133         if (!dp->bridge)
134                 dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
135
136         if (dp->pl)
137                 phylink_start(dp->pl);
138
139         return 0;
140 }
141
142 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
143 {
144         int err;
145
146         rtnl_lock();
147         err = dsa_port_enable_rt(dp, phy);
148         rtnl_unlock();
149
150         return err;
151 }
152
153 void dsa_port_disable_rt(struct dsa_port *dp)
154 {
155         struct dsa_switch *ds = dp->ds;
156         int port = dp->index;
157
158         if (dp->pl)
159                 phylink_stop(dp->pl);
160
161         if (!dp->bridge)
162                 dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
163
164         if (ds->ops->port_disable)
165                 ds->ops->port_disable(ds, port);
166 }
167
168 void dsa_port_disable(struct dsa_port *dp)
169 {
170         rtnl_lock();
171         dsa_port_disable_rt(dp);
172         rtnl_unlock();
173 }
174
175 static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
176                                          struct netlink_ext_ack *extack)
177 {
178         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
179                                    BR_BCAST_FLOOD | BR_PORT_LOCKED;
180         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
181         int flag, err;
182
183         for_each_set_bit(flag, &mask, 32) {
184                 struct switchdev_brport_flags flags = {0};
185
186                 flags.mask = BIT(flag);
187
188                 if (br_port_flag_is_set(brport_dev, BIT(flag)))
189                         flags.val = BIT(flag);
190
191                 err = dsa_port_bridge_flags(dp, flags, extack);
192                 if (err && err != -EOPNOTSUPP)
193                         return err;
194         }
195
196         return 0;
197 }
198
199 static void dsa_port_clear_brport_flags(struct dsa_port *dp)
200 {
201         const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
202         const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
203                                    BR_BCAST_FLOOD | BR_PORT_LOCKED;
204         int flag, err;
205
206         for_each_set_bit(flag, &mask, 32) {
207                 struct switchdev_brport_flags flags = {0};
208
209                 flags.mask = BIT(flag);
210                 flags.val = val & BIT(flag);
211
212                 err = dsa_port_bridge_flags(dp, flags, NULL);
213                 if (err && err != -EOPNOTSUPP)
214                         dev_err(dp->ds->dev,
215                                 "failed to clear bridge port flag %lu: %pe\n",
216                                 flags.val, ERR_PTR(err));
217         }
218 }
219
220 static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
221                                          struct netlink_ext_ack *extack)
222 {
223         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
224         struct net_device *br = dsa_port_bridge_dev_get(dp);
225         int err;
226
227         err = dsa_port_inherit_brport_flags(dp, extack);
228         if (err)
229                 return err;
230
231         err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
232         if (err && err != -EOPNOTSUPP)
233                 return err;
234
235         err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
236         if (err && err != -EOPNOTSUPP)
237                 return err;
238
239         err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
240         if (err && err != -EOPNOTSUPP)
241                 return err;
242
243         return 0;
244 }
245
246 static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
247 {
248         /* Configure the port for standalone mode (no address learning,
249          * flood everything).
250          * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
251          * when the user requests it through netlink or sysfs, but not
252          * automatically at port join or leave, so we need to handle resetting
253          * the brport flags ourselves. But we even prefer it that way, because
254          * otherwise, some setups might never get the notification they need,
255          * for example, when a port leaves a LAG that offloads the bridge,
256          * it becomes standalone, but as far as the bridge is concerned, no
257          * port ever left.
258          */
259         dsa_port_clear_brport_flags(dp);
260
261         /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
262          * so allow it to be in BR_STATE_FORWARDING to be kept functional
263          */
264         dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
265
266         /* VLAN filtering is handled by dsa_switch_bridge_leave */
267
268         /* Ageing time may be global to the switch chip, so don't change it
269          * here because we have no good reason (or value) to change it to.
270          */
271 }
272
273 static int dsa_port_bridge_create(struct dsa_port *dp,
274                                   struct net_device *br,
275                                   struct netlink_ext_ack *extack)
276 {
277         struct dsa_switch *ds = dp->ds;
278         struct dsa_bridge *bridge;
279
280         bridge = dsa_tree_bridge_find(ds->dst, br);
281         if (bridge) {
282                 refcount_inc(&bridge->refcount);
283                 dp->bridge = bridge;
284                 return 0;
285         }
286
287         bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
288         if (!bridge)
289                 return -ENOMEM;
290
291         refcount_set(&bridge->refcount, 1);
292
293         bridge->dev = br;
294
295         bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
296         if (ds->max_num_bridges && !bridge->num) {
297                 NL_SET_ERR_MSG_MOD(extack,
298                                    "Range of offloadable bridges exceeded");
299                 kfree(bridge);
300                 return -EOPNOTSUPP;
301         }
302
303         dp->bridge = bridge;
304
305         return 0;
306 }
307
308 static void dsa_port_bridge_destroy(struct dsa_port *dp,
309                                     const struct net_device *br)
310 {
311         struct dsa_bridge *bridge = dp->bridge;
312
313         dp->bridge = NULL;
314
315         if (!refcount_dec_and_test(&bridge->refcount))
316                 return;
317
318         if (bridge->num)
319                 dsa_bridge_num_put(br, bridge->num);
320
321         kfree(bridge);
322 }
323
324 static bool dsa_port_supports_mst(struct dsa_port *dp)
325 {
326         return dsa_port_can_configure_learning(dp);
327 }
328
329 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
330                          struct netlink_ext_ack *extack)
331 {
332         struct dsa_notifier_bridge_info info = {
333                 .tree_index = dp->ds->dst->index,
334                 .sw_index = dp->ds->index,
335                 .port = dp->index,
336                 .extack = extack,
337         };
338         struct net_device *dev = dp->slave;
339         struct net_device *brport_dev;
340         int err;
341
342         if (br_mst_enabled(br) && !dsa_port_supports_mst(dp))
343                 return -EOPNOTSUPP;
344
345         /* Here the interface is already bridged. Reflect the current
346          * configuration so that drivers can program their chips accordingly.
347          */
348         err = dsa_port_bridge_create(dp, br, extack);
349         if (err)
350                 return err;
351
352         brport_dev = dsa_port_to_bridge_port(dp);
353
354         info.bridge = *dp->bridge;
355         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
356         if (err)
357                 goto out_rollback;
358
359         /* Drivers which support bridge TX forwarding should set this */
360         dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
361
362         err = switchdev_bridge_port_offload(brport_dev, dev, dp,
363                                             &dsa_slave_switchdev_notifier,
364                                             &dsa_slave_switchdev_blocking_notifier,
365                                             dp->bridge->tx_fwd_offload, extack);
366         if (err)
367                 goto out_rollback_unbridge;
368
369         err = dsa_port_switchdev_sync_attrs(dp, extack);
370         if (err)
371                 goto out_rollback_unoffload;
372
373         return 0;
374
375 out_rollback_unoffload:
376         switchdev_bridge_port_unoffload(brport_dev, dp,
377                                         &dsa_slave_switchdev_notifier,
378                                         &dsa_slave_switchdev_blocking_notifier);
379 out_rollback_unbridge:
380         dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
381 out_rollback:
382         dsa_port_bridge_destroy(dp, br);
383         return err;
384 }
385
386 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
387 {
388         struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
389
390         /* Don't try to unoffload something that is not offloaded */
391         if (!brport_dev)
392                 return;
393
394         switchdev_bridge_port_unoffload(brport_dev, dp,
395                                         &dsa_slave_switchdev_notifier,
396                                         &dsa_slave_switchdev_blocking_notifier);
397
398         dsa_flush_workqueue();
399 }
400
401 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
402 {
403         struct dsa_notifier_bridge_info info = {
404                 .tree_index = dp->ds->dst->index,
405                 .sw_index = dp->ds->index,
406                 .port = dp->index,
407         };
408         int err;
409
410         /* If the port could not be offloaded to begin with, then
411          * there is nothing to do.
412          */
413         if (!dp->bridge)
414                 return;
415
416         info.bridge = *dp->bridge;
417
418         /* Here the port is already unbridged. Reflect the current configuration
419          * so that drivers can program their chips accordingly.
420          */
421         dsa_port_bridge_destroy(dp, br);
422
423         err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
424         if (err)
425                 dev_err(dp->ds->dev,
426                         "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
427                         dp->index, ERR_PTR(err));
428
429         dsa_port_switchdev_unsync_attrs(dp);
430 }
431
432 int dsa_port_lag_change(struct dsa_port *dp,
433                         struct netdev_lag_lower_state_info *linfo)
434 {
435         struct dsa_notifier_lag_info info = {
436                 .sw_index = dp->ds->index,
437                 .port = dp->index,
438         };
439         bool tx_enabled;
440
441         if (!dp->lag)
442                 return 0;
443
444         /* On statically configured aggregates (e.g. loadbalance
445          * without LACP) ports will always be tx_enabled, even if the
446          * link is down. Thus we require both link_up and tx_enabled
447          * in order to include it in the tx set.
448          */
449         tx_enabled = linfo->link_up && linfo->tx_enabled;
450
451         if (tx_enabled == dp->lag_tx_enabled)
452                 return 0;
453
454         dp->lag_tx_enabled = tx_enabled;
455
456         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
457 }
458
459 static int dsa_port_lag_create(struct dsa_port *dp,
460                                struct net_device *lag_dev)
461 {
462         struct dsa_switch *ds = dp->ds;
463         struct dsa_lag *lag;
464
465         lag = dsa_tree_lag_find(ds->dst, lag_dev);
466         if (lag) {
467                 refcount_inc(&lag->refcount);
468                 dp->lag = lag;
469                 return 0;
470         }
471
472         lag = kzalloc(sizeof(*lag), GFP_KERNEL);
473         if (!lag)
474                 return -ENOMEM;
475
476         refcount_set(&lag->refcount, 1);
477         mutex_init(&lag->fdb_lock);
478         INIT_LIST_HEAD(&lag->fdbs);
479         lag->dev = lag_dev;
480         dsa_lag_map(ds->dst, lag);
481         dp->lag = lag;
482
483         return 0;
484 }
485
486 static void dsa_port_lag_destroy(struct dsa_port *dp)
487 {
488         struct dsa_lag *lag = dp->lag;
489
490         dp->lag = NULL;
491         dp->lag_tx_enabled = false;
492
493         if (!refcount_dec_and_test(&lag->refcount))
494                 return;
495
496         WARN_ON(!list_empty(&lag->fdbs));
497         dsa_lag_unmap(dp->ds->dst, lag);
498         kfree(lag);
499 }
500
501 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
502                       struct netdev_lag_upper_info *uinfo,
503                       struct netlink_ext_ack *extack)
504 {
505         struct dsa_notifier_lag_info info = {
506                 .sw_index = dp->ds->index,
507                 .port = dp->index,
508                 .info = uinfo,
509         };
510         struct net_device *bridge_dev;
511         int err;
512
513         err = dsa_port_lag_create(dp, lag_dev);
514         if (err)
515                 goto err_lag_create;
516
517         info.lag = *dp->lag;
518         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
519         if (err)
520                 goto err_lag_join;
521
522         bridge_dev = netdev_master_upper_dev_get(lag_dev);
523         if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
524                 return 0;
525
526         err = dsa_port_bridge_join(dp, bridge_dev, extack);
527         if (err)
528                 goto err_bridge_join;
529
530         return 0;
531
532 err_bridge_join:
533         dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
534 err_lag_join:
535         dsa_port_lag_destroy(dp);
536 err_lag_create:
537         return err;
538 }
539
540 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
541 {
542         struct net_device *br = dsa_port_bridge_dev_get(dp);
543
544         if (br)
545                 dsa_port_pre_bridge_leave(dp, br);
546 }
547
548 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
549 {
550         struct net_device *br = dsa_port_bridge_dev_get(dp);
551         struct dsa_notifier_lag_info info = {
552                 .sw_index = dp->ds->index,
553                 .port = dp->index,
554         };
555         int err;
556
557         if (!dp->lag)
558                 return;
559
560         /* Port might have been part of a LAG that in turn was
561          * attached to a bridge.
562          */
563         if (br)
564                 dsa_port_bridge_leave(dp, br);
565
566         info.lag = *dp->lag;
567
568         dsa_port_lag_destroy(dp);
569
570         err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
571         if (err)
572                 dev_err(dp->ds->dev,
573                         "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
574                         dp->index, ERR_PTR(err));
575 }
576
577 /* Must be called under rcu_read_lock() */
578 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
579                                               bool vlan_filtering,
580                                               struct netlink_ext_ack *extack)
581 {
582         struct dsa_switch *ds = dp->ds;
583         struct dsa_port *other_dp;
584         int err;
585
586         /* VLAN awareness was off, so the question is "can we turn it on".
587          * We may have had 8021q uppers, those need to go. Make sure we don't
588          * enter an inconsistent state: deny changing the VLAN awareness state
589          * as long as we have 8021q uppers.
590          */
591         if (vlan_filtering && dsa_port_is_user(dp)) {
592                 struct net_device *br = dsa_port_bridge_dev_get(dp);
593                 struct net_device *upper_dev, *slave = dp->slave;
594                 struct list_head *iter;
595
596                 netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
597                         struct bridge_vlan_info br_info;
598                         u16 vid;
599
600                         if (!is_vlan_dev(upper_dev))
601                                 continue;
602
603                         vid = vlan_dev_vlan_id(upper_dev);
604
605                         /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
606                          * device, respectively the VID is not found, returning
607                          * 0 means success, which is a failure for us here.
608                          */
609                         err = br_vlan_get_info(br, vid, &br_info);
610                         if (err == 0) {
611                                 NL_SET_ERR_MSG_MOD(extack,
612                                                    "Must first remove VLAN uppers having VIDs also present in bridge");
613                                 return false;
614                         }
615                 }
616         }
617
618         if (!ds->vlan_filtering_is_global)
619                 return true;
620
621         /* For cases where enabling/disabling VLAN awareness is global to the
622          * switch, we need to handle the case where multiple bridges span
623          * different ports of the same switch device and one of them has a
624          * different setting than what is being requested.
625          */
626         dsa_switch_for_each_port(other_dp, ds) {
627                 struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);
628
629                 /* If it's the same bridge, it also has same
630                  * vlan_filtering setting => no need to check
631                  */
632                 if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
633                         continue;
634
635                 if (br_vlan_enabled(other_br) != vlan_filtering) {
636                         NL_SET_ERR_MSG_MOD(extack,
637                                            "VLAN filtering is a global setting");
638                         return false;
639                 }
640         }
641         return true;
642 }
643
644 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
645                             struct netlink_ext_ack *extack)
646 {
647         bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
648         struct dsa_switch *ds = dp->ds;
649         bool apply;
650         int err;
651
652         if (!ds->ops->port_vlan_filtering)
653                 return -EOPNOTSUPP;
654
655         /* We are called from dsa_slave_switchdev_blocking_event(),
656          * which is not under rcu_read_lock(), unlike
657          * dsa_slave_switchdev_event().
658          */
659         rcu_read_lock();
660         apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
661         rcu_read_unlock();
662         if (!apply)
663                 return -EINVAL;
664
665         if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
666                 return 0;
667
668         err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
669                                            extack);
670         if (err)
671                 return err;
672
673         if (ds->vlan_filtering_is_global) {
674                 struct dsa_port *other_dp;
675
676                 ds->vlan_filtering = vlan_filtering;
677
678                 dsa_switch_for_each_user_port(other_dp, ds) {
679                         struct net_device *slave = dp->slave;
680
681                         /* We might be called in the unbind path, so not
682                          * all slave devices might still be registered.
683                          */
684                         if (!slave)
685                                 continue;
686
687                         err = dsa_slave_manage_vlan_filtering(slave,
688                                                               vlan_filtering);
689                         if (err)
690                                 goto restore;
691                 }
692         } else {
693                 dp->vlan_filtering = vlan_filtering;
694
695                 err = dsa_slave_manage_vlan_filtering(dp->slave,
696                                                       vlan_filtering);
697                 if (err)
698                         goto restore;
699         }
700
701         return 0;
702
703 restore:
704         ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);
705
706         if (ds->vlan_filtering_is_global)
707                 ds->vlan_filtering = old_vlan_filtering;
708         else
709                 dp->vlan_filtering = old_vlan_filtering;
710
711         return err;
712 }
713
714 /* This enforces legacy behavior for switch drivers which assume they can't
715  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
716  */
717 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
718 {
719         struct net_device *br = dsa_port_bridge_dev_get(dp);
720         struct dsa_switch *ds = dp->ds;
721
722         if (!br)
723                 return false;
724
725         return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
726 }
727
728 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
729 {
730         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
731         unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
732         struct dsa_notifier_ageing_time_info info;
733         int err;
734
735         info.ageing_time = ageing_time;
736
737         err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
738         if (err)
739                 return err;
740
741         dp->ageing_time = ageing_time;
742
743         return 0;
744 }
745
746 int dsa_port_mst_enable(struct dsa_port *dp, bool on,
747                         struct netlink_ext_ack *extack)
748 {
749         if (!on)
750                 return 0;
751
752         if (!dsa_port_supports_mst(dp)) {
753                 NL_SET_ERR_MSG_MOD(extack, "Hardware does not support MST");
754                 return -EINVAL;
755         }
756
757         return 0;
758 }
759
760 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
761                               struct switchdev_brport_flags flags,
762                               struct netlink_ext_ack *extack)
763 {
764         struct dsa_switch *ds = dp->ds;
765
766         if (!ds->ops->port_pre_bridge_flags)
767                 return -EINVAL;
768
769         return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
770 }
771
772 int dsa_port_bridge_flags(struct dsa_port *dp,
773                           struct switchdev_brport_flags flags,
774                           struct netlink_ext_ack *extack)
775 {
776         struct dsa_switch *ds = dp->ds;
777         int err;
778
779         if (!ds->ops->port_bridge_flags)
780                 return -EOPNOTSUPP;
781
782         err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
783         if (err)
784                 return err;
785
786         if (flags.mask & BR_LEARNING) {
787                 bool learning = flags.val & BR_LEARNING;
788
789                 if (learning == dp->learning)
790                         return 0;
791
792                 if ((dp->learning && !learning) &&
793                     (dp->stp_state == BR_STATE_LEARNING ||
794                      dp->stp_state == BR_STATE_FORWARDING))
795                         dsa_port_fast_age(dp);
796
797                 dp->learning = learning;
798         }
799
800         return 0;
801 }
802
803 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
804                         bool targeted_match)
805 {
806         struct dsa_notifier_mtu_info info = {
807                 .sw_index = dp->ds->index,
808                 .targeted_match = targeted_match,
809                 .port = dp->index,
810                 .mtu = new_mtu,
811         };
812
813         return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
814 }
815
816 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
817                      u16 vid)
818 {
819         struct dsa_notifier_fdb_info info = {
820                 .sw_index = dp->ds->index,
821                 .port = dp->index,
822                 .addr = addr,
823                 .vid = vid,
824                 .db = {
825                         .type = DSA_DB_BRIDGE,
826                         .bridge = *dp->bridge,
827                 },
828         };
829
830         /* Refcounting takes bridge.num as a key, and should be global for all
831          * bridges in the absence of FDB isolation, and per bridge otherwise.
832          * Force the bridge.num to zero here in the absence of FDB isolation.
833          */
834         if (!dp->ds->fdb_isolation)
835                 info.db.bridge.num = 0;
836
837         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
838 }
839
840 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
841                      u16 vid)
842 {
843         struct dsa_notifier_fdb_info info = {
844                 .sw_index = dp->ds->index,
845                 .port = dp->index,
846                 .addr = addr,
847                 .vid = vid,
848                 .db = {
849                         .type = DSA_DB_BRIDGE,
850                         .bridge = *dp->bridge,
851                 },
852         };
853
854         if (!dp->ds->fdb_isolation)
855                 info.db.bridge.num = 0;
856
857         return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
858 }
859
860 static int dsa_port_host_fdb_add(struct dsa_port *dp,
861                                  const unsigned char *addr, u16 vid,
862                                  struct dsa_db db)
863 {
864         struct dsa_notifier_fdb_info info = {
865                 .sw_index = dp->ds->index,
866                 .port = dp->index,
867                 .addr = addr,
868                 .vid = vid,
869                 .db = db,
870         };
871
872         if (!dp->ds->fdb_isolation)
873                 info.db.bridge.num = 0;
874
875         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
876 }
877
878 int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
879                                      const unsigned char *addr, u16 vid)
880 {
881         struct dsa_db db = {
882                 .type = DSA_DB_PORT,
883                 .dp = dp,
884         };
885
886         return dsa_port_host_fdb_add(dp, addr, vid, db);
887 }
888
889 int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
890                                  const unsigned char *addr, u16 vid)
891 {
892         struct dsa_port *cpu_dp = dp->cpu_dp;
893         struct dsa_db db = {
894                 .type = DSA_DB_BRIDGE,
895                 .bridge = *dp->bridge,
896         };
897         int err;
898
899         /* Avoid a call to __dev_set_promiscuity() on the master, which
900          * requires rtnl_lock(), since we can't guarantee that is held here,
901          * and we can't take it either.
902          */
903         if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
904                 err = dev_uc_add(cpu_dp->master, addr);
905                 if (err)
906                         return err;
907         }
908
909         return dsa_port_host_fdb_add(dp, addr, vid, db);
910 }
911
912 static int dsa_port_host_fdb_del(struct dsa_port *dp,
913                                  const unsigned char *addr, u16 vid,
914                                  struct dsa_db db)
915 {
916         struct dsa_notifier_fdb_info info = {
917                 .sw_index = dp->ds->index,
918                 .port = dp->index,
919                 .addr = addr,
920                 .vid = vid,
921                 .db = db,
922         };
923
924         if (!dp->ds->fdb_isolation)
925                 info.db.bridge.num = 0;
926
927         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
928 }
929
930 int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
931                                      const unsigned char *addr, u16 vid)
932 {
933         struct dsa_db db = {
934                 .type = DSA_DB_PORT,
935                 .dp = dp,
936         };
937
938         return dsa_port_host_fdb_del(dp, addr, vid, db);
939 }
940
941 int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
942                                  const unsigned char *addr, u16 vid)
943 {
944         struct dsa_port *cpu_dp = dp->cpu_dp;
945         struct dsa_db db = {
946                 .type = DSA_DB_BRIDGE,
947                 .bridge = *dp->bridge,
948         };
949         int err;
950
951         if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
952                 err = dev_uc_del(cpu_dp->master, addr);
953                 if (err)
954                         return err;
955         }
956
957         return dsa_port_host_fdb_del(dp, addr, vid, db);
958 }
959
960 int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
961                          u16 vid)
962 {
963         struct dsa_notifier_lag_fdb_info info = {
964                 .lag = dp->lag,
965                 .addr = addr,
966                 .vid = vid,
967                 .db = {
968                         .type = DSA_DB_BRIDGE,
969                         .bridge = *dp->bridge,
970                 },
971         };
972
973         if (!dp->ds->fdb_isolation)
974                 info.db.bridge.num = 0;
975
976         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &info);
977 }
978
979 int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
980                          u16 vid)
981 {
982         struct dsa_notifier_lag_fdb_info info = {
983                 .lag = dp->lag,
984                 .addr = addr,
985                 .vid = vid,
986                 .db = {
987                         .type = DSA_DB_BRIDGE,
988                         .bridge = *dp->bridge,
989                 },
990         };
991
992         if (!dp->ds->fdb_isolation)
993                 info.db.bridge.num = 0;
994
995         return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &info);
996 }
997
998 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
999 {
1000         struct dsa_switch *ds = dp->ds;
1001         int port = dp->index;
1002
1003         if (!ds->ops->port_fdb_dump)
1004                 return -EOPNOTSUPP;
1005
1006         return ds->ops->port_fdb_dump(ds, port, cb, data);
1007 }
1008
1009 int dsa_port_mdb_add(const struct dsa_port *dp,
1010                      const struct switchdev_obj_port_mdb *mdb)
1011 {
1012         struct dsa_notifier_mdb_info info = {
1013                 .sw_index = dp->ds->index,
1014                 .port = dp->index,
1015                 .mdb = mdb,
1016                 .db = {
1017                         .type = DSA_DB_BRIDGE,
1018                         .bridge = *dp->bridge,
1019                 },
1020         };
1021
1022         if (!dp->ds->fdb_isolation)
1023                 info.db.bridge.num = 0;
1024
1025         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
1026 }
1027
1028 int dsa_port_mdb_del(const struct dsa_port *dp,
1029                      const struct switchdev_obj_port_mdb *mdb)
1030 {
1031         struct dsa_notifier_mdb_info info = {
1032                 .sw_index = dp->ds->index,
1033                 .port = dp->index,
1034                 .mdb = mdb,
1035                 .db = {
1036                         .type = DSA_DB_BRIDGE,
1037                         .bridge = *dp->bridge,
1038                 },
1039         };
1040
1041         if (!dp->ds->fdb_isolation)
1042                 info.db.bridge.num = 0;
1043
1044         return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
1045 }
1046
1047 static int dsa_port_host_mdb_add(const struct dsa_port *dp,
1048                                  const struct switchdev_obj_port_mdb *mdb,
1049                                  struct dsa_db db)
1050 {
1051         struct dsa_notifier_mdb_info info = {
1052                 .sw_index = dp->ds->index,
1053                 .port = dp->index,
1054                 .mdb = mdb,
1055                 .db = db,
1056         };
1057
1058         if (!dp->ds->fdb_isolation)
1059                 info.db.bridge.num = 0;
1060
1061         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
1062 }
1063
1064 int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
1065                                      const struct switchdev_obj_port_mdb *mdb)
1066 {
1067         struct dsa_db db = {
1068                 .type = DSA_DB_PORT,
1069                 .dp = dp,
1070         };
1071
1072         return dsa_port_host_mdb_add(dp, mdb, db);
1073 }
1074
1075 int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
1076                                  const struct switchdev_obj_port_mdb *mdb)
1077 {
1078         struct dsa_port *cpu_dp = dp->cpu_dp;
1079         struct dsa_db db = {
1080                 .type = DSA_DB_BRIDGE,
1081                 .bridge = *dp->bridge,
1082         };
1083         int err;
1084
1085         err = dev_mc_add(cpu_dp->master, mdb->addr);
1086         if (err)
1087                 return err;
1088
1089         return dsa_port_host_mdb_add(dp, mdb, db);
1090 }
1091
1092 static int dsa_port_host_mdb_del(const struct dsa_port *dp,
1093                                  const struct switchdev_obj_port_mdb *mdb,
1094                                  struct dsa_db db)
1095 {
1096         struct dsa_notifier_mdb_info info = {
1097                 .sw_index = dp->ds->index,
1098                 .port = dp->index,
1099                 .mdb = mdb,
1100                 .db = db,
1101         };
1102
1103         if (!dp->ds->fdb_isolation)
1104                 info.db.bridge.num = 0;
1105
1106         return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
1107 }
1108
1109 int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
1110                                      const struct switchdev_obj_port_mdb *mdb)
1111 {
1112         struct dsa_db db = {
1113                 .type = DSA_DB_PORT,
1114                 .dp = dp,
1115         };
1116
1117         return dsa_port_host_mdb_del(dp, mdb, db);
1118 }
1119
1120 int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
1121                                  const struct switchdev_obj_port_mdb *mdb)
1122 {
1123         struct dsa_port *cpu_dp = dp->cpu_dp;
1124         struct dsa_db db = {
1125                 .type = DSA_DB_BRIDGE,
1126                 .bridge = *dp->bridge,
1127         };
1128         int err;
1129
1130         err = dev_mc_del(cpu_dp->master, mdb->addr);
1131         if (err)
1132                 return err;
1133
1134         return dsa_port_host_mdb_del(dp, mdb, db);
1135 }
1136
1137 int dsa_port_vlan_add(struct dsa_port *dp,
1138                       const struct switchdev_obj_port_vlan *vlan,
1139                       struct netlink_ext_ack *extack)
1140 {
1141         struct dsa_notifier_vlan_info info = {
1142                 .sw_index = dp->ds->index,
1143                 .port = dp->index,
1144                 .vlan = vlan,
1145                 .extack = extack,
1146         };
1147
1148         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
1149 }
1150
1151 int dsa_port_vlan_del(struct dsa_port *dp,
1152                       const struct switchdev_obj_port_vlan *vlan)
1153 {
1154         struct dsa_notifier_vlan_info info = {
1155                 .sw_index = dp->ds->index,
1156                 .port = dp->index,
1157                 .vlan = vlan,
1158         };
1159
1160         return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
1161 }
1162
1163 int dsa_port_host_vlan_add(struct dsa_port *dp,
1164                            const struct switchdev_obj_port_vlan *vlan,
1165                            struct netlink_ext_ack *extack)
1166 {
1167         struct dsa_notifier_vlan_info info = {
1168                 .sw_index = dp->ds->index,
1169                 .port = dp->index,
1170                 .vlan = vlan,
1171                 .extack = extack,
1172         };
1173         struct dsa_port *cpu_dp = dp->cpu_dp;
1174         int err;
1175
1176         err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
1177         if (err && err != -EOPNOTSUPP)
1178                 return err;
1179
1180         vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
1181
1182         return err;
1183 }
1184
1185 int dsa_port_host_vlan_del(struct dsa_port *dp,
1186                            const struct switchdev_obj_port_vlan *vlan)
1187 {
1188         struct dsa_notifier_vlan_info info = {
1189                 .sw_index = dp->ds->index,
1190                 .port = dp->index,
1191                 .vlan = vlan,
1192         };
1193         struct dsa_port *cpu_dp = dp->cpu_dp;
1194         int err;
1195
1196         err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
1197         if (err && err != -EOPNOTSUPP)
1198                 return err;
1199
1200         vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
1201
1202         return err;
1203 }
1204
1205 int dsa_port_mrp_add(const struct dsa_port *dp,
1206                      const struct switchdev_obj_mrp *mrp)
1207 {
1208         struct dsa_switch *ds = dp->ds;
1209
1210         if (!ds->ops->port_mrp_add)
1211                 return -EOPNOTSUPP;
1212
1213         return ds->ops->port_mrp_add(ds, dp->index, mrp);
1214 }
1215
1216 int dsa_port_mrp_del(const struct dsa_port *dp,
1217                      const struct switchdev_obj_mrp *mrp)
1218 {
1219         struct dsa_switch *ds = dp->ds;
1220
1221         if (!ds->ops->port_mrp_del)
1222                 return -EOPNOTSUPP;
1223
1224         return ds->ops->port_mrp_del(ds, dp->index, mrp);
1225 }
1226
1227 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
1228                                const struct switchdev_obj_ring_role_mrp *mrp)
1229 {
1230         struct dsa_switch *ds = dp->ds;
1231
1232         if (!ds->ops->port_mrp_add_ring_role)
1233                 return -EOPNOTSUPP;
1234
1235         return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
1236 }
1237
1238 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
1239                                const struct switchdev_obj_ring_role_mrp *mrp)
1240 {
1241         struct dsa_switch *ds = dp->ds;
1242
1243         if (!ds->ops->port_mrp_del_ring_role)
1244                 return -EOPNOTSUPP;
1245
1246         return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
1247 }
1248
1249 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
1250                                const struct dsa_device_ops *tag_ops)
1251 {
1252         cpu_dp->rcv = tag_ops->rcv;
1253         cpu_dp->tag_ops = tag_ops;
1254 }
1255
1256 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
1257 {
1258         struct device_node *phy_dn;
1259         struct phy_device *phydev;
1260
1261         phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
1262         if (!phy_dn)
1263                 return NULL;
1264
1265         phydev = of_phy_find_device(phy_dn);
1266         if (!phydev) {
1267                 of_node_put(phy_dn);
1268                 return ERR_PTR(-EPROBE_DEFER);
1269         }
1270
1271         of_node_put(phy_dn);
1272         return phydev;
1273 }
1274
1275 static void dsa_port_phylink_validate(struct phylink_config *config,
1276                                       unsigned long *supported,
1277                                       struct phylink_link_state *state)
1278 {
1279         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1280         struct dsa_switch *ds = dp->ds;
1281
1282         if (!ds->ops->phylink_validate) {
1283                 if (config->mac_capabilities)
1284                         phylink_generic_validate(config, supported, state);
1285                 return;
1286         }
1287
1288         ds->ops->phylink_validate(ds, dp->index, supported, state);
1289 }
1290
1291 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
1292                                                struct phylink_link_state *state)
1293 {
1294         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1295         struct dsa_switch *ds = dp->ds;
1296         int err;
1297
1298         /* Only called for inband modes */
1299         if (!ds->ops->phylink_mac_link_state) {
1300                 state->link = 0;
1301                 return;
1302         }
1303
1304         err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
1305         if (err < 0) {
1306                 dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
1307                         dp->index, err);
1308                 state->link = 0;
1309         }
1310 }
1311
1312 static struct phylink_pcs *
1313 dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
1314                                 phy_interface_t interface)
1315 {
1316         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1317         struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
1318         struct dsa_switch *ds = dp->ds;
1319
1320         if (ds->ops->phylink_mac_select_pcs)
1321                 pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
1322
1323         return pcs;
1324 }
1325
1326 static void dsa_port_phylink_mac_config(struct phylink_config *config,
1327                                         unsigned int mode,
1328                                         const struct phylink_link_state *state)
1329 {
1330         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1331         struct dsa_switch *ds = dp->ds;
1332
1333         if (!ds->ops->phylink_mac_config)
1334                 return;
1335
1336         ds->ops->phylink_mac_config(ds, dp->index, mode, state);
1337 }
1338
1339 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
1340 {
1341         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1342         struct dsa_switch *ds = dp->ds;
1343
1344         if (!ds->ops->phylink_mac_an_restart)
1345                 return;
1346
1347         ds->ops->phylink_mac_an_restart(ds, dp->index);
1348 }
1349
1350 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
1351                                            unsigned int mode,
1352                                            phy_interface_t interface)
1353 {
1354         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1355         struct phy_device *phydev = NULL;
1356         struct dsa_switch *ds = dp->ds;
1357
1358         if (dsa_port_is_user(dp))
1359                 phydev = dp->slave->phydev;
1360
1361         if (!ds->ops->phylink_mac_link_down) {
1362                 if (ds->ops->adjust_link && phydev)
1363                         ds->ops->adjust_link(ds, dp->index, phydev);
1364                 return;
1365         }
1366
1367         ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1368 }
1369
1370 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
1371                                          struct phy_device *phydev,
1372                                          unsigned int mode,
1373                                          phy_interface_t interface,
1374                                          int speed, int duplex,
1375                                          bool tx_pause, bool rx_pause)
1376 {
1377         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1378         struct dsa_switch *ds = dp->ds;
1379
1380         if (!ds->ops->phylink_mac_link_up) {
1381                 if (ds->ops->adjust_link && phydev)
1382                         ds->ops->adjust_link(ds, dp->index, phydev);
1383                 return;
1384         }
1385
1386         ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
1387                                      speed, duplex, tx_pause, rx_pause);
1388 }
1389
1390 static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
1391         .validate = dsa_port_phylink_validate,
1392         .mac_select_pcs = dsa_port_phylink_mac_select_pcs,
1393         .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
1394         .mac_config = dsa_port_phylink_mac_config,
1395         .mac_an_restart = dsa_port_phylink_mac_an_restart,
1396         .mac_link_down = dsa_port_phylink_mac_link_down,
1397         .mac_link_up = dsa_port_phylink_mac_link_up,
1398 };
1399
1400 int dsa_port_phylink_create(struct dsa_port *dp)
1401 {
1402         struct dsa_switch *ds = dp->ds;
1403         phy_interface_t mode;
1404         int err;
1405
1406         err = of_get_phy_mode(dp->dn, &mode);
1407         if (err)
1408                 mode = PHY_INTERFACE_MODE_NA;
1409
1410         /* Presence of phylink_mac_link_state or phylink_mac_an_restart is
1411          * an indicator of a legacy phylink driver.
1412          */
1413         if (ds->ops->phylink_mac_link_state ||
1414             ds->ops->phylink_mac_an_restart)
1415                 dp->pl_config.legacy_pre_march2020 = true;
1416
1417         if (ds->ops->phylink_get_caps)
1418                 ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
1419
1420         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
1421                                 mode, &dsa_port_phylink_mac_ops);
1422         if (IS_ERR(dp->pl)) {
1423                 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1424                 return PTR_ERR(dp->pl);
1425         }
1426
1427         return 0;
1428 }
1429
1430 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
1431 {
1432         struct dsa_switch *ds = dp->ds;
1433         struct phy_device *phydev;
1434         int port = dp->index;
1435         int err = 0;
1436
1437         phydev = dsa_port_get_phy_device(dp);
1438         if (!phydev)
1439                 return 0;
1440
1441         if (IS_ERR(phydev))
1442                 return PTR_ERR(phydev);
1443
1444         if (enable) {
1445                 err = genphy_resume(phydev);
1446                 if (err < 0)
1447                         goto err_put_dev;
1448
1449                 err = genphy_read_status(phydev);
1450                 if (err < 0)
1451                         goto err_put_dev;
1452         } else {
1453                 err = genphy_suspend(phydev);
1454                 if (err < 0)
1455                         goto err_put_dev;
1456         }
1457
1458         if (ds->ops->adjust_link)
1459                 ds->ops->adjust_link(ds, port, phydev);
1460
1461         dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
1462
1463 err_put_dev:
1464         put_device(&phydev->mdio.dev);
1465         return err;
1466 }
1467
1468 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
1469 {
1470         struct device_node *dn = dp->dn;
1471         struct dsa_switch *ds = dp->ds;
1472         struct phy_device *phydev;
1473         int port = dp->index;
1474         phy_interface_t mode;
1475         int err;
1476
1477         err = of_phy_register_fixed_link(dn);
1478         if (err) {
1479                 dev_err(ds->dev,
1480                         "failed to register the fixed PHY of port %d\n",
1481                         port);
1482                 return err;
1483         }
1484
1485         phydev = of_phy_find_device(dn);
1486
1487         err = of_get_phy_mode(dn, &mode);
1488         if (err)
1489                 mode = PHY_INTERFACE_MODE_NA;
1490         phydev->interface = mode;
1491
1492         genphy_read_status(phydev);
1493
1494         if (ds->ops->adjust_link)
1495                 ds->ops->adjust_link(ds, port, phydev);
1496
1497         put_device(&phydev->mdio.dev);
1498
1499         return 0;
1500 }
1501
1502 static int dsa_port_phylink_register(struct dsa_port *dp)
1503 {
1504         struct dsa_switch *ds = dp->ds;
1505         struct device_node *port_dn = dp->dn;
1506         int err;
1507
1508         dp->pl_config.dev = ds->dev;
1509         dp->pl_config.type = PHYLINK_DEV;
1510
1511         err = dsa_port_phylink_create(dp);
1512         if (err)
1513                 return err;
1514
1515         err = phylink_of_phy_connect(dp->pl, port_dn, 0);
1516         if (err && err != -ENODEV) {
1517                 pr_err("could not attach to PHY: %d\n", err);
1518                 goto err_phy_connect;
1519         }
1520
1521         return 0;
1522
1523 err_phy_connect:
1524         phylink_destroy(dp->pl);
1525         return err;
1526 }
1527
1528 int dsa_port_link_register_of(struct dsa_port *dp)
1529 {
1530         struct dsa_switch *ds = dp->ds;
1531         struct device_node *phy_np;
1532         int port = dp->index;
1533
1534         if (!ds->ops->adjust_link) {
1535                 phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
1536                 if (of_phy_is_fixed_link(dp->dn) || phy_np) {
1537                         if (ds->ops->phylink_mac_link_down)
1538                                 ds->ops->phylink_mac_link_down(ds, port,
1539                                         MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
1540                         return dsa_port_phylink_register(dp);
1541                 }
1542                 return 0;
1543         }
1544
1545         dev_warn(ds->dev,
1546                  "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1547
1548         if (of_phy_is_fixed_link(dp->dn))
1549                 return dsa_port_fixed_link_register_of(dp);
1550         else
1551                 return dsa_port_setup_phy_of(dp, true);
1552 }
1553
1554 void dsa_port_link_unregister_of(struct dsa_port *dp)
1555 {
1556         struct dsa_switch *ds = dp->ds;
1557
1558         if (!ds->ops->adjust_link && dp->pl) {
1559                 rtnl_lock();
1560                 phylink_disconnect_phy(dp->pl);
1561                 rtnl_unlock();
1562                 phylink_destroy(dp->pl);
1563                 dp->pl = NULL;
1564                 return;
1565         }
1566
1567         if (of_phy_is_fixed_link(dp->dn))
1568                 of_phy_deregister_fixed_link(dp->dn);
1569         else
1570                 dsa_port_setup_phy_of(dp, false);
1571 }
1572
1573 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
1574 {
1575         struct dsa_switch *ds = dp->ds;
1576         int err;
1577
1578         if (!ds->ops->port_hsr_join)
1579                 return -EOPNOTSUPP;
1580
1581         dp->hsr_dev = hsr;
1582
1583         err = ds->ops->port_hsr_join(ds, dp->index, hsr);
1584         if (err)
1585                 dp->hsr_dev = NULL;
1586
1587         return err;
1588 }
1589
1590 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
1591 {
1592         struct dsa_switch *ds = dp->ds;
1593         int err;
1594
1595         dp->hsr_dev = NULL;
1596
1597         if (ds->ops->port_hsr_leave) {
1598                 err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
1599                 if (err)
1600                         dev_err(dp->ds->dev,
1601                                 "port %d failed to leave HSR %s: %pe\n",
1602                                 dp->index, hsr->name, ERR_PTR(err));
1603         }
1604 }
1605
1606 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
1607 {
1608         struct dsa_notifier_tag_8021q_vlan_info info = {
1609                 .tree_index = dp->ds->dst->index,
1610                 .sw_index = dp->ds->index,
1611                 .port = dp->index,
1612                 .vid = vid,
1613         };
1614
1615         if (broadcast)
1616                 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1617
1618         return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
1619 }
1620
1621 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
1622 {
1623         struct dsa_notifier_tag_8021q_vlan_info info = {
1624                 .tree_index = dp->ds->dst->index,
1625                 .sw_index = dp->ds->index,
1626                 .port = dp->index,
1627                 .vid = vid,
1628         };
1629         int err;
1630
1631         if (broadcast)
1632                 err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1633         else
1634                 err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
1635         if (err)
1636                 dev_err(dp->ds->dev,
1637                         "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
1638                         dp->index, vid, ERR_PTR(err));
1639 }