// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>
static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
		       va->vid == vb->vid &&
		       va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
		       ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG_ON(1);
	return false;
}
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}
/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
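
/* Usage sketch: a caller that has just queued deferred operations and needs
 * them applied before inspecting the result can flush the queue explicitly,
 * provided it holds rtnl_lock:
 *
 *	rtnl_lock();
 *	switchdev_deferred_process();
 *	rtnl_unlock();
 */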
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}
static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
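
/* Usage sketch (hypothetical caller): putting a bridge port into the
 * forwarding state. Setting SWITCHDEV_F_DEFER in attr.flags would queue the
 * operation through the deferred machinery above instead of notifying
 * drivers synchronously:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, extack);
 */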
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}
static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}
static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
					    enum switchdev_obj_id obj_id,
					    int err, bool add)
{
	const char *action = add ? "add" : "del";
	const char *reason = "";
	const char *problem;
	const char *obj_str;

	switch (obj_id) {
	case SWITCHDEV_OBJ_ID_UNDEFINED:
		obj_str = "Undefined object";
		problem = "Attempted operation is undefined, indicating a possible programming\n"
			  "error.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		obj_str = "VLAN entry";
		problem = "Failure in VLAN settings on this port might disrupt network\n"
			  "segmentation or traffic isolation, affecting network partitioning.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		obj_str = "Port Multicast Database entry";
		problem = "Failure in updating the port's Multicast Database could lead to\n"
			  "multicast forwarding issues.\n";
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		obj_str = "Host Multicast Database entry";
		problem = "Failure in updating the host's Multicast Database may impact multicast\n"
			  "group memberships or traffic delivery, affecting multicast\n"
			  "communication.\n";
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		obj_str = "Media Redundancy Protocol configuration for port";
		problem = "Failure to set MRP ring ID on this port prevents communication with\n"
			  "the specified redundancy ring, resulting in an inability to engage\n"
			  "in MRP-based network operations.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
		obj_str = "MRP Test Frame Operations for port";
		problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
			  "assess the ring's operational integrity and fault response, hindering\n"
			  "proactive network management.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		obj_str = "MRP Ring Role Configuration";
		problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
			  "disrupting communication for all participants, or isolate the local\n"
			  "system from the ring, hindering its ability to communicate with other\n"
			  "participants.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
		obj_str = "MRP Ring State Configuration";
		problem = "Failure to correctly set the MRP ring state can result in network\n"
			  "loops or leave segments without communication. In a Closed state,\n"
			  "it maintains loop prevention by blocking one MRM port, while an Open\n"
			  "state activates in response to failures, changing port states to\n"
			  "preserve network connectivity.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
		obj_str = "MRP_InTest Frame Generation Configuration";
		problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
			  "interconnection ring's state, leading to incorrect blocking or\n"
			  "unblocking of the I/C port. This misconfiguration might result\n"
			  "in unintended network loops or isolate critical network segments,\n"
			  "compromising network integrity and reliability.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
		obj_str = "Interconnection Ring Role Configuration";
		problem = "Incorrect assignment of interconnection ring roles (MIM/MIC) can\n"
			  "impair the formation of the interconnection rings.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
		obj_str = "Interconnection Ring State Configuration";
		problem = "Failure in updating the interconnection ring state can, in the case\n"
			  "of the Open state, lead to incorrect blocking or unblocking of the\n"
			  "I/C port, resulting in unintended network loops or isolation of\n"
			  "critical network segments.\n";
		break;
	default:
		obj_str = "Unknown object";
		problem = "Indicating a possible programming error.\n";
	}

	switch (err) {
	case -ENOSPC:
		reason = "Current HW/SW setup lacks sufficient resources.\n";
		break;
	}

	netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
		   action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
}
static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
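
/* Usage sketch (hypothetical caller): installing VLAN 10 as the untagged
 * PVID on a bridge port. Drivers observe this as a SWITCHDEV_PORT_OBJ_ADD
 * notification on the blocking chain:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 */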
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item is pending, which is
 *	equivalent to the action @nt on an object @obj.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
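
/* Usage sketch: a replay path can skip re-notifying an object whose
 * addition is still sitting in the deferred queue, so that drivers are not
 * told about the same object twice:
 *
 *	if (switchdev_port_obj_act_is_deferred(dev, SWITCHDEV_PORT_OBJ_ADD,
 *					       &mdb->obj))
 *		continue;
 */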
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
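
/* Usage sketch (hypothetical driver): reporting a MAC address learned in
 * hardware to the bridge over the atomic chain. foo_port_dev is the
 * driver's net_device; mac and vid come from the hardware learning event:
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,
 *		.vid = vid,
 *		.offloaded = true,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
 *				 foo_port_dev, &fdb_info.info, NULL);
 */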
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(chain, nb);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct raw_notifier_head *chain = &switchdev_blocking_notif_chain;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(chain, nb);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	info->dev = dev;
	info->extack = extack;
	return raw_notifier_call_chain(&switchdev_blocking_notif_chain,
				       val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};
static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}
static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
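
/* Usage sketch (hypothetical driver): dispatching SWITCHDEV_FDB_*_TO_DEVICE
 * events from the driver's atomic notifier block. foo_port_dev_check(),
 * foo_foreign_dev_check() and foo_fdb_mod_cb() are assumed driver helpers;
 * the mod_cb typically only schedules deferred work, since this path runs
 * in atomic context:
 *
 *	case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *	case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *							   foo_port_dev_check,
 *							   foo_foreign_dev_check,
 *							   foo_fdb_mod_cb);
 *		return notifier_from_errno(err);
 */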
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}
/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
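
/* Usage sketch (hypothetical driver): handling SWITCHDEV_PORT_OBJ_ADD in
 * the driver's blocking notifier. foo_port_dev_check() and
 * foo_port_obj_add() are assumed driver helpers:
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr,
 *						    foo_port_dev_check,
 *						    foo_port_obj_add);
 *		return notifier_from_errno(err);
 */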
/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}
/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
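
/* Usage sketch (hypothetical driver): marking a port as offloaded when it
 * joins a bridge, typically from a NETDEV_CHANGEUPPER handler under
 * rtnl_lock. foo_switchdev_nb and foo_switchdev_blocking_nb are the
 * driver's already-registered notifier blocks, priv its port context:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 */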
void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);