// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>
22 static LIST_HEAD(deferred);
23 static DEFINE_SPINLOCK(deferred_lock);
25 typedef void switchdev_deferred_func_t(struct net_device *dev,
28 struct switchdev_deferred_item {
29 struct list_head list;
30 struct net_device *dev;
31 switchdev_deferred_func_t *func;
35 static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
37 struct switchdev_deferred_item *dfitem;
39 spin_lock_bh(&deferred_lock);
40 if (list_empty(&deferred)) {
44 dfitem = list_first_entry(&deferred,
45 struct switchdev_deferred_item, list);
46 list_del(&dfitem->list);
48 spin_unlock_bh(&deferred_lock);
53 * switchdev_deferred_process - Process ops in deferred queue
55 * Called to flush the ops currently queued in deferred ops queue.
56 * rtnl_lock must be held.
58 void switchdev_deferred_process(void)
60 struct switchdev_deferred_item *dfitem;
64 while ((dfitem = switchdev_deferred_dequeue())) {
65 dfitem->func(dfitem->dev, dfitem->data);
70 EXPORT_SYMBOL_GPL(switchdev_deferred_process);
/* Workqueue entry point: flush the deferred queue under rtnl_lock. */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
81 static int switchdev_deferred_enqueue(struct net_device *dev,
82 const void *data, size_t data_len,
83 switchdev_deferred_func_t *func)
85 struct switchdev_deferred_item *dfitem;
87 dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
92 memcpy(dfitem->data, data, data_len);
94 spin_lock_bh(&deferred_lock);
95 list_add_tail(&dfitem->list, &deferred);
96 spin_unlock_bh(&deferred_lock);
97 schedule_work(&deferred_process_work);
101 static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
102 struct net_device *dev,
103 const struct switchdev_attr *attr)
108 struct switchdev_notifier_port_attr_info attr_info = {
113 rc = call_switchdev_blocking_notifiers(nt, dev,
114 &attr_info.info, NULL);
115 err = notifier_to_errno(rc);
117 WARN_ON(!attr_info.handled);
121 if (!attr_info.handled)
127 static int switchdev_port_attr_set_now(struct net_device *dev,
128 const struct switchdev_attr *attr)
130 return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr);
133 static void switchdev_port_attr_set_deferred(struct net_device *dev,
136 const struct switchdev_attr *attr = data;
139 err = switchdev_port_attr_set_now(dev, attr);
140 if (err && err != -EOPNOTSUPP)
141 netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
144 attr->complete(dev, err, attr->complete_priv);
147 static int switchdev_port_attr_set_defer(struct net_device *dev,
148 const struct switchdev_attr *attr)
150 return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
151 switchdev_port_attr_set_deferred);
155 * switchdev_port_attr_set - Set port attribute
158 * @attr: attribute to set
160 * rtnl_lock must be held and must not be in atomic section,
161 * in case SWITCHDEV_F_DEFER flag is not set.
163 int switchdev_port_attr_set(struct net_device *dev,
164 const struct switchdev_attr *attr)
166 if (attr->flags & SWITCHDEV_F_DEFER)
167 return switchdev_port_attr_set_defer(dev, attr);
169 return switchdev_port_attr_set_now(dev, attr);
171 EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
173 static size_t switchdev_obj_size(const struct switchdev_obj *obj)
176 case SWITCHDEV_OBJ_ID_PORT_VLAN:
177 return sizeof(struct switchdev_obj_port_vlan);
178 case SWITCHDEV_OBJ_ID_PORT_MDB:
179 return sizeof(struct switchdev_obj_port_mdb);
180 case SWITCHDEV_OBJ_ID_HOST_MDB:
181 return sizeof(struct switchdev_obj_port_mdb);
188 static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
189 struct net_device *dev,
190 const struct switchdev_obj *obj,
191 struct netlink_ext_ack *extack)
196 struct switchdev_notifier_port_obj_info obj_info = {
201 rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
202 err = notifier_to_errno(rc);
204 WARN_ON(!obj_info.handled);
207 if (!obj_info.handled)
212 static void switchdev_port_obj_add_deferred(struct net_device *dev,
215 const struct switchdev_obj *obj = data;
219 err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
221 if (err && err != -EOPNOTSUPP)
222 netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
225 obj->complete(dev, err, obj->complete_priv);
228 static int switchdev_port_obj_add_defer(struct net_device *dev,
229 const struct switchdev_obj *obj)
231 return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
232 switchdev_port_obj_add_deferred);
236 * switchdev_port_obj_add - Add port object
239 * @obj: object to add
240 * @extack: netlink extended ack
242 * rtnl_lock must be held and must not be in atomic section,
243 * in case SWITCHDEV_F_DEFER flag is not set.
245 int switchdev_port_obj_add(struct net_device *dev,
246 const struct switchdev_obj *obj,
247 struct netlink_ext_ack *extack)
249 if (obj->flags & SWITCHDEV_F_DEFER)
250 return switchdev_port_obj_add_defer(dev, obj);
252 return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
255 EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
257 static int switchdev_port_obj_del_now(struct net_device *dev,
258 const struct switchdev_obj *obj)
260 return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
264 static void switchdev_port_obj_del_deferred(struct net_device *dev,
267 const struct switchdev_obj *obj = data;
270 err = switchdev_port_obj_del_now(dev, obj);
271 if (err && err != -EOPNOTSUPP)
272 netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
275 obj->complete(dev, err, obj->complete_priv);
278 static int switchdev_port_obj_del_defer(struct net_device *dev,
279 const struct switchdev_obj *obj)
281 return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
282 switchdev_port_obj_del_deferred);
286 * switchdev_port_obj_del - Delete port object
289 * @obj: object to delete
291 * rtnl_lock must be held and must not be in atomic section,
292 * in case SWITCHDEV_F_DEFER flag is not set.
294 int switchdev_port_obj_del(struct net_device *dev,
295 const struct switchdev_obj *obj)
297 if (obj->flags & SWITCHDEV_F_DEFER)
298 return switchdev_port_obj_del_defer(dev, obj);
300 return switchdev_port_obj_del_now(dev, obj);
302 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
/* Atomic chain for FDB-style events callable from atomic context;
 * blocking chain for port attr/obj events that may sleep.
 */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
308 * register_switchdev_notifier - Register notifier
309 * @nb: notifier_block
311 * Register switch device notifier.
313 int register_switchdev_notifier(struct notifier_block *nb)
315 return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
317 EXPORT_SYMBOL_GPL(register_switchdev_notifier);
320 * unregister_switchdev_notifier - Unregister notifier
321 * @nb: notifier_block
323 * Unregister switch device notifier.
325 int unregister_switchdev_notifier(struct notifier_block *nb)
327 return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
329 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
332 * call_switchdev_notifiers - Call notifiers
333 * @val: value passed unmodified to notifier function
335 * @info: notifier information data
336 * @extack: netlink extended ack
337 * Call all network notifier blocks.
339 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
340 struct switchdev_notifier_info *info,
341 struct netlink_ext_ack *extack)
344 info->extack = extack;
345 return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
347 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
349 int register_switchdev_blocking_notifier(struct notifier_block *nb)
351 struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
353 return blocking_notifier_chain_register(chain, nb);
355 EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
357 int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
359 struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
361 return blocking_notifier_chain_unregister(chain, nb);
363 EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
365 int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
366 struct switchdev_notifier_info *info,
367 struct netlink_ext_ack *extack)
370 info->extack = extack;
371 return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
374 EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
376 static int __switchdev_handle_port_obj_add(struct net_device *dev,
377 struct switchdev_notifier_port_obj_info *port_obj_info,
378 bool (*check_cb)(const struct net_device *dev),
379 int (*add_cb)(struct net_device *dev,
380 const struct switchdev_obj *obj,
381 struct netlink_ext_ack *extack))
383 struct netlink_ext_ack *extack;
384 struct net_device *lower_dev;
385 struct list_head *iter;
386 int err = -EOPNOTSUPP;
388 extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
391 err = add_cb(dev, port_obj_info->obj, extack);
392 if (err != -EOPNOTSUPP)
393 port_obj_info->handled = true;
397 /* Switch ports might be stacked under e.g. a LAG. Ignore the
398 * unsupported devices, another driver might be able to handle them. But
399 * propagate to the callers any hard errors.
401 * If the driver does its own bookkeeping of stacked ports, it's not
402 * necessary to go through this helper.
404 netdev_for_each_lower_dev(dev, lower_dev, iter) {
405 if (netif_is_bridge_master(lower_dev))
408 err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
410 if (err && err != -EOPNOTSUPP)
417 int switchdev_handle_port_obj_add(struct net_device *dev,
418 struct switchdev_notifier_port_obj_info *port_obj_info,
419 bool (*check_cb)(const struct net_device *dev),
420 int (*add_cb)(struct net_device *dev,
421 const struct switchdev_obj *obj,
422 struct netlink_ext_ack *extack))
426 err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
428 if (err == -EOPNOTSUPP)
432 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
434 static int __switchdev_handle_port_obj_del(struct net_device *dev,
435 struct switchdev_notifier_port_obj_info *port_obj_info,
436 bool (*check_cb)(const struct net_device *dev),
437 int (*del_cb)(struct net_device *dev,
438 const struct switchdev_obj *obj))
440 struct net_device *lower_dev;
441 struct list_head *iter;
442 int err = -EOPNOTSUPP;
445 err = del_cb(dev, port_obj_info->obj);
446 if (err != -EOPNOTSUPP)
447 port_obj_info->handled = true;
451 /* Switch ports might be stacked under e.g. a LAG. Ignore the
452 * unsupported devices, another driver might be able to handle them. But
453 * propagate to the callers any hard errors.
455 * If the driver does its own bookkeeping of stacked ports, it's not
456 * necessary to go through this helper.
458 netdev_for_each_lower_dev(dev, lower_dev, iter) {
459 if (netif_is_bridge_master(lower_dev))
462 err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
464 if (err && err != -EOPNOTSUPP)
471 int switchdev_handle_port_obj_del(struct net_device *dev,
472 struct switchdev_notifier_port_obj_info *port_obj_info,
473 bool (*check_cb)(const struct net_device *dev),
474 int (*del_cb)(struct net_device *dev,
475 const struct switchdev_obj *obj))
479 err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
481 if (err == -EOPNOTSUPP)
485 EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
487 static int __switchdev_handle_port_attr_set(struct net_device *dev,
488 struct switchdev_notifier_port_attr_info *port_attr_info,
489 bool (*check_cb)(const struct net_device *dev),
490 int (*set_cb)(struct net_device *dev,
491 const struct switchdev_attr *attr))
493 struct net_device *lower_dev;
494 struct list_head *iter;
495 int err = -EOPNOTSUPP;
498 err = set_cb(dev, port_attr_info->attr);
499 if (err != -EOPNOTSUPP)
500 port_attr_info->handled = true;
504 /* Switch ports might be stacked under e.g. a LAG. Ignore the
505 * unsupported devices, another driver might be able to handle them. But
506 * propagate to the callers any hard errors.
508 * If the driver does its own bookkeeping of stacked ports, it's not
509 * necessary to go through this helper.
511 netdev_for_each_lower_dev(dev, lower_dev, iter) {
512 if (netif_is_bridge_master(lower_dev))
515 err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
517 if (err && err != -EOPNOTSUPP)
524 int switchdev_handle_port_attr_set(struct net_device *dev,
525 struct switchdev_notifier_port_attr_info *port_attr_info,
526 bool (*check_cb)(const struct net_device *dev),
527 int (*set_cb)(struct net_device *dev,
528 const struct switchdev_attr *attr))
532 err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
534 if (err == -EOPNOTSUPP)
538 EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);