// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */
8 | #include <linux/kernel.h> | |
9 | #include <linux/types.h> | |
10 | #include <linux/init.h> | |
03bf0c28 JP |
11 | #include <linux/mutex.h> |
12 | #include <linux/notifier.h> | |
007f790c | 13 | #include <linux/netdevice.h> |
850d0cbc | 14 | #include <linux/etherdevice.h> |
47f8328b | 15 | #include <linux/if_bridge.h> |
7ea6eb3f | 16 | #include <linux/list.h> |
793f4014 | 17 | #include <linux/workqueue.h> |
87aaf2ca | 18 | #include <linux/if_vlan.h> |
4f2c6ae5 | 19 | #include <linux/rtnetlink.h> |
007f790c JP |
20 | #include <net/switchdev.h> |
21 | ||
793f4014 JP |
22 | static LIST_HEAD(deferred); |
23 | static DEFINE_SPINLOCK(deferred_lock); | |
24 | ||
25 | typedef void switchdev_deferred_func_t(struct net_device *dev, | |
26 | const void *data); | |
27 | ||
28 | struct switchdev_deferred_item { | |
29 | struct list_head list; | |
30 | struct net_device *dev; | |
31 | switchdev_deferred_func_t *func; | |
fbfc8502 | 32 | unsigned long data[]; |
793f4014 JP |
33 | }; |
34 | ||
35 | static struct switchdev_deferred_item *switchdev_deferred_dequeue(void) | |
36 | { | |
37 | struct switchdev_deferred_item *dfitem; | |
38 | ||
39 | spin_lock_bh(&deferred_lock); | |
40 | if (list_empty(&deferred)) { | |
41 | dfitem = NULL; | |
42 | goto unlock; | |
43 | } | |
44 | dfitem = list_first_entry(&deferred, | |
45 | struct switchdev_deferred_item, list); | |
46 | list_del(&dfitem->list); | |
47 | unlock: | |
48 | spin_unlock_bh(&deferred_lock); | |
49 | return dfitem; | |
50 | } | |
51 | ||
52 | /** | |
53 | * switchdev_deferred_process - Process ops in deferred queue | |
54 | * | |
55 | * Called to flush the ops currently queued in deferred ops queue. | |
56 | * rtnl_lock must be held. | |
57 | */ | |
58 | void switchdev_deferred_process(void) | |
59 | { | |
60 | struct switchdev_deferred_item *dfitem; | |
61 | ||
62 | ASSERT_RTNL(); | |
63 | ||
64 | while ((dfitem = switchdev_deferred_dequeue())) { | |
65 | dfitem->func(dfitem->dev, dfitem->data); | |
66 | dev_put(dfitem->dev); | |
67 | kfree(dfitem); | |
68 | } | |
69 | } | |
70 | EXPORT_SYMBOL_GPL(switchdev_deferred_process); | |
71 | ||
/* Worker that flushes the deferred op queue from process context. */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
81 | static int switchdev_deferred_enqueue(struct net_device *dev, | |
82 | const void *data, size_t data_len, | |
83 | switchdev_deferred_func_t *func) | |
84 | { | |
85 | struct switchdev_deferred_item *dfitem; | |
86 | ||
87 | dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC); | |
88 | if (!dfitem) | |
89 | return -ENOMEM; | |
90 | dfitem->dev = dev; | |
91 | dfitem->func = func; | |
92 | memcpy(dfitem->data, data, data_len); | |
93 | dev_hold(dev); | |
94 | spin_lock_bh(&deferred_lock); | |
95 | list_add_tail(&dfitem->list, &deferred); | |
96 | spin_unlock_bh(&deferred_lock); | |
97 | schedule_work(&deferred_process_work); | |
98 | return 0; | |
99 | } | |
100 | ||
d45224d6 FF |
101 | static int switchdev_port_attr_notify(enum switchdev_notifier_type nt, |
102 | struct net_device *dev, | |
bae33f2b | 103 | const struct switchdev_attr *attr) |
3094333d | 104 | { |
d45224d6 FF |
105 | int err; |
106 | int rc; | |
3094333d | 107 | |
d45224d6 FF |
108 | struct switchdev_notifier_port_attr_info attr_info = { |
109 | .attr = attr, | |
d45224d6 FF |
110 | .handled = false, |
111 | }; | |
3094333d | 112 | |
d45224d6 FF |
113 | rc = call_switchdev_blocking_notifiers(nt, dev, |
114 | &attr_info.info, NULL); | |
115 | err = notifier_to_errno(rc); | |
116 | if (err) { | |
117 | WARN_ON(!attr_info.handled); | |
118 | return err; | |
3094333d SF |
119 | } |
120 | ||
d45224d6 FF |
121 | if (!attr_info.handled) |
122 | return -EOPNOTSUPP; | |
464314ea | 123 | |
d45224d6 | 124 | return 0; |
3094333d SF |
125 | } |
126 | ||
0bc05d58 JP |
127 | static int switchdev_port_attr_set_now(struct net_device *dev, |
128 | const struct switchdev_attr *attr) | |
3094333d | 129 | { |
bae33f2b | 130 | return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr); |
3094333d | 131 | } |
0bc05d58 JP |
132 | |
133 | static void switchdev_port_attr_set_deferred(struct net_device *dev, | |
134 | const void *data) | |
135 | { | |
136 | const struct switchdev_attr *attr = data; | |
137 | int err; | |
138 | ||
139 | err = switchdev_port_attr_set_now(dev, attr); | |
140 | if (err && err != -EOPNOTSUPP) | |
141 | netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", | |
142 | err, attr->id); | |
7ceb2afb ER |
143 | if (attr->complete) |
144 | attr->complete(dev, err, attr->complete_priv); | |
0bc05d58 JP |
145 | } |
146 | ||
147 | static int switchdev_port_attr_set_defer(struct net_device *dev, | |
148 | const struct switchdev_attr *attr) | |
149 | { | |
150 | return switchdev_deferred_enqueue(dev, attr, sizeof(*attr), | |
151 | switchdev_port_attr_set_deferred); | |
152 | } | |
153 | ||
154 | /** | |
155 | * switchdev_port_attr_set - Set port attribute | |
156 | * | |
157 | * @dev: port device | |
158 | * @attr: attribute to set | |
159 | * | |
0bc05d58 JP |
160 | * rtnl_lock must be held and must not be in atomic section, |
161 | * in case SWITCHDEV_F_DEFER flag is not set. | |
162 | */ | |
163 | int switchdev_port_attr_set(struct net_device *dev, | |
164 | const struct switchdev_attr *attr) | |
165 | { | |
166 | if (attr->flags & SWITCHDEV_F_DEFER) | |
167 | return switchdev_port_attr_set_defer(dev, attr); | |
168 | ASSERT_RTNL(); | |
169 | return switchdev_port_attr_set_now(dev, attr); | |
170 | } | |
3094333d SF |
171 | EXPORT_SYMBOL_GPL(switchdev_port_attr_set); |
172 | ||
e258d919 SF |
173 | static size_t switchdev_obj_size(const struct switchdev_obj *obj) |
174 | { | |
175 | switch (obj->id) { | |
176 | case SWITCHDEV_OBJ_ID_PORT_VLAN: | |
177 | return sizeof(struct switchdev_obj_port_vlan); | |
4d41e125 ER |
178 | case SWITCHDEV_OBJ_ID_PORT_MDB: |
179 | return sizeof(struct switchdev_obj_port_mdb); | |
47d5b6db AL |
180 | case SWITCHDEV_OBJ_ID_HOST_MDB: |
181 | return sizeof(struct switchdev_obj_port_mdb); | |
e258d919 SF |
182 | default: |
183 | BUG(); | |
184 | } | |
185 | return 0; | |
186 | } | |
187 | ||
d17d9f5e PM |
188 | static int switchdev_port_obj_notify(enum switchdev_notifier_type nt, |
189 | struct net_device *dev, | |
190 | const struct switchdev_obj *obj, | |
69b7320e | 191 | struct netlink_ext_ack *extack) |
491d0f15 | 192 | { |
d17d9f5e PM |
193 | int rc; |
194 | int err; | |
491d0f15 | 195 | |
d17d9f5e PM |
196 | struct switchdev_notifier_port_obj_info obj_info = { |
197 | .obj = obj, | |
d17d9f5e PM |
198 | .handled = false, |
199 | }; | |
491d0f15 | 200 | |
479c86dc | 201 | rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack); |
d17d9f5e PM |
202 | err = notifier_to_errno(rc); |
203 | if (err) { | |
204 | WARN_ON(!obj_info.handled); | |
205 | return err; | |
491d0f15 | 206 | } |
d17d9f5e PM |
207 | if (!obj_info.handled) |
208 | return -EOPNOTSUPP; | |
209 | return 0; | |
491d0f15 SF |
210 | } |
211 | ||
4d429c5d JP |
212 | static void switchdev_port_obj_add_deferred(struct net_device *dev, |
213 | const void *data) | |
214 | { | |
215 | const struct switchdev_obj *obj = data; | |
216 | int err; | |
217 | ||
cf6def51 VO |
218 | ASSERT_RTNL(); |
219 | err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, | |
220 | dev, obj, NULL); | |
4d429c5d JP |
221 | if (err && err != -EOPNOTSUPP) |
222 | netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", | |
223 | err, obj->id); | |
7ceb2afb ER |
224 | if (obj->complete) |
225 | obj->complete(dev, err, obj->complete_priv); | |
4d429c5d JP |
226 | } |
227 | ||
228 | static int switchdev_port_obj_add_defer(struct net_device *dev, | |
229 | const struct switchdev_obj *obj) | |
230 | { | |
e258d919 | 231 | return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj), |
4d429c5d JP |
232 | switchdev_port_obj_add_deferred); |
233 | } | |
491d0f15 SF |
234 | |
235 | /** | |
4d429c5d | 236 | * switchdev_port_obj_add - Add port object |
491d0f15 SF |
237 | * |
238 | * @dev: port device | |
4d429c5d | 239 | * @obj: object to add |
c8af73f0 | 240 | * @extack: netlink extended ack |
4d429c5d | 241 | * |
4d429c5d JP |
242 | * rtnl_lock must be held and must not be in atomic section, |
243 | * in case SWITCHDEV_F_DEFER flag is not set. | |
491d0f15 | 244 | */ |
4d429c5d | 245 | int switchdev_port_obj_add(struct net_device *dev, |
69b7320e PM |
246 | const struct switchdev_obj *obj, |
247 | struct netlink_ext_ack *extack) | |
4d429c5d JP |
248 | { |
249 | if (obj->flags & SWITCHDEV_F_DEFER) | |
250 | return switchdev_port_obj_add_defer(dev, obj); | |
251 | ASSERT_RTNL(); | |
cf6def51 VO |
252 | return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, |
253 | dev, obj, extack); | |
4d429c5d JP |
254 | } |
255 | EXPORT_SYMBOL_GPL(switchdev_port_obj_add); | |
256 | ||
257 | static int switchdev_port_obj_del_now(struct net_device *dev, | |
258 | const struct switchdev_obj *obj) | |
491d0f15 | 259 | { |
d17d9f5e | 260 | return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL, |
ffb68fc5 | 261 | dev, obj, NULL); |
491d0f15 | 262 | } |
4d429c5d JP |
263 | |
264 | static void switchdev_port_obj_del_deferred(struct net_device *dev, | |
265 | const void *data) | |
266 | { | |
267 | const struct switchdev_obj *obj = data; | |
268 | int err; | |
269 | ||
270 | err = switchdev_port_obj_del_now(dev, obj); | |
271 | if (err && err != -EOPNOTSUPP) | |
272 | netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", | |
273 | err, obj->id); | |
7ceb2afb ER |
274 | if (obj->complete) |
275 | obj->complete(dev, err, obj->complete_priv); | |
4d429c5d JP |
276 | } |
277 | ||
278 | static int switchdev_port_obj_del_defer(struct net_device *dev, | |
279 | const struct switchdev_obj *obj) | |
280 | { | |
e258d919 | 281 | return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj), |
4d429c5d JP |
282 | switchdev_port_obj_del_deferred); |
283 | } | |
284 | ||
285 | /** | |
286 | * switchdev_port_obj_del - Delete port object | |
287 | * | |
288 | * @dev: port device | |
4d429c5d JP |
289 | * @obj: object to delete |
290 | * | |
291 | * rtnl_lock must be held and must not be in atomic section, | |
292 | * in case SWITCHDEV_F_DEFER flag is not set. | |
293 | */ | |
294 | int switchdev_port_obj_del(struct net_device *dev, | |
295 | const struct switchdev_obj *obj) | |
296 | { | |
297 | if (obj->flags & SWITCHDEV_F_DEFER) | |
298 | return switchdev_port_obj_del_defer(dev, obj); | |
299 | ASSERT_RTNL(); | |
300 | return switchdev_port_obj_del_now(dev, obj); | |
301 | } | |
491d0f15 SF |
302 | EXPORT_SYMBOL_GPL(switchdev_port_obj_del); |
303 | ||
/* Atomic chain for events raised from any context; blocking chain for
 * events that may sleep (port object / attribute notifications).
 */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
03bf0c28 JP |
306 | |
307 | /** | |
ebb9a03a | 308 | * register_switchdev_notifier - Register notifier |
03bf0c28 JP |
309 | * @nb: notifier_block |
310 | * | |
ff5cf100 | 311 | * Register switch device notifier. |
03bf0c28 | 312 | */ |
ebb9a03a | 313 | int register_switchdev_notifier(struct notifier_block *nb) |
03bf0c28 | 314 | { |
ff5cf100 | 315 | return atomic_notifier_chain_register(&switchdev_notif_chain, nb); |
03bf0c28 | 316 | } |
ebb9a03a | 317 | EXPORT_SYMBOL_GPL(register_switchdev_notifier); |
03bf0c28 JP |
318 | |
319 | /** | |
ebb9a03a | 320 | * unregister_switchdev_notifier - Unregister notifier |
03bf0c28 JP |
321 | * @nb: notifier_block |
322 | * | |
323 | * Unregister switch device notifier. | |
03bf0c28 | 324 | */ |
ebb9a03a | 325 | int unregister_switchdev_notifier(struct notifier_block *nb) |
03bf0c28 | 326 | { |
ff5cf100 | 327 | return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb); |
03bf0c28 | 328 | } |
ebb9a03a | 329 | EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); |
03bf0c28 JP |
330 | |
331 | /** | |
ebb9a03a | 332 | * call_switchdev_notifiers - Call notifiers |
03bf0c28 JP |
333 | * @val: value passed unmodified to notifier function |
334 | * @dev: port device | |
335 | * @info: notifier information data | |
ea6754ae | 336 | * @extack: netlink extended ack |
ff5cf100 | 337 | * Call all network notifier blocks. |
03bf0c28 | 338 | */ |
ebb9a03a | 339 | int call_switchdev_notifiers(unsigned long val, struct net_device *dev, |
6685987c PM |
340 | struct switchdev_notifier_info *info, |
341 | struct netlink_ext_ack *extack) | |
03bf0c28 | 342 | { |
03bf0c28 | 343 | info->dev = dev; |
6685987c | 344 | info->extack = extack; |
ff5cf100 | 345 | return atomic_notifier_call_chain(&switchdev_notif_chain, val, info); |
03bf0c28 | 346 | } |
ebb9a03a | 347 | EXPORT_SYMBOL_GPL(call_switchdev_notifiers); |
8a44dbb2 | 348 | |
a93e3b17 PM |
349 | int register_switchdev_blocking_notifier(struct notifier_block *nb) |
350 | { | |
351 | struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; | |
352 | ||
353 | return blocking_notifier_chain_register(chain, nb); | |
354 | } | |
355 | EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); | |
356 | ||
357 | int unregister_switchdev_blocking_notifier(struct notifier_block *nb) | |
358 | { | |
359 | struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; | |
360 | ||
361 | return blocking_notifier_chain_unregister(chain, nb); | |
362 | } | |
363 | EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); | |
364 | ||
365 | int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, | |
479c86dc PM |
366 | struct switchdev_notifier_info *info, |
367 | struct netlink_ext_ack *extack) | |
a93e3b17 PM |
368 | { |
369 | info->dev = dev; | |
479c86dc | 370 | info->extack = extack; |
a93e3b17 PM |
371 | return blocking_notifier_call_chain(&switchdev_blocking_notif_chain, |
372 | val, info); | |
373 | } | |
374 | EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers); | |
375 | ||
f30f0601 PM |
376 | static int __switchdev_handle_port_obj_add(struct net_device *dev, |
377 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
378 | bool (*check_cb)(const struct net_device *dev), | |
379 | int (*add_cb)(struct net_device *dev, | |
380 | const struct switchdev_obj *obj, | |
69213513 | 381 | struct netlink_ext_ack *extack)) |
f30f0601 | 382 | { |
69213513 | 383 | struct netlink_ext_ack *extack; |
f30f0601 PM |
384 | struct net_device *lower_dev; |
385 | struct list_head *iter; | |
386 | int err = -EOPNOTSUPP; | |
387 | ||
69213513 PM |
388 | extack = switchdev_notifier_info_to_extack(&port_obj_info->info); |
389 | ||
f30f0601 | 390 | if (check_cb(dev)) { |
c358f952 | 391 | err = add_cb(dev, port_obj_info->obj, extack); |
20776b46 RV |
392 | if (err != -EOPNOTSUPP) |
393 | port_obj_info->handled = true; | |
394 | return err; | |
f30f0601 PM |
395 | } |
396 | ||
397 | /* Switch ports might be stacked under e.g. a LAG. Ignore the | |
398 | * unsupported devices, another driver might be able to handle them. But | |
399 | * propagate to the callers any hard errors. | |
400 | * | |
401 | * If the driver does its own bookkeeping of stacked ports, it's not | |
402 | * necessary to go through this helper. | |
403 | */ | |
404 | netdev_for_each_lower_dev(dev, lower_dev, iter) { | |
07c6f980 RK |
405 | if (netif_is_bridge_master(lower_dev)) |
406 | continue; | |
407 | ||
f30f0601 PM |
408 | err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info, |
409 | check_cb, add_cb); | |
410 | if (err && err != -EOPNOTSUPP) | |
411 | return err; | |
412 | } | |
413 | ||
414 | return err; | |
415 | } | |
416 | ||
417 | int switchdev_handle_port_obj_add(struct net_device *dev, | |
418 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
419 | bool (*check_cb)(const struct net_device *dev), | |
420 | int (*add_cb)(struct net_device *dev, | |
421 | const struct switchdev_obj *obj, | |
69213513 | 422 | struct netlink_ext_ack *extack)) |
f30f0601 PM |
423 | { |
424 | int err; | |
425 | ||
426 | err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb, | |
427 | add_cb); | |
428 | if (err == -EOPNOTSUPP) | |
429 | err = 0; | |
430 | return err; | |
431 | } | |
432 | EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add); | |
433 | ||
434 | static int __switchdev_handle_port_obj_del(struct net_device *dev, | |
435 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
436 | bool (*check_cb)(const struct net_device *dev), | |
437 | int (*del_cb)(struct net_device *dev, | |
438 | const struct switchdev_obj *obj)) | |
439 | { | |
440 | struct net_device *lower_dev; | |
441 | struct list_head *iter; | |
442 | int err = -EOPNOTSUPP; | |
443 | ||
444 | if (check_cb(dev)) { | |
20776b46 RV |
445 | err = del_cb(dev, port_obj_info->obj); |
446 | if (err != -EOPNOTSUPP) | |
447 | port_obj_info->handled = true; | |
448 | return err; | |
f30f0601 PM |
449 | } |
450 | ||
451 | /* Switch ports might be stacked under e.g. a LAG. Ignore the | |
452 | * unsupported devices, another driver might be able to handle them. But | |
453 | * propagate to the callers any hard errors. | |
454 | * | |
455 | * If the driver does its own bookkeeping of stacked ports, it's not | |
456 | * necessary to go through this helper. | |
457 | */ | |
458 | netdev_for_each_lower_dev(dev, lower_dev, iter) { | |
07c6f980 RK |
459 | if (netif_is_bridge_master(lower_dev)) |
460 | continue; | |
461 | ||
f30f0601 PM |
462 | err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info, |
463 | check_cb, del_cb); | |
464 | if (err && err != -EOPNOTSUPP) | |
465 | return err; | |
466 | } | |
467 | ||
468 | return err; | |
469 | } | |
470 | ||
471 | int switchdev_handle_port_obj_del(struct net_device *dev, | |
472 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
473 | bool (*check_cb)(const struct net_device *dev), | |
474 | int (*del_cb)(struct net_device *dev, | |
475 | const struct switchdev_obj *obj)) | |
476 | { | |
477 | int err; | |
478 | ||
479 | err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb, | |
480 | del_cb); | |
481 | if (err == -EOPNOTSUPP) | |
482 | err = 0; | |
483 | return err; | |
484 | } | |
485 | EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del); | |
1cb33af1 FF |
486 | |
487 | static int __switchdev_handle_port_attr_set(struct net_device *dev, | |
488 | struct switchdev_notifier_port_attr_info *port_attr_info, | |
489 | bool (*check_cb)(const struct net_device *dev), | |
490 | int (*set_cb)(struct net_device *dev, | |
4c08c586 VO |
491 | const struct switchdev_attr *attr, |
492 | struct netlink_ext_ack *extack)) | |
1cb33af1 | 493 | { |
4c08c586 | 494 | struct netlink_ext_ack *extack; |
1cb33af1 FF |
495 | struct net_device *lower_dev; |
496 | struct list_head *iter; | |
497 | int err = -EOPNOTSUPP; | |
498 | ||
4c08c586 VO |
499 | extack = switchdev_notifier_info_to_extack(&port_attr_info->info); |
500 | ||
1cb33af1 | 501 | if (check_cb(dev)) { |
4c08c586 | 502 | err = set_cb(dev, port_attr_info->attr, extack); |
20776b46 RV |
503 | if (err != -EOPNOTSUPP) |
504 | port_attr_info->handled = true; | |
505 | return err; | |
1cb33af1 FF |
506 | } |
507 | ||
508 | /* Switch ports might be stacked under e.g. a LAG. Ignore the | |
509 | * unsupported devices, another driver might be able to handle them. But | |
510 | * propagate to the callers any hard errors. | |
511 | * | |
512 | * If the driver does its own bookkeeping of stacked ports, it's not | |
513 | * necessary to go through this helper. | |
514 | */ | |
515 | netdev_for_each_lower_dev(dev, lower_dev, iter) { | |
07c6f980 RK |
516 | if (netif_is_bridge_master(lower_dev)) |
517 | continue; | |
518 | ||
1cb33af1 FF |
519 | err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info, |
520 | check_cb, set_cb); | |
521 | if (err && err != -EOPNOTSUPP) | |
522 | return err; | |
523 | } | |
524 | ||
525 | return err; | |
526 | } | |
527 | ||
528 | int switchdev_handle_port_attr_set(struct net_device *dev, | |
529 | struct switchdev_notifier_port_attr_info *port_attr_info, | |
530 | bool (*check_cb)(const struct net_device *dev), | |
531 | int (*set_cb)(struct net_device *dev, | |
4c08c586 VO |
532 | const struct switchdev_attr *attr, |
533 | struct netlink_ext_ack *extack)) | |
1cb33af1 FF |
534 | { |
535 | int err; | |
536 | ||
537 | err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb, | |
538 | set_cb); | |
539 | if (err == -EOPNOTSUPP) | |
540 | err = 0; | |
541 | return err; | |
542 | } | |
543 | EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set); |