// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"
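
/* Enabled while at least one port has requested TX forwarding offload via
 * switchdev_bridge_port_offload(); lets the hot path skip all offload
 * checks when no such port exists.
 */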
static struct static_key_false br_switchdev_tx_fwd_offload;
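
/* TX forwarding offload is possible only towards ports that advertise
 * BR_TX_FWD_OFFLOAD, and only when the frame egresses a different
 * hardware domain than the one it was received on.
 */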
static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}
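
/* Propagate the TX forwarding offload decision into skb->offload_fwd_mark,
 * so lower devices that consult the mark do not forward the frame again.
 */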
void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has already been forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}
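
/* Record the ingress port's hardware domain in the skb control block, so
 * the forwarding path can later tell which egress ports the hardware has
 * already covered.
 */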
void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}
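
/* Software may egress a frame through this port unless it was already
 * TX-offloaded to the port's hwdom, or the hardware forwarded it on
 * ingress (offload_fwd_mark) and the port shares the ingress hwdom.
 */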
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
	       (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
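
/* Offloading happens in two steps: a synchronous PRE_BRIDGE_FLAGS notifier
 * in atomic context, which lets drivers veto flags they cannot offload,
 * followed by a deferred BRIDGE_FLAGS attribute that performs the commit.
 */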
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}
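
/* Common translation of a bridge FDB entry into a switchdev notification.
 * Entries that point at the bridge itself (local entries, or no
 * destination port) are reported against the bridge device.
 */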
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
	 * Drivers don't know how to deal with these, so don't notify them to
	 * avoid confusing them.
	 */
	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}
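
/* Add or remove a VLAN on a bridge port (or on the bridge device itself)
 * in hardware, through the switchdev object API.
 */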
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}
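
/* Assign a hardware domain to a joining port: reuse the hwdom of an
 * existing member with the same physical switch ID, otherwise claim the
 * lowest free domain number (0 is reserved for unoffloaded ports).
 */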
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}
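
/* Release the hardware domain of a leaving port, but only once no other
 * port in the bridge still uses it.
 */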
static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}
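
/* Replay one FDB entry to the given notifier block, synthesizing the same
 * event a live switchdev notification would carry.
 */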
static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}
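
/* Walk the bridge FDB under RCU and replay every entry to @nb, as
 * additions or deletions depending on @adding.
 */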
static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}
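
/* Replay bridge-level VLAN attributes, currently the VLAN to MSTI
 * mappings, towards a notifier block.
 */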
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}
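
/* Deliver one VLAN object to a notifier block as a port object add/del */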
static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}
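
/* Replay the VLAN objects of one VLAN group (the bridge's own, or a
 * port's) towards a notifier block.
 */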
static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}
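
/* Replay the bridge's full VLAN state: the bridge device's VLAN group,
 * every port's VLAN group and, when adding, the VLAN attributes too.
 */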
static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};
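
/* Completion callback for a deferred MDB add: once the driver has
 * programmed the entry, find the matching port group again and mark it
 * as offloaded.
 */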
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
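
/* Translate a bridge MDB entry into its switchdev form: IPv4/IPv6 groups
 * are mapped to their multicast MAC address, L2 groups are copied as-is.
 */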
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}
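
/* Notify one lower device of a host-joined multicast group, i.e. traffic
 * that must be delivered to the bridge device itself.
 */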
static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}
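
/* Replay one queued MDB object to a notifier block */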
static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}
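
/* Queue a copy of an MDB entry for blocking-context replay; called under
 * rcu_read_lock(), hence the GFP_ATOMIC allocation.
 */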
static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}
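
/* Notify switchdev of an MDB change. Host-joined groups fan out to all
 * lower devices; port groups go to the member port, with a completion
 * callback that marks the entry as offloaded on success.
 */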
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}
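
/* Bring a newly offloaded port up to date with the bridge's current state
 * by replaying VLANs, MDB entries and FDB entries towards its notifiers.
 * A driver returning -EOPNOTSUPP for an object type is not fatal.
 */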
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}
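
/* Tear down in the reverse order of nbp_switchdev_sync_objs() */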
static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}