/*
 * Forwarding decision
 * Linux ethernet bridge
 *
 * Authors:
 * Lennert Buytenhek <buytenh@gnu.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        struct net_bridge_vlan_group *vg;

        vg = nbp_vlan_group_rcu(p);
        return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
                br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING &&
                nbp_switchdev_allowed_egress(p, skb) &&
                !br_skb_isolated(p, skb);
}

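/* Final transmit step shared by the bridge output paths: push the Ethernet
 * header back on and hand the frame to the egress device's queue.  Frames the
 * device cannot take (interface down, or oversized and not GSO) are dropped,
 * and any fake rtable attached by br_netfilter is removed first.
 */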
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        skb_push(skb, ETH_HLEN);
        if (!is_skb_forwardable(skb->dev, skb))
                goto drop;

        br_drop_fake_rtable(skb);

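        /* Frames that are VLAN-tagged and still have a pending (partial)
         * checksum need the network header moved past the 802.1Q/802.1ad
         * tag(s), so that any later checksum fix-up finds the right offset.
         */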
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (skb->protocol == htons(ETH_P_8021Q) ||
             skb->protocol == htons(ETH_P_8021AD))) {
                int depth;

                if (!__vlan_get_protocol(skb, skb->protocol, &depth))
                        goto drop;

                skb_set_network_header(skb, depth);
        }

        dev_queue_xmit(skb);

        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

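/* Run the outgoing frame through the NF_BR_POST_ROUTING hook; if accepted it
 * is handed to br_dev_queue_push_xmit().  The timestamp is cleared so a
 * leftover receive timestamp is not mistaken for a transmit time on egress.
 */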
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        skb->tstamp = 0;
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
                       net, sk, skb, NULL, skb->dev,
                       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

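/* Transmit path towards a single port: apply egress VLAN handling, retarget
 * the skb at the port's device and run it through the appropriate bridge
 * netfilter hook before br_forward_finish() queues it for transmission.
 */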
static void __br_forward(const struct net_bridge_port *to,
                         struct sk_buff *skb, bool local_orig)
{
        struct net_bridge_vlan_group *vg;
        struct net_device *indev;
        struct net *net;
        int br_hook;

        vg = nbp_vlan_group_rcu(to);
        skb = br_handle_vlan(to->br, to, vg, skb);
        if (!skb)
                return;

        indev = skb->dev;
        skb->dev = to->dev;
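        /* Forwarded frames traverse NF_BR_FORWARD; frames originated by the
         * bridge device itself traverse NF_BR_LOCAL_OUT instead, and are sent
         * directly when netpoll is transmitting on the bridge.
         */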
        if (!local_orig) {
                if (skb_warn_if_lro(skb)) {
                        kfree_skb(skb);
                        return;
                }
                br_hook = NF_BR_FORWARD;
                skb_forward_csum(skb);
                net = dev_net(indev);
        } else {
                if (unlikely(netpoll_tx_running(to->br->dev))) {
                        skb_push(skb, ETH_HLEN);
                        if (!is_skb_forwardable(skb->dev, skb))
                                kfree_skb(skb);
                        else
                                br_netpoll_send_skb(to, skb);
                        return;
                }
                br_hook = NF_BR_LOCAL_OUT;
                net = dev_net(skb->dev);
                indev = NULL;
        }

        NF_HOOK(NFPROTO_BRIDGE, br_hook,
                net, NULL, skb, indev, skb->dev,
                br_forward_finish);
}

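/* Deliver a clone of the skb to @prev, leaving the original untouched for the
 * caller.  On allocation failure the bridge device's tx_dropped counter is
 * bumped and -ENOMEM returned.
 */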
static int deliver_clone(const struct net_bridge_port *prev,
                         struct sk_buff *skb, bool local_orig)
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.tx_dropped++;
                return -ENOMEM;
        }

        __br_forward(prev, skb, local_orig);
        return 0;
}

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, bool local_rcv, bool local_orig)
{
        if (unlikely(!to))
                goto out;

        /* redirect to backup link if the destination port is down */
        if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
                struct net_bridge_port *backup_port;

                backup_port = rcu_dereference(to->backup_port);
                if (unlikely(!backup_port))
                        goto out;
                to = backup_port;
        }

        if (should_deliver(to, skb)) {
                if (local_rcv)
                        deliver_clone(to, skb, local_orig);
                else
                        __br_forward(to, skb, local_orig);
                return;
        }

out:
        if (!local_rcv)
                kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

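/* Deferred-delivery helper for the flooding paths: if @p may receive the
 * frame, send a clone to the previously selected port (if any) and return @p
 * as the new pending port.  Keeping one port pending lets the caller hand the
 * original skb, not a clone, to the last recipient.
 */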
static struct net_bridge_port *maybe_deliver(
        struct net_bridge_port *prev, struct net_bridge_port *p,
        struct sk_buff *skb, bool local_orig)
{
        int err;

        if (!should_deliver(p, skb))
                return prev;

        if (!prev)
                goto out;

        err = deliver_clone(prev, skb, local_orig);
        if (err)
                return ERR_PTR(err);

out:
        return p;
}

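/* Flood the frame to every port whose flags permit flooding for this packet
 * type, except ports already answered by proxy ARP/ND suppression.  All but
 * the last eligible port get clones; the last one gets the original skb
 * unless it must also be received locally.
 */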
/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
              enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
        u8 igmp_type = br_multicast_igmp_type(skb);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port *p;

        list_for_each_entry_rcu(p, &br->port_list, list) {
                /* Do not flood unicast traffic to ports that turn it off, nor
                 * other traffic if flood off, except for traffic we originate
                 */
                switch (pkt_type) {
                case BR_PKT_UNICAST:
                        if (!(p->flags & BR_FLOOD))
                                continue;
                        break;
                case BR_PKT_MULTICAST:
                        if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
                                continue;
                        break;
                case BR_PKT_BROADCAST:
                        if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
                                continue;
                        break;
                }

                /* Do not flood to ports that enable proxy ARP */
                if (p->flags & BR_PROXYARP)
                        continue;
                if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
                    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
                        continue;

                prev = maybe_deliver(prev, p, skb, local_orig);
                if (IS_ERR(prev))
                        goto out;
                if (prev == p)
                        br_multicast_count(p->br, p, skb, igmp_type,
                                           BR_MCAST_DIR_TX);
        }

        if (!prev)
                goto out;

        if (local_rcv)
                deliver_clone(prev, skb, local_orig);
        else
                __br_forward(prev, skb, local_orig);
        return;

out:
        if (!local_rcv)
                kfree_skb(skb);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
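/* Multicast-to-unicast delivery: send a copy of the frame to @p with the
 * destination MAC rewritten to @addr.  A copy (not a clone) is used because
 * the Ethernet header is modified, and a frame is never reflected back to its
 * own source address on the same port, which would break IPv6 DAD.
 */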
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
                               const unsigned char *addr, bool local_orig)
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
        const unsigned char *src = eth_hdr(skb)->h_source;

        if (!should_deliver(p, skb))
                return;

        /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
        if (skb->dev == p->dev && ether_addr_equal(src, addr))
                return;

        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.tx_dropped++;
                return;
        }

        if (!is_broadcast_ether_addr(addr))
                memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

        __br_forward(p, skb, local_orig);
}

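/* Forward a multicast frame to the ports in the MDB entry and to every port
 * behind which a multicast router was seen, delivering to each port at most
 * once.
 */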
/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                        struct sk_buff *skb,
                        bool local_rcv, bool local_orig)
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
        u8 igmp_type = br_multicast_igmp_type(skb);
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port_group *p;
        struct hlist_node *rp;

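        /* Walk the MDB port group list and the router port list in parallel.
         * Both lists are kept ordered by port pointer value, so comparing
         * lport and rport merges them and visits each port only once; ports
         * with BR_MULTICAST_TO_UNICAST set get a unicast copy instead.
         */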
        rp = rcu_dereference(hlist_first_rcu(&br->router_list));
        p = mdst ? rcu_dereference(mdst->ports) : NULL;
        while (p || rp) {
                struct net_bridge_port *port, *lport, *rport;

                lport = p ? p->port : NULL;
                rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);

                if ((unsigned long)lport > (unsigned long)rport) {
                        port = lport;

                        if (port->flags & BR_MULTICAST_TO_UNICAST) {
                                maybe_deliver_addr(lport, skb, p->eth_addr,
                                                   local_orig);
                                goto delivered;
                        }
                } else {
                        port = rport;
                }

                prev = maybe_deliver(prev, port, skb, local_orig);
delivered:
                if (IS_ERR(prev))
                        goto out;
                if (prev == port)
                        br_multicast_count(port->br, port, skb, igmp_type,
                                           BR_MCAST_DIR_TX);

                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
                if ((unsigned long)rport >= (unsigned long)port)
                        rp = rcu_dereference(hlist_next_rcu(rp));
        }

        if (!prev)
                goto out;

        if (local_rcv)
                deliver_clone(prev, skb, local_orig);
        else
                __br_forward(prev, skb, local_orig);
        return;

out:
        if (!local_rcv)
                kfree_skb(skb);
}
#endif