net/ipv4/ip_input.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/ :	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *	Richard Underwood	:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *	Pauline Middelink	:	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *	Arnt Gulbrandsen	:	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *	Gerhard Koerting	:	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *	Werner Almesberger	:	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *	Willy Konynenberg	:	Transparent proxying support
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}

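/* For reference, a sketch of the wire format involved: the Router Alert
 * option that steers packets into the chain above is the four bytes
 * 0x94 0x04 0x00 0x00 -- option type 148 (copied flag set, option number 20),
 * length 4, and a two-octet value of 0 meaning "routers shall examine this
 * packet" (RFC 2113).
 */
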
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	{
		int protocol = ip_hdr(skb)->protocol;
		const struct net_protocol *ipprot;
		int raw;

	resubmit:
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot) {
			int ret;

			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
			}
			ret = ipprot->handler(skb);
			if (ret < 0) {
				protocol = -ret;
				goto resubmit;
			}
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH,
						  ICMP_PROT_UNREACH, 0);
				}
				kfree_skb(skb);
			} else {
				__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
				consume_skb(skb);
			}
		}
	}
 out:
	rcu_read_unlock();

	return 0;
}

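/* Context, as a sketch: the inet_protos[] table walked above is populated by
 * transport protocols via inet_add_protocol(). A hypothetical protocol "foo"
 * (names here are illustrative, not from this file) would hook in roughly
 * like this:
 *
 *	static const struct net_protocol foo_protocol = {
 *		.handler   = foo_rcv,	// invoked from ip_local_deliver_finish()
 *		.no_policy = 1,		// skip the xfrm policy check above
 *	};
 *
 *	if (inet_add_protocol(&foo_protocol, IPPROTO_FOO) < 0)
 *		pr_err("foo: cannot register with IP\n");
 *
 * A negative return from .handler makes the loop above resubmit the skb as
 * protocol -ret, which is how tunnel decapsulation re-enters local delivery.
 */
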
/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}

static inline bool ip_rcv_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;

	/* It looks like overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that the combination of IP options
	   and a running sniffer is an extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return false;
drop:
	return true;
}

static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct net_device *dev = skb->dev;
	struct rtable *rt;
	int err;

	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = edemux(skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

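/* Context for the early-demux hook above, as an abbreviated sketch of how TCP
 * supplies it when registering its struct net_protocol (in af_inet.c):
 *
 *	static struct net_protocol tcp_protocol = {
 *		.early_demux	= tcp_v4_early_demux,
 *		.handler	= tcp_v4_rcv,
 *		...
 *	};
 *
 * tcp_v4_early_demux() looks up the established socket before routing and
 * attaches its cached dst, so the ip_route_input_noref() call above can be
 * skipped for most packets of a busy connection.
 */
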
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

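	/* For illustration: ip_fast_csum() computes the standard Internet
	 * checksum (RFC 1071) over the header, checksum field included, and a
	 * valid header folds to zero. A portable, non-optimised sketch of the
	 * same check (hypothetical helper, not used here):
	 *
	 *	static u16 ip_hdr_check(const u8 *hdr, unsigned int ihl)
	 *	{
	 *		u32 sum = 0;
	 *		unsigned int i;
	 *
	 *		for (i = 0; i < ihl * 4; i += 2)
	 *			sum += (hdr[i] << 8) | hdr[i + 1];
	 *		while (sum >> 16)
	 *			sum = (sum & 0xffff) + (sum >> 16);
	 *		return ~sum & 0xffff;	// 0 iff the header checksums correctly
	 *	}
	 */
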
	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return skb;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;
	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

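/* Context, as a sketch: ip_rcv() above and ip_list_rcv() below are wired into
 * the core receive path via a packet_type registered elsewhere (af_inet.c),
 * roughly:
 *
 *	static struct packet_type ip_packet_type __read_mostly = {
 *		.type		= cpu_to_be16(ETH_P_IP),
 *		.func		= ip_rcv,
 *		.list_func	= ip_list_rcv,
 *	};
 *
 *	dev_add_pack(&ip_packet_type);
 */
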
static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		list_del(&skb->list);
		/* Handle ip{6}_forward case, as sch_direct_xmit has
		 * another kind of SKB-list usage (see validate_xmit_skb_list)
		 */
		skb->next = NULL;
		dst_input(skb);
	}
}

static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct dst_entry *curr_dst = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		list_del(&skb->list);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}

static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		list_del(&skb->list);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv(&sublist, curr_dev, curr_net);
}
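
/* Usage note, as a sketch: this batched entry point is reached when a driver
 * hands the core a list of skbs instead of one at a time, e.g. from its NAPI
 * poll loop:
 *
 *	LIST_HEAD(rx_list);
 *	...
 *	list_add_tail(&skb->list, &rx_list);	// per received packet
 *	netif_receive_skb_list(&rx_list);
 *
 * The core splits the batch per packet_type and calls the registered
 * .list_func (ip_list_rcv() here), so the sublist walks above amortise the
 * netfilter hook and route lookup across packets sharing a device, netns
 * and dst.
 */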