/* net/packet/af_packet.c */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes the
     ll header inside itself. In this case the ll header is invisible outside
     of the device, but higher levels still should reserve
     dev->hard_header_len. Some devices are clever enough to reallocate the
     skb when the header does not fit into the reserved space (tunnels);
     others are silly (PPP).
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header. PPP does this, which is wrong, because it
		 introduces asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
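/*
 * Illustration (a minimal user-space sketch, not part of this file):
 * the visibility rules above are what a user observes when choosing the
 * socket type.
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 * A frame read from 'raw' starts with the link-level header (packet_rcv()
 * pushes it back, see below); a frame read from 'dgm' starts at the
 * network header, with the link-level address reported out of band in the
 * sockaddr_ll returned by recvfrom().
 */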

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
			   int closing, int tx_ring);

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

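/*
 * Note: each ring buffer entry (struct pgv above) may point either at
 * page-allocator memory or at a vmalloc() area (a fallback used when
 * physically contiguous memory is scarce; the allocation path is not
 * shown in this excerpt), so address-to-page translation must handle
 * both cases.
 */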
static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

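/*
 * Frame lookup in a ring: a frame index is split into a pg_vec block
 * number and a frame offset inside that block. For example, with
 * frames_per_block == 4, frame 9 lives at pg_vec[2].buffer + 1 * frame_size.
 * A frame is handed out only if its tp_status matches the expected owner
 * (TP_STATUS_KERNEL on rx, TP_STATUS_SEND_REQUEST on tx), which together
 * with the smp_wmb()/smp_rmb() in the status helpers above forms the
 * kernel/user hand-off protocol for the mmap()ed ring.
 */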
static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
					  struct packet_ring_buffer *rb,
					  int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}


static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets from using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

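/*
 * Socket filters on packet sockets use the generic BPF machinery: a
 * filter attached with setsockopt(SO_ATTACH_FILTER) is run over each
 * candidate skb and returns the number of bytes to keep; 0 drops the
 * packet, and anything smaller than the frame truncates the snapshot.
 * A hedged user-space sketch (the BPF program 'insns' is illustrative):
 *
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */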
static inline unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}

/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that the corresponding packet head is
		 * never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}

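/*
 * tpacket_rcv() below fills one mmap()ed ring frame per packet instead
 * of queueing an skb. Roughly, a frame is laid out as: the
 * tpacket{,2}_hdr at the frame start, a sockaddr_ll at
 * TPACKET_ALIGN(hdrlen), then tp_reserve bytes of user headroom; the
 * packet data is copied in at offset tp_mac, with the network header at
 * tp_net. Packets that do not fit may still reach the socket via the
 * copy_thresh path as a regular queued skb (TP_STATUS_COPY).
 */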
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			 po->tp_reserve;
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	if (!h.raw)
		goto ring_is_full;
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);
	smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	{
		u8 *start, *end;

		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
#endif

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}

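/*
 * TX ring frame life cycle (as implemented by tpacket_snd() and the
 * destructor below): user space writes a frame and marks it
 * TP_STATUS_SEND_REQUEST; the kernel claims it, marks it
 * TP_STATUS_SENDING and maps the frame pages straight into the skb;
 * once the skb is freed after transmission, tpacket_destruct_skb()
 * returns the frame to user space as TP_STATUS_AVAILABLE.
 */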
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	BUG_ON(skb == NULL);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	err = -EFAULT;
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	int err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	mutex_lock(&po->pg_vec_lock);

	err = -EBUSY;
	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
				TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
		);

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	if (need_rls_dev)
		dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

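/*
 * Allocation helper for packet_snd(): requests under a page (or callers
 * passing no linear hint) get a fully linear skb; larger ones get a
 * linear area of 'linear' bytes plus paged data for the remainder.
 */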
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
					       int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		     vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;

		}
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
		goto out_unlock;

	err = -ENOBUFS;
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	if (need_rls_dev)
		dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev && need_rls_dev)
		dev_put(dev);
out:
	return err;
}

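/*
 * sendmsg() dispatch: once a TX ring has been mapped with
 * PACKET_TX_RING, all transmission goes through the ring via
 * tpacket_snd(); otherwise each message is copied into a fresh skb by
 * packet_snd().
 */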
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	struct tpacket_req req;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	if (po->running) {
		/*
		 * Remove from protocol table
		 */
		po->running = 0;
		po->num = 0;
		__dev_remove_pack(&po->prot_hook);
		__sock_put(sk);
	}
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	synchronize_net();
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);
	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}

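/*
 * A minimal user-space sketch of the bind calls handled below (field
 * values and the "eth0" device name are illustrative):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * An sll_protocol of 0 falls back to the socket's current protocol; if
 * the effective protocol is still 0, packet_do_bind() above leaves the
 * hook detached and the socket receives nothing.
 */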
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev)
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;


	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

out:
	return err;
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}

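/*
 * MSG_ERRQUEUE support: when TX timestamping is enabled, completed
 * timestamps are queued on sk_error_queue and fetched by the helper
 * below via recvmsg(fd, &msg, MSG_ERRQUEUE), which delivers them as a
 * PACKET_TX_TIMESTAMP control message.
 */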
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	It will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred, so return it. Because skb_recv_datagram()
	 *	handles the blocking, we don't need to see or worry about
	 *	blocking retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries
	 *	a user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

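/*
 * Membership handling below is reference counted per (ifindex, type,
 * address) tuple: adding an existing entry only bumps ml->count, and
 * the device-level state (dev_mc_add()/dev_uc_add(), promiscuity,
 * allmulti) is only touched on the 0 <-> 1 transitions, always under
 * the RTNL lock.
 */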
0fb375fb 1856static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
1857{
1858 struct packet_sock *po = pkt_sk(sk);
1859 struct packet_mclist *ml, *i;
1860 struct net_device *dev;
1861 int err;
1862
1863 rtnl_lock();
1864
1865 err = -ENODEV;
3b1e0a65 1866 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1da177e4
LT
1867 if (!dev)
1868 goto done;
1869
1870 err = -EINVAL;
1162563f 1871 if (mreq->mr_alen > dev->addr_len)
1da177e4
LT
1872 goto done;
1873
1874 err = -ENOBUFS;
8b3a7005 1875 i = kmalloc(sizeof(*i), GFP_KERNEL);
1da177e4
LT
1876 if (i == NULL)
1877 goto done;
1878
1879 err = 0;
1880 for (ml = po->mclist; ml; ml = ml->next) {
1881 if (ml->ifindex == mreq->mr_ifindex &&
1882 ml->type == mreq->mr_type &&
1883 ml->alen == mreq->mr_alen &&
1884 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1885 ml->count++;
1886 /* Free the new element ... */
1887 kfree(i);
1888 goto done;
1889 }
1890 }
1891
1892 i->type = mreq->mr_type;
1893 i->ifindex = mreq->mr_ifindex;
1894 i->alen = mreq->mr_alen;
1895 memcpy(i->addr, mreq->mr_address, i->alen);
1896 i->count = 1;
1897 i->next = po->mclist;
1898 po->mclist = i;
2aeb0b88
WC
1899 err = packet_dev_mc(dev, i, 1);
1900 if (err) {
1901 po->mclist = i->next;
1902 kfree(i);
1903 }
1da177e4
LT
1904
1905done:
1906 rtnl_unlock();
1907 return err;
1908}
1909
0fb375fb 1910static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
1911{
1912 struct packet_mclist *ml, **mlp;
1913
1914 rtnl_lock();
1915
1916 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1917 if (ml->ifindex == mreq->mr_ifindex &&
1918 ml->type == mreq->mr_type &&
1919 ml->alen == mreq->mr_alen &&
1920 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1921 if (--ml->count == 0) {
1922 struct net_device *dev;
1923 *mlp = ml->next;
ad959e76
ED
1924 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
1925 if (dev)
1da177e4 1926 packet_dev_mc(dev, ml, -1);
1da177e4
LT
1927 kfree(ml);
1928 }
1929 rtnl_unlock();
1930 return 0;
1931 }
1932 }
1933 rtnl_unlock();
1934 return -EADDRNOTAVAIL;
1935}
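
/*
 * Illustrative user-space sketch (not part of this file): enabling and
 * later disabling reception of an ethernet multicast address via the
 * PACKET_ADD_MEMBERSHIP / PACKET_DROP_MEMBERSHIP paths above.  "fd" is
 * an existing PF_PACKET socket and "ifindex"/"mac" are caller-supplied;
 * error handling is reduced to perror().
 *
 *	struct packet_mreq mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = ifindex;
 *	mreq.mr_type    = PACKET_MR_MULTICAST;
 *	mreq.mr_alen    = ETH_ALEN;
 *	memcpy(mreq.mr_address, mac, ETH_ALEN);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		       &mreq, sizeof(mreq)) < 0)
 *		perror("PACKET_ADD_MEMBERSHIP");
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP,
 *		       &mreq, sizeof(mreq)) < 0)
 *		perror("PACKET_DROP_MEMBERSHIP");
 */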

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
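
/*
 * Illustrative user-space sketch (not part of this file): selecting the
 * TPACKET_V2 format and then requesting an RX ring through the
 * PACKET_VERSION and PACKET_RX_RING cases above.  The sizes must pass
 * the checks in packet_set_ring(): tp_block_size a multiple of
 * PAGE_SIZE, tp_frame_size TPACKET_ALIGNMENT-aligned, and
 * tp_frame_nr == (tp_block_size / tp_frame_size) * tp_block_nr.
 * Assumes 4 KiB pages and an existing PF_PACKET socket "fd"; error
 * handling elided.
 *
 *	int version = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */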

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->has_vnet_hdr;

		data = &val;
		break;
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_tstamp;
		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
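
/*
 * Illustrative user-space sketch (not part of this file): reading (and
 * thereby resetting) the receive counters exposed above.  Note that the
 * PACKET_STATISTICS case folds tp_drops into tp_packets before copy-out.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		fprintf(stderr, "%u packets, %u drops\n",
 *			st.tp_packets, st.tp_drops);
 */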


static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);
					sock_hold(sk);
					po->running = 1;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
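
/*
 * Illustrative user-space sketch (not part of this file): the SIOCINQ
 * case above reports the length of the frame at the head of the receive
 * queue, so a caller can size a buffer before reading.
 *
 *	int pending = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0 && pending > 0)
 *		printf("next frame is %d bytes\n", pending);
 */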

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
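
/*
 * Illustrative user-space sketch (not part of this file): waiting for
 * ring activity with poll().  POLLIN means at least one RX frame has
 * been handed to user space (its status is no longer TP_STATUS_KERNEL);
 * the consumer processes it and hands it back by writing
 * TP_STATUS_KERNEL.  TPACKET_V2 layout assumed; "ring", "idx",
 * "frame_size", "frame_nr" and handle_frame() are placeholders mirroring
 * the tpacket_req used to create the ring.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *
 *	poll(&pfd, 1, -1);
 *	struct tpacket2_hdr *hdr = (void *)(ring + idx * frame_size);
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		idx = (idx + 1) % frame_nr;
 *	}
 */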


/* Dirty? Well, I still did not learn a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);

	if (buffer)
		return buffer;

	/*
	 * __get_free_pages failed, fall back to vmalloc
	 */
	buffer = vzalloc((1 << order) * PAGE_SIZE);

	if (buffer)
		return buffer;

	/*
	 * vmalloc failed, let's dig into swap here
	 */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/*
	 * complete and utter failure
	 */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
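
/*
 * A minimal user-space sketch (not part of this file) of the sizing
 * contract enforced above: pick an aligned frame size and a page-sized
 * block, then derive tp_frame_nr so that
 * frames_per_block * tp_block_nr == tp_frame_nr holds exactly.
 * Assumes 4 KiB pages; the variable names are illustrative only.
 *
 *	unsigned int frame_size = 2048;
 *	unsigned int block_size = 4096;
 *	unsigned int block_nr   = 256;
 *	struct tpacket_req req = {
 *		.tp_block_size = block_size,
 *		.tp_block_nr   = block_nr,
 *		.tp_frame_size = frame_size,
 *		.tp_frame_nr   = (block_size / frame_size) * block_nr,
 *	};
 */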

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
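
/*
 * Illustrative user-space sketch (not part of this file): mapping the
 * ring(s).  packet_mmap() insists on offset 0 and a length equal to the
 * combined RX + TX ring size, so a socket with both rings gets a single
 * contiguous mapping with the RX ring first and the TX ring directly
 * after it.  "rx_req"/"tx_req" are the requests passed to setsockopt().
 *
 *	size_t rx_size = rx_req.tp_block_size * rx_req.tp_block_nr;
 *	size_t tx_size = tx_req.tp_block_size * tx_req.tp_block_nr;
 *	char *ring = mmap(NULL, rx_size + tx_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	char *rx_ring = ring;
 *	char *tx_ring = ring + rx_size;
 */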

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);