/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *				udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk->sk_sleep, &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	The caller must free a returned skb when done with it (usually by
 *	calling skb_free_datagram); note that this function itself only
 *	takes the receive queue spinlock, it does not lock the socket.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*					--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, &sk->sk_receive_queue);
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	int peeked;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, err);
}
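
/*
 * A minimal usage sketch (hypothetical helper; compare the recvmsg
 * paths in udp.c and friends): the usual pairing of skb_recv_datagram()
 * with skb_copy_datagram_iovec() and skb_free_datagram() in a
 * protocol's recvmsg implementation.  Truncation handling is
 * deliberately simplified here.
 */
static int example_dgram_recvmsg(struct sock *sk, struct msghdr *msg,
				 size_t len, int noblock, int flags)
{
	struct sk_buff *skb;
	int err;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	if (len > skb->len)
		len = skb->len;	/* a real protocol would also set MSG_TRUNC */

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	skb_free_datagram(sk, skb);

	return err ? err : len;
}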

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	skb_free_datagram(sk, skb);
	release_sock(sk);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram.  The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock.  Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}

EXPORT_SYMBOL(skb_kill_datagram);

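/*
 * Illustrative sketch (hypothetical function): peek at a datagram,
 * decide it is unwanted, and forcibly drop it.  As the comment above
 * notes, the flags passed to skb_kill_datagram() must match the ones
 * given to skb_recv_datagram().
 */
static int example_drop_unwanted_datagram(struct sock *sk, int noblock)
{
	struct sk_buff *skb;
	int err;

	skb = skb_recv_datagram(sk, MSG_PEEK, noblock, &err);
	if (!skb)
		return err;

	/* ... inspect skb and decide to discard it ... */

	return skb_kill_datagram(sk, skb, MSG_PEEK);
}
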
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);

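/*
 * A short sketch of why the _const_ variant matters: because the iovec
 * is never modified, a caller can keep reusing the same vector and
 * steer each copy with to_offset.  Drivers that prepend their own
 * header to user reads (tun-style) rely on this; the helper below is
 * hypothetical.
 */
static int example_copy_after_header(const struct sk_buff *skb,
				     const struct iovec *iv, int hdrlen)
{
	/* skip hdrlen bytes already written at the front of the iovec */
	return skb_copy_datagram_const_iovec(skb, 0, iv, hdrlen, skb->len);
}
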
/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);

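/*
 * Transmit-side sketch (compare tun/tap and packet sockets; the helper
 * and its error policy are hypothetical): allocate an skb and fill its
 * linear area from a user iovec.
 */
static struct sk_buff *example_skb_from_iovec(struct sock *sk,
					      const struct iovec *iv,
					      int len, int noblock, int *errp)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len, noblock, errp);
	if (!skb)
		return NULL;

	skb_put(skb, len);	/* reserve the linear bytes we will fill */
	if (skb_copy_datagram_from_iovec(skb, 0, iv, 0, len)) {
		kfree_skb(skb);
		*errp = -EFAULT;
		return NULL;
	}
	return skb;
}
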
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);

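/*
 * A minimal sketch of the usual calling pattern (compare the
 * skb_checksum_complete() wrapper in <linux/skbuff.h>): only fall back
 * to a full software checksum when the device has not already vouched
 * for the packet.  The helper name is hypothetical.
 */
static inline int example_checksum_ok(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) || !__skb_checksum_complete(skb);
}
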
/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0	 - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}

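/*
 * Sketch of a typical caller (loosely modelled on udp_recvmsg(); the
 * helper, its error policy, and the use of struct udphdr, which needs
 * <linux/udp.h>, are illustrative only): verify the checksum while
 * copying, and forcibly drop the datagram when verification fails.
 */
static int example_csum_recv(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, unsigned int flags)
{
	int err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
						   msg->msg_iov);

	if (err == -EINVAL) {
		skb_kill_datagram(sk, skb, flags);	/* flags must match recv */
		err = -EAGAIN;	/* UDP silently retries on checksum errors */
	}
	return err;
}
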
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_recv_datagram);
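
/*
 * Sketch: datagram protocols typically plug datagram_poll() straight
 * into their proto_ops table (compare inet_dgram_ops in af_inet.c).
 * Every handler except .poll is elided here, so this table is
 * illustrative only and could not be registered as-is.
 */
static const struct proto_ops example_dgram_ops = {
	.family	= PF_INET,
	.owner	= THIS_MODULE,
	.poll	= datagram_poll,	/* the generic poll defined above */
};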