Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * SUCS NET3: | |
3 | * | |
4 | * Generic datagram handling routines. These are generic for all | |
5 | * protocols. Possibly a generic IP version on top of these would | |
6 | * make sense. Not tonight however 8-). | |
7 | * This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and | |
8 | * NetROM layer all have identical poll code and mostly | |
9 | * identical recvmsg() code. So we share it here. The poll was | |
10 | * shared before but buried in udp.c so I moved it. | |
11 | * | |
113aa838 | 12 | * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old |
1da177e4 LT |
13 | * udp.c code) |
14 | * | |
15 | * Fixes: | |
16 | * Alan Cox : NULL return from skb_peek_copy() | |
17 | * understood | |
18 | * Alan Cox : Rewrote skb_read_datagram to avoid the | |
19 | * skb_peek_copy stuff. | |
20 | * Alan Cox : Added support for SOCK_SEQPACKET. | |
21 | * IPX can no longer use the SO_TYPE hack | |
22 | * but AX.25 now works right, and SPX is | |
23 | * feasible. | |
24 | * Alan Cox : Fixed write poll of non IP protocol | |
25 | * crash. | |
26 | * Florian La Roche: Changed for my new skbuff handling. | |
27 | * Darryl Miles : Fixed non-blocking SOCK_SEQPACKET. | |
28 | * Linus Torvalds : BSD semantic fixes. | |
29 | * Alan Cox : Datagram iovec handling | |
30 | * Darryl Miles : Fixed non-blocking SOCK_STREAM. | |
31 | * Alan Cox : POSIXisms | |
32 | * Pete Wyckoff : Unconnected accept() fix. | |
33 | * | |
34 | */ | |
35 | ||
36 | #include <linux/module.h> | |
37 | #include <linux/types.h> | |
38 | #include <linux/kernel.h> | |
39 | #include <asm/uaccess.h> | |
40 | #include <asm/system.h> | |
41 | #include <linux/mm.h> | |
42 | #include <linux/interrupt.h> | |
43 | #include <linux/errno.h> | |
44 | #include <linux/sched.h> | |
45 | #include <linux/inet.h> | |
1da177e4 LT |
46 | #include <linux/netdevice.h> |
47 | #include <linux/rtnetlink.h> | |
48 | #include <linux/poll.h> | |
49 | #include <linux/highmem.h> | |
3305b80c | 50 | #include <linux/spinlock.h> |
1da177e4 LT |
51 | |
52 | #include <net/protocol.h> | |
53 | #include <linux/skbuff.h> | |
1da177e4 | 54 | |
c752f073 ACM |
55 | #include <net/checksum.h> |
56 | #include <net/sock.h> | |
57 | #include <net/tcp_states.h> | |
e9b3cc1b | 58 | #include <trace/events/skb.h> |
1da177e4 LT |
59 | |
60 | /* | |
61 | * Is a socket 'connection oriented' ? | |
62 | */ | |
63 | static inline int connection_based(struct sock *sk) | |
64 | { | |
65 | return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; | |
66 | } | |
67 | ||
bf368e4e ED |
68 | static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, |
69 | void *key) | |
70 | { | |
71 | unsigned long bits = (unsigned long)key; | |
72 | ||
73 | /* | |
74 | * Avoid a wakeup if event not interesting for us | |
75 | */ | |
76 | if (bits && !(bits & (POLLIN | POLLERR))) | |
77 | return 0; | |
78 | return autoremove_wake_function(wait, mode, sync, key); | |
79 | } | |
1da177e4 LT |
80 | /* |
81 | * Wait for a packet.. | |
82 | */ | |
83 | static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |
84 | { | |
85 | int error; | |
bf368e4e | 86 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); |
1da177e4 LT |
87 | |
88 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | |
89 | ||
90 | /* Socket errors? */ | |
91 | error = sock_error(sk); | |
92 | if (error) | |
93 | goto out_err; | |
94 | ||
95 | if (!skb_queue_empty(&sk->sk_receive_queue)) | |
96 | goto out; | |
97 | ||
98 | /* Socket shut down? */ | |
99 | if (sk->sk_shutdown & RCV_SHUTDOWN) | |
100 | goto out_noerr; | |
101 | ||
102 | /* Sequenced packets can arrive while the socket is disconnected. | |
103 | * If so, we report the problem. | |
104 | */ | |
105 | error = -ENOTCONN; | |
106 | if (connection_based(sk) && | |
107 | !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN)) | |
108 | goto out_err; | |
109 | ||
110 | /* handle signals */ | |
111 | if (signal_pending(current)) | |
112 | goto interrupted; | |
113 | ||
114 | error = 0; | |
115 | *timeo_p = schedule_timeout(*timeo_p); | |
116 | out: | |
117 | finish_wait(sk->sk_sleep, &wait); | |
118 | return error; | |
119 | interrupted: | |
120 | error = sock_intr_errno(*timeo_p); | |
121 | out_err: | |
122 | *err = error; | |
123 | goto out; | |
124 | out_noerr: | |
125 | *err = 0; | |
126 | error = 1; | |
127 | goto out; | |
128 | } | |
129 | ||
130 | /** | |
a59322be | 131 | * __skb_recv_datagram - Receive a datagram skbuff |
4dc3b16b PP |
132 | * @sk: socket |
133 | * @flags: MSG_ flags | |
a59322be | 134 | * @peeked: returns non-zero if this packet has been seen before |
4dc3b16b | 135 | * @err: error code returned |
1da177e4 LT |
136 | * |
137 | * Get a datagram skbuff, understands the peeking, nonblocking wakeups | |
138 | * and possible races. This replaces identical code in packet, raw and | |
139 | * udp, as well as IPX, AX.25 and AppleTalk. It also finally fixes | |
140 | * the long-standing peek and read race for datagram sockets. If you | |
141 | * alter this routine remember it must be re-entrant. | |
142 | * | |
143 | * A returned skb must be released with skb_free_datagram(); the | |
144 | * socket is not locked by this function, so there is nothing to | |
145 | * unlock afterwards. | |
146 | * | |
147 | * * It does not lock the socket any more. This function is | |
148 | * * free of race conditions, which can significantly improve | |
149 | * * datagram socket latencies at high load, when copying data | |
150 | * * to user space takes a lot of time. | |
151 | * * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet | |
152 | * * 8) Great win.) | |
153 | * * --ANK (980729) | |
154 | * | |
155 | * The order of the tests when we find no data waiting is specified | |
156 | * quite explicitly by POSIX 1003.1g; don't change it without having | |
157 | * the standard around, please. | |
158 | */ | |
a59322be HX |
159 | struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, |
160 | int *peeked, int *err) | |
1da177e4 LT |
161 | { |
162 | struct sk_buff *skb; | |
163 | long timeo; | |
164 | /* | |
165 | * Caller is allowed not to check sk->sk_err before skb_recv_datagram() | |
166 | */ | |
167 | int error = sock_error(sk); | |
168 | ||
169 | if (error) | |
170 | goto no_packet; | |
171 | ||
a59322be | 172 | timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
1da177e4 LT |
173 | |
174 | do { | |
175 | /* Again, only user-level code calls this function, so nothing at | |
176 | * interrupt level will suddenly eat the receive_queue. | |
177 | * | |
178 | * Look at the current nfs client by the way... | |
179 | * However, this function was correct in any case. 8) | |
180 | */ | |
a59322be HX |
181 | unsigned long cpu_flags; |
182 | ||
183 | spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); | |
184 | skb = skb_peek(&sk->sk_receive_queue); | |
185 | if (skb) { | |
186 | *peeked = skb->peeked; | |
187 | if (flags & MSG_PEEK) { | |
188 | skb->peeked = 1; | |
1da177e4 | 189 | atomic_inc(&skb->users); |
a59322be HX |
190 | } else |
191 | __skb_unlink(skb, &sk->sk_receive_queue); | |
192 | } | |
193 | spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); | |
1da177e4 LT |
194 | |
195 | if (skb) | |
196 | return skb; | |
197 | ||
198 | /* User doesn't want to wait */ | |
199 | error = -EAGAIN; | |
200 | if (!timeo) | |
201 | goto no_packet; | |
202 | ||
203 | } while (!wait_for_packet(sk, err, &timeo)); | |
204 | ||
205 | return NULL; | |
206 | ||
207 | no_packet: | |
208 | *err = error; | |
209 | return NULL; | |
210 | } | |
a59322be HX |
211 | EXPORT_SYMBOL(__skb_recv_datagram); |
212 | ||
213 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, | |
214 | int noblock, int *err) | |
215 | { | |
216 | int peeked; | |
217 | ||
218 | return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | |
219 | &peeked, err); | |
220 | } | |
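
The kernel-doc above describes the usual receive pattern: fetch one skb, copy what fits into the caller's iovec, then release it. Below is a minimal sketch of how a datagram protocol's recvmsg() path might drive these helpers; `example_recvmsg` and its simplified signature are illustrative assumptions, not part of this file.

```c
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative sketch only: receive one datagram and copy it to the
 * caller's iovec, in the style of a protocol recvmsg() implementation. */
static int example_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int flags)
{
	struct sk_buff *skb;
	size_t copied;
	int err;

	/* Blocks according to sk->sk_rcvtimeo unless MSG_DONTWAIT is set. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;	/* datagram did not fit */
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Release the skb obtained from skb_recv_datagram(). */
	skb_free_datagram(sk, skb);
	return err ? err : copied;
}
```
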
1da177e4 LT |
221 | |
222 | void skb_free_datagram(struct sock *sk, struct sk_buff *skb) | |
223 | { | |
ead2ceb0 | 224 | consume_skb(skb); |
270acefa | 225 | sk_mem_reclaim_partial(sk); |
1da177e4 LT |
226 | } |
227 | ||
3305b80c HX |
228 | /** |
229 | * skb_kill_datagram - Free a datagram skbuff forcibly | |
230 | * @sk: socket | |
231 | * @skb: datagram skbuff | |
232 | * @flags: MSG_ flags | |
233 | * | |
234 | * This function frees a datagram skbuff that was received by | |
235 | * skb_recv_datagram. The flags argument must match the one | |
236 | * used for skb_recv_datagram. | |
237 | * | |
238 | * If the MSG_PEEK flag is set, and the packet is still on the | |
239 | * receive queue of the socket, it will be taken off the queue | |
240 | * before it is freed. | |
241 | * | |
242 | * This function currently only disables BH when acquiring the | |
243 | * sk_receive_queue lock. Therefore it must not be used in a | |
244 | * context where that lock is acquired in an IRQ context. | |
27ab2568 HX |
245 | * |
246 | * It returns 0 if the packet was removed by us. | |
3305b80c HX |
247 | */ |
248 | ||
27ab2568 | 249 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) |
3305b80c | 250 | { |
27ab2568 HX |
251 | int err = 0; |
252 | ||
3305b80c | 253 | if (flags & MSG_PEEK) { |
27ab2568 | 254 | err = -ENOENT; |
3305b80c HX |
255 | spin_lock_bh(&sk->sk_receive_queue.lock); |
256 | if (skb == skb_peek(&sk->sk_receive_queue)) { | |
257 | __skb_unlink(skb, &sk->sk_receive_queue); | |
258 | atomic_dec(&skb->users); | |
27ab2568 | 259 | err = 0; |
3305b80c HX |
260 | } |
261 | spin_unlock_bh(&sk->sk_receive_queue.lock); | |
262 | } | |
263 | ||
61de71c6 JD |
264 | kfree_skb(skb); |
265 | sk_mem_reclaim_partial(sk); | |
266 | ||
27ab2568 | 267 | return err; |
3305b80c HX |
268 | } |
269 | ||
270 | EXPORT_SYMBOL(skb_kill_datagram); | |
271 | ||
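
skb_kill_datagram() is meant for the error path: when a datagram turns out to be unusable (for instance, a failed checksum), it frees the skb and, under MSG_PEEK, also unlinks it from the receive queue so the bad packet is not seen again. A hedged sketch of that pattern, loosely modelled on UDP's error handling, follows; `example_drop_bad_datagram` is a hypothetical helper.

```c
#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative sketch: discard a received datagram whose checksum fails,
 * in the spirit of udp_recvmsg()'s error path. */
static int example_drop_bad_datagram(struct sock *sk, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (skb_checksum_complete(skb)) {
		/* Bad checksum.  With MSG_PEEK the skb is still queued, so
		 * skb_kill_datagram() unlinks it before freeing it; the
		 * function returns 0 when the packet was removed by us. */
		skb_kill_datagram(sk, skb, flags);
		return -EAGAIN;		/* a real caller would usually retry */
	}

	/* ...copy the payload out, then release the skb as usual... */
	skb_free_datagram(sk, skb);
	return 0;
}
```
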
1da177e4 LT |
272 | /** |
273 | * skb_copy_datagram_iovec - Copy a datagram to an iovec. | |
4dc3b16b PP |
274 | * @skb: buffer to copy |
275 | * @offset: offset in the buffer to start copying from | |
67be2dd1 | 276 | * @to: io vector to copy to |
4dc3b16b | 277 | * @len: amount of data to copy from buffer to iovec |
1da177e4 LT |
278 | * |
279 | * Note: the iovec is modified during the copy. | |
280 | */ | |
281 | int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, | |
282 | struct iovec *to, int len) | |
283 | { | |
1a028e50 DM |
284 | int start = skb_headlen(skb); |
285 | int i, copy = start - offset; | |
5b1a002a | 286 | struct sk_buff *frag_iter; |
c75d721c | 287 | |
e9b3cc1b NH |
288 | trace_skb_copy_datagram_iovec(skb, len); |
289 | ||
b4d9eda0 DM |
290 | /* Copy header. */ |
291 | if (copy > 0) { | |
292 | if (copy > len) | |
293 | copy = len; | |
294 | if (memcpy_toiovec(to, skb->data + offset, copy)) | |
295 | goto fault; | |
296 | if ((len -= copy) == 0) | |
297 | return 0; | |
298 | offset += copy; | |
299 | } | |
c75d721c | 300 | |
b4d9eda0 DM |
301 | /* Copy paged appendix. Hmm... why does this look so complicated? */ |
302 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1a028e50 | 303 | int end; |
1da177e4 | 304 | |
547b792c | 305 | WARN_ON(start > offset + len); |
1a028e50 DM |
306 | |
307 | end = start + skb_shinfo(skb)->frags[i].size; | |
b4d9eda0 DM |
308 | if ((copy = end - offset) > 0) { |
309 | int err; | |
310 | u8 *vaddr; | |
311 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
312 | struct page *page = frag->page; | |
1da177e4 LT |
313 | |
314 | if (copy > len) | |
315 | copy = len; | |
b4d9eda0 | 316 | vaddr = kmap(page); |
1a028e50 DM |
317 | err = memcpy_toiovec(to, vaddr + frag->page_offset + |
318 | offset - start, copy); | |
b4d9eda0 | 319 | kunmap(page); |
1da177e4 LT |
320 | if (err) |
321 | goto fault; | |
322 | if (!(len -= copy)) | |
323 | return 0; | |
324 | offset += copy; | |
325 | } | |
1a028e50 | 326 | start = end; |
1da177e4 | 327 | } |
b4d9eda0 | 328 | |
5b1a002a DM |
329 | skb_walk_frags(skb, frag_iter) { |
330 | int end; | |
331 | ||
332 | WARN_ON(start > offset + len); | |
333 | ||
334 | end = start + frag_iter->len; | |
335 | if ((copy = end - offset) > 0) { | |
336 | if (copy > len) | |
337 | copy = len; | |
338 | if (skb_copy_datagram_iovec(frag_iter, | |
339 | offset - start, | |
340 | to, copy)) | |
341 | goto fault; | |
342 | if ((len -= copy) == 0) | |
343 | return 0; | |
344 | offset += copy; | |
b4d9eda0 | 345 | } |
5b1a002a | 346 | start = end; |
1da177e4 | 347 | } |
b4d9eda0 DM |
348 | if (!len) |
349 | return 0; | |
350 | ||
1da177e4 LT |
351 | fault: |
352 | return -EFAULT; | |
353 | } | |
354 | ||
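
A typical caller passes a non-zero offset so the transport header is skipped and only the payload reaches user space, the way UDP passes sizeof(struct udphdr). Because memcpy_toiovec() advances iov_base and shrinks iov_len as it copies, the iovec cannot simply be replayed from the start afterwards. The snippet below is a sketch under those assumptions; `example_copy_payload` and `hdr_len` are illustrative names.

```c
#include <linux/skbuff.h>
#include <linux/socket.h>

/* Illustrative sketch: copy only the payload of a datagram to user space,
 * skipping hdr_len bytes of protocol header at the front of the skb. */
static int example_copy_payload(struct sk_buff *skb, struct msghdr *msg,
				size_t len, unsigned int hdr_len)
{
	size_t copied = skb->len - hdr_len;

	if (copied > len)
		copied = len;	/* user buffer is smaller: truncate */

	/* Note: msg->msg_iov is consumed (advanced) by the copy. */
	return skb_copy_datagram_iovec(skb, hdr_len, msg->msg_iov, copied);
}
```
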
0a1ec07a MT |
355 | /** |
356 | * skb_copy_datagram_const_iovec - Copy a datagram to an iovec. | |
357 | * @skb: buffer to copy | |
358 | * @offset: offset in the buffer to start copying from | |
359 | * @to: io vector to copy to | |
360 | * @to_offset: offset in the io vector to start copying to | |
361 | * @len: amount of data to copy from buffer to iovec | |
362 | * | |
363 | * Returns 0 or -EFAULT. | |
364 | * Note: the iovec is not modified during the copy. | |
365 | */ | |
366 | int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, | |
367 | const struct iovec *to, int to_offset, | |
368 | int len) | |
369 | { | |
370 | int start = skb_headlen(skb); | |
371 | int i, copy = start - offset; | |
5b1a002a | 372 | struct sk_buff *frag_iter; |
0a1ec07a MT |
373 | |
374 | /* Copy header. */ | |
375 | if (copy > 0) { | |
376 | if (copy > len) | |
377 | copy = len; | |
378 | if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy)) | |
379 | goto fault; | |
380 | if ((len -= copy) == 0) | |
381 | return 0; | |
382 | offset += copy; | |
383 | to_offset += copy; | |
384 | } | |
385 | ||
386 | /* Copy paged appendix. Hmm... why does this look so complicated? */ | |
387 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
388 | int end; | |
389 | ||
390 | WARN_ON(start > offset + len); | |
391 | ||
392 | end = start + skb_shinfo(skb)->frags[i].size; | |
393 | if ((copy = end - offset) > 0) { | |
394 | int err; | |
395 | u8 *vaddr; | |
396 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
397 | struct page *page = frag->page; | |
398 | ||
399 | if (copy > len) | |
400 | copy = len; | |
401 | vaddr = kmap(page); | |
402 | err = memcpy_toiovecend(to, vaddr + frag->page_offset + | |
403 | offset - start, to_offset, copy); | |
404 | kunmap(page); | |
405 | if (err) | |
406 | goto fault; | |
407 | if (!(len -= copy)) | |
408 | return 0; | |
409 | offset += copy; | |
410 | to_offset += copy; | |
411 | } | |
412 | start = end; | |
413 | } | |
414 | ||
5b1a002a DM |
415 | skb_walk_frags(skb, frag_iter) { |
416 | int end; | |
417 | ||
418 | WARN_ON(start > offset + len); | |
419 | ||
420 | end = start + frag_iter->len; | |
421 | if ((copy = end - offset) > 0) { | |
422 | if (copy > len) | |
423 | copy = len; | |
424 | if (skb_copy_datagram_const_iovec(frag_iter, | |
425 | offset - start, | |
426 | to, to_offset, | |
427 | copy)) | |
428 | goto fault; | |
429 | if ((len -= copy) == 0) | |
430 | return 0; | |
431 | offset += copy; | |
432 | to_offset += copy; | |
0a1ec07a | 433 | } |
5b1a002a | 434 | start = end; |
0a1ec07a MT |
435 | } |
436 | if (!len) | |
437 | return 0; | |
438 | ||
439 | fault: | |
440 | return -EFAULT; | |
441 | } | |
442 | EXPORT_SYMBOL(skb_copy_datagram_const_iovec); | |
443 | ||
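
The const variant exists for callers that must not have their iovec consumed, typically because the same iovec describes a header written in one pass and packet data written in another (the tun/vhost style of use). A sketch under that assumption follows; `example_put_packet`, `hdr` and `hdr_len` are hypothetical, and memcpy_toiovecend() is used for the fixed-size header.

```c
#include <linux/skbuff.h>
#include <linux/socket.h>

/* Illustrative sketch: write a fixed header followed by packet data into a
 * user iovec without modifying the iovec, keeping all offsets explicit. */
static int example_put_packet(const struct sk_buff *skb,
			      const struct iovec *iov,
			      unsigned char *hdr, int hdr_len)
{
	int err;

	/* The header goes at offset 0 of the io vector... */
	err = memcpy_toiovecend(iov, hdr, 0, hdr_len);
	if (err)
		return err;

	/* ...and the packet data right after it.  The iovec itself is left
	 * untouched, so the caller may reuse it for the next packet. */
	return skb_copy_datagram_const_iovec(skb, 0, iov, hdr_len, skb->len);
}
```
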
db543c1f RR |
444 | /** |
445 | * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. | |
446 | * @skb: buffer to copy | |
447 | * @offset: offset in the buffer to start copying to | |
448 | * @from: io vector to copy from | |
6f26c9a7 | 449 | * @from_offset: offset in the io vector to start copying from |
db543c1f RR |
450 | * @len: amount of data to copy to buffer from iovec |
451 | * | |
452 | * Returns 0 or -EFAULT. | |
6f26c9a7 | 453 | * Note: the iovec is not modified during the copy. |
db543c1f RR |
454 | */ |
455 | int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, | |
6f26c9a7 MT |
456 | const struct iovec *from, int from_offset, |
457 | int len) | |
db543c1f RR |
458 | { |
459 | int start = skb_headlen(skb); | |
460 | int i, copy = start - offset; | |
5b1a002a | 461 | struct sk_buff *frag_iter; |
db543c1f RR |
462 | |
463 | /* Copy header. */ | |
464 | if (copy > 0) { | |
465 | if (copy > len) | |
466 | copy = len; | |
d2d27bfd SS |
467 | if (memcpy_fromiovecend(skb->data + offset, from, from_offset, |
468 | copy)) | |
db543c1f RR |
469 | goto fault; |
470 | if ((len -= copy) == 0) | |
471 | return 0; | |
472 | offset += copy; | |
6f26c9a7 | 473 | from_offset += copy; |
db543c1f RR |
474 | } |
475 | ||
476 | /* Copy paged appendix. Hmm... why does this look so complicated? */ | |
477 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
478 | int end; | |
479 | ||
480 | WARN_ON(start > offset + len); | |
481 | ||
482 | end = start + skb_shinfo(skb)->frags[i].size; | |
483 | if ((copy = end - offset) > 0) { | |
484 | int err; | |
485 | u8 *vaddr; | |
486 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
487 | struct page *page = frag->page; | |
488 | ||
489 | if (copy > len) | |
490 | copy = len; | |
491 | vaddr = kmap(page); | |
6f26c9a7 MT |
492 | err = memcpy_fromiovecend(vaddr + frag->page_offset + |
493 | offset - start, | |
494 | from, from_offset, copy); | |
db543c1f RR |
495 | kunmap(page); |
496 | if (err) | |
497 | goto fault; | |
498 | ||
499 | if (!(len -= copy)) | |
500 | return 0; | |
501 | offset += copy; | |
6f26c9a7 | 502 | from_offset += copy; |
db543c1f RR |
503 | } |
504 | start = end; | |
505 | } | |
506 | ||
5b1a002a DM |
507 | skb_walk_frags(skb, frag_iter) { |
508 | int end; | |
509 | ||
510 | WARN_ON(start > offset + len); | |
511 | ||
512 | end = start + frag_iter->len; | |
513 | if ((copy = end - offset) > 0) { | |
514 | if (copy > len) | |
515 | copy = len; | |
516 | if (skb_copy_datagram_from_iovec(frag_iter, | |
517 | offset - start, | |
518 | from, | |
519 | from_offset, | |
520 | copy)) | |
521 | goto fault; | |
522 | if ((len -= copy) == 0) | |
523 | return 0; | |
524 | offset += copy; | |
525 | from_offset += copy; | |
db543c1f | 526 | } |
5b1a002a | 527 | start = end; |
db543c1f RR |
528 | } |
529 | if (!len) | |
530 | return 0; | |
531 | ||
532 | fault: | |
533 | return -EFAULT; | |
534 | } | |
535 | EXPORT_SYMBOL(skb_copy_datagram_from_iovec); | |
536 | ||
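
This helper runs in the opposite direction: it fills an already-allocated skb (its linear area and any paged fragments) from user memory, which is what transmit paths such as tun and packet sockets need. A hedged sketch of such a caller is below; `example_build_skb` is an illustrative name and the allocation is simplified to a purely linear skb.

```c
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Illustrative sketch: allocate a linear skb and fill it from a user iovec,
 * roughly the shape of a simple datagram sendmsg() path. */
static struct sk_buff *example_build_skb(struct sock *sk,
					 const struct iovec *from,
					 size_t len, int noblock, int *errp)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len, noblock, errp);
	if (!skb)
		return NULL;

	skb_put(skb, len);	/* account for len bytes of data in the skb */

	/* Copy len bytes from iovec offset 0 into the skb at offset 0. */
	if (skb_copy_datagram_from_iovec(skb, 0, from, 0, len)) {
		kfree_skb(skb);
		*errp = -EFAULT;
		return NULL;
	}
	return skb;
}
```
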
1da177e4 LT |
537 | static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, |
538 | u8 __user *to, int len, | |
5084205f | 539 | __wsum *csump) |
1da177e4 | 540 | { |
1a028e50 | 541 | int start = skb_headlen(skb); |
1a028e50 | 542 | int i, copy = start - offset; |
5b1a002a DM |
543 | struct sk_buff *frag_iter; |
544 | int pos = 0; | |
1da177e4 LT |
545 | |
546 | /* Copy header. */ | |
547 | if (copy > 0) { | |
548 | int err = 0; | |
549 | if (copy > len) | |
550 | copy = len; | |
551 | *csump = csum_and_copy_to_user(skb->data + offset, to, copy, | |
552 | *csump, &err); | |
553 | if (err) | |
554 | goto fault; | |
555 | if ((len -= copy) == 0) | |
556 | return 0; | |
557 | offset += copy; | |
558 | to += copy; | |
559 | pos = copy; | |
560 | } | |
561 | ||
562 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1a028e50 | 563 | int end; |
1da177e4 | 564 | |
547b792c | 565 | WARN_ON(start > offset + len); |
1a028e50 DM |
566 | |
567 | end = start + skb_shinfo(skb)->frags[i].size; | |
1da177e4 | 568 | if ((copy = end - offset) > 0) { |
5084205f | 569 | __wsum csum2; |
1da177e4 LT |
570 | int err = 0; |
571 | u8 *vaddr; | |
572 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
573 | struct page *page = frag->page; | |
574 | ||
575 | if (copy > len) | |
576 | copy = len; | |
577 | vaddr = kmap(page); | |
578 | csum2 = csum_and_copy_to_user(vaddr + | |
1a028e50 DM |
579 | frag->page_offset + |
580 | offset - start, | |
1da177e4 LT |
581 | to, copy, 0, &err); |
582 | kunmap(page); | |
583 | if (err) | |
584 | goto fault; | |
585 | *csump = csum_block_add(*csump, csum2, pos); | |
586 | if (!(len -= copy)) | |
587 | return 0; | |
588 | offset += copy; | |
589 | to += copy; | |
590 | pos += copy; | |
591 | } | |
1a028e50 | 592 | start = end; |
1da177e4 LT |
593 | } |
594 | ||
5b1a002a DM |
595 | skb_walk_frags(skb, frag_iter) { |
596 | int end; | |
597 | ||
598 | WARN_ON(start > offset + len); | |
599 | ||
600 | end = start + frag_iter->len; | |
601 | if ((copy = end - offset) > 0) { | |
602 | __wsum csum2 = 0; | |
603 | if (copy > len) | |
604 | copy = len; | |
605 | if (skb_copy_and_csum_datagram(frag_iter, | |
606 | offset - start, | |
607 | to, copy, | |
608 | &csum2)) | |
609 | goto fault; | |
610 | *csump = csum_block_add(*csump, csum2, pos); | |
611 | if ((len -= copy) == 0) | |
612 | return 0; | |
613 | offset += copy; | |
614 | to += copy; | |
615 | pos += copy; | |
1da177e4 | 616 | } |
5b1a002a | 617 | start = end; |
1da177e4 LT |
618 | } |
619 | if (!len) | |
620 | return 0; | |
621 | ||
622 | fault: | |
623 | return -EFAULT; | |
624 | } | |
625 | ||
759e5d00 | 626 | __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) |
fb286bb2 | 627 | { |
d3bc23e7 | 628 | __sum16 sum; |
fb286bb2 | 629 | |
759e5d00 | 630 | sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); |
fb286bb2 | 631 | if (likely(!sum)) { |
84fa7933 | 632 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) |
fb286bb2 HX |
633 | netdev_rx_csum_fault(skb->dev); |
634 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
635 | } | |
636 | return sum; | |
637 | } | |
759e5d00 HX |
638 | EXPORT_SYMBOL(__skb_checksum_complete_head); |
639 | ||
640 | __sum16 __skb_checksum_complete(struct sk_buff *skb) | |
641 | { | |
642 | return __skb_checksum_complete_head(skb, skb->len); | |
643 | } | |
fb286bb2 HX |
644 | EXPORT_SYMBOL(__skb_checksum_complete); |
645 | ||
1da177e4 LT |
646 | /** |
647 | * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec. | |
4dc3b16b PP |
648 | * @skb: skbuff |
649 | * @hlen: header length (checksummed but not copied to the iovec) | |
67be2dd1 | 650 | * @iov: io vector |
4ec93edb | 651 | * |
1da177e4 LT |
652 | * Caller _must_ check that skb will fit to this iovec. |
653 | * | |
654 | * Returns: 0 - success. | |
655 | * -EINVAL - checksum failure. | |
656 | * -EFAULT - fault during copy. Beware, in this case iovec | |
657 | * can be modified! | |
658 | */ | |
fb286bb2 | 659 | int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, |
1da177e4 LT |
660 | int hlen, struct iovec *iov) |
661 | { | |
d3bc23e7 | 662 | __wsum csum; |
1da177e4 LT |
663 | int chunk = skb->len - hlen; |
664 | ||
ef8aef55 HX |
665 | if (!chunk) |
666 | return 0; | |
667 | ||
1da177e4 LT |
668 | /* Skip filled elements. |
669 | * Pretty silly, look at memcpy_toiovec, though 8) | |
670 | */ | |
671 | while (!iov->iov_len) | |
672 | iov++; | |
673 | ||
674 | if (iov->iov_len < chunk) { | |
fb286bb2 | 675 | if (__skb_checksum_complete(skb)) |
1da177e4 LT |
676 | goto csum_error; |
677 | if (skb_copy_datagram_iovec(skb, hlen, iov, chunk)) | |
678 | goto fault; | |
679 | } else { | |
680 | csum = csum_partial(skb->data, hlen, skb->csum); | |
681 | if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, | |
682 | chunk, &csum)) | |
683 | goto fault; | |
d3bc23e7 | 684 | if (csum_fold(csum)) |
1da177e4 | 685 | goto csum_error; |
84fa7933 | 686 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) |
fb286bb2 | 687 | netdev_rx_csum_fault(skb->dev); |
1da177e4 LT |
688 | iov->iov_len -= chunk; |
689 | iov->iov_base += chunk; | |
690 | } | |
691 | return 0; | |
692 | csum_error: | |
693 | return -EINVAL; | |
694 | fault: | |
695 | return -EFAULT; | |
696 | } | |
697 | ||
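
Protocols with deferred checksum verification (UDP being the classic case) choose between the plain and the checksumming copy at recvmsg() time: if the packet was already verified, or only part of it is being read, the checksum is completed separately and a plain copy is used; otherwise the copy and the verification are folded into one pass with skb_copy_and_csum_datagram_iovec(). A sketch of that decision follows; `example_copy_and_verify` and the surrounding error handling are illustrative, not code from this file.

```c
#include <linux/skbuff.h>
#include <linux/socket.h>

/* Illustrative sketch: copy a datagram payload to user space, verifying the
 * checksum on the fly when a full read makes that possible. */
static int example_copy_and_verify(struct sk_buff *skb, struct msghdr *msg,
				   size_t copied, unsigned int hdr_len)
{
	int err;

	if (skb_csum_unnecessary(skb) || copied < skb->len - hdr_len) {
		/* Partial read, or already verified: complete the checksum
		 * first if needed, then do a plain copy. */
		if (!skb_csum_unnecessary(skb) && __skb_checksum_complete(skb))
			return -EINVAL;		/* bad checksum */
		err = skb_copy_datagram_iovec(skb, hdr_len, msg->msg_iov,
					      copied);
	} else {
		/* Full read: verify while copying. */
		err = skb_copy_and_csum_datagram_iovec(skb, hdr_len,
						       msg->msg_iov);
	}
	return err;
}
```
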
698 | /** | |
699 | * datagram_poll - generic datagram poll | |
4dc3b16b PP |
700 | * @file: file struct |
701 | * @sock: socket | |
702 | * @wait: poll table | |
1da177e4 LT |
703 | * |
704 | * Datagram poll: Again totally generic. This also handles | |
705 | * sequenced packet sockets, provided the socket receive queue | |
706 | * only ever holds data that is ready to receive. | |
707 | * | |
708 | * Note: when you _don't_ use this routine for your protocol, | |
709 | * and you use a write policy different from sock_writeable(), | |
710 | * then please supply your own write_space callback. | |
711 | */ | |
712 | unsigned int datagram_poll(struct file *file, struct socket *sock, | |
713 | poll_table *wait) | |
714 | { | |
715 | struct sock *sk = sock->sk; | |
716 | unsigned int mask; | |
717 | ||
a57de0b4 | 718 | sock_poll_wait(file, sk->sk_sleep, wait); |
1da177e4 LT |
719 | mask = 0; |
720 | ||
721 | /* exceptional events? */ | |
722 | if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) | |
723 | mask |= POLLERR; | |
f348d70a DL |
724 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
725 | mask |= POLLRDHUP; | |
1da177e4 LT |
726 | if (sk->sk_shutdown == SHUTDOWN_MASK) |
727 | mask |= POLLHUP; | |
728 | ||
729 | /* readable? */ | |
730 | if (!skb_queue_empty(&sk->sk_receive_queue) || | |
731 | (sk->sk_shutdown & RCV_SHUTDOWN)) | |
732 | mask |= POLLIN | POLLRDNORM; | |
733 | ||
734 | /* Connection-based need to check for termination and startup */ | |
735 | if (connection_based(sk)) { | |
736 | if (sk->sk_state == TCP_CLOSE) | |
737 | mask |= POLLHUP; | |
738 | /* connection hasn't started yet? */ | |
739 | if (sk->sk_state == TCP_SYN_SENT) | |
740 | return mask; | |
741 | } | |
742 | ||
743 | /* writable? */ | |
744 | if (sock_writeable(sk)) | |
745 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | |
746 | else | |
747 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | |
748 | ||
749 | return mask; | |
750 | } | |
751 | ||
752 | EXPORT_SYMBOL(datagram_poll); | |
753 | EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec); | |
754 | EXPORT_SYMBOL(skb_copy_datagram_iovec); | |
755 | EXPORT_SYMBOL(skb_free_datagram); | |
756 | EXPORT_SYMBOL(skb_recv_datagram); |
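
Because datagram_poll() only needs generic socket state, most datagram protocols can plug it straight into their proto_ops table instead of writing their own poll handler. A minimal sketch of such a table is below; `example_dgram_ops` is hypothetical, the family is a placeholder, and only generic stubs are used where a real protocol would supply its own callbacks (sendmsg/recvmsg typically built on skb_recv_datagram() and skb_copy_datagram_iovec()).

```c
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

/* Illustrative sketch: a datagram protocol reusing the generic helpers. */
static const struct proto_ops example_dgram_ops = {
	.family		= AF_UNSPEC,		/* placeholder; real code uses its own family */
	.owner		= THIS_MODULE,
	.poll		= datagram_poll,	/* generic datagram poll from this file */
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= sock_no_sendmsg,	/* a real protocol provides these */
	.recvmsg	= sock_no_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
```
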