/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code, obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything, not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive, otherwise odd bits of prattle
 *					still escape.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFCs. For other useful protocol
 *					references see Comer and KA9Q NOS; for
 *					a reference on the difference between
 *					specifications and how BSD works, see
 *					the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up with retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH-only
 *					frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries get used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
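
/* Transition sketch (standard RFC 793 close paths, added here for
 * orientation; not part of the original comment):
 *
 *   active close:  ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *   passive close: ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *   simultaneous:  ESTABLISHED -> FIN_WAIT1 -> CLOSING -> TIME_WAIT -> CLOSE
 */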

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

int sysctl_tcp_min_tso_segs __read_mostly = 2;

int sysctl_tcp_autocorking __read_mostly = 1;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
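
/* Worked example (illustrative numbers, not from the original source):
 * with an initial timeout of 2 and rto_max of 16, secs_to_retrans(30, 2, 16)
 * accumulates periods 2, 2+4=6, 6+8=14, 14+16=30 and returns 4 retransmits;
 * retrans_to_secs(4, 2, 16) walks the same doubling-and-clamping sequence
 * back to 30 seconds, so the two conversions are inverses up to rounding.
 */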

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);
	INIT_LIST_HEAD(&tp->tsq_node);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	tcp_enable_early_retrans(tp);
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	tp->tsoffset = 0;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(tcp_init_sock);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_rps_record_flow(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * in state CLOSE_WAIT impossible. One solution is evident: set
	 * POLLHUP if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look at how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (sk->sk_state != TCP_SYN_SENT &&
	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states.
		 */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
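
/* Illustrative userspace view (a sketch, not part of the original file):
 * after the peer shuts down its write side, poll() on the socket reports
 * POLLIN | POLLRDNORM | POLLRDHUP, while shutdown in both directions
 * (or TCP_CLOSE) additionally raises POLLHUP:
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN | POLLRDHUP };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLRDHUP)
 *		; // peer sent FIN: drain remaining data, then close
 */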

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN was received */
			if (answ && sock_flag(sk, SOCK_DONE))
				answ--;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not-yet-filled skb is pushed, do not send it if
 * we have packets in Qdisc or NIC queues:
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade-off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last check prevents
 * autocorking if we only have an ACK in Qdisc/NIC queues.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sysctl_tcp_autocorking &&
	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}
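
/* Usage note (illustrative, not part of the original file): autocorking
 * is controlled system-wide through the tcp_autocorking sysctl, e.g.
 *
 *	echo 0 > /proc/sys/net/ipv4/tcp_autocorking
 *
 * disables it, restoring the older behaviour of pushing each write
 * immediately regardless of Qdisc/NIC queue occupancy.
 */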

static void tcp_push(struct sock *sk, int flags, int mss_now,
		     int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!tcp_send_head(sk))
		return;

	skb = tcp_write_queue_tail(sk);
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
		}
		return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
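
/* Illustrative userspace usage (a sketch, not part of the original file):
 * this routine is the backend for splice(2) when the source fd is a TCP
 * socket. A typical zero-copy relay moves bytes socket -> pipe -> out_fd:
 *
 *	int p[2];
 *	pipe(p);
 *	ssize_t n = splice(sock_fd, NULL, p[1], NULL, 16384,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(p[0], NULL, out_fd, NULL, n,
 *		       SPLICE_F_MOVE | SPLICE_F_MORE);
 */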

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		u32 gso_size, hlen;

		/* Maybe we should/could use sk->sk_prot->max_header here ? */
		hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
		       inet_csk(sk)->icsk_ext_hdr_len +
		       tp->tcp_header_len;

		/* Goal is to send at least one packet per ms,
		 * not one big TSO packet every 100 ms.
		 * This preserves ACK clocking and is consistent
		 * with tcp_tso_should_defer() heuristic.
		 */
		gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
		gso_size = max_t(u32, gso_size,
				 sysctl_tcp_min_tso_segs * mss_now);

		xmit_size_goal = min_t(u32, gso_size,
				       sk->sk_gso_max_size - 1 - hlen);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs =
				min_t(u16, xmit_size_goal / mss_now,
				      sk->sk_gso_max_segs);
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}
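
/* Worked example (illustrative numbers, not from the original source):
 * with sk_pacing_rate of 20 MB/s, gso_size = 20000000 / 2000 = 10000
 * bytes, i.e. roughly half a millisecond of payload per TSO packet.
 * With an MSS of 1448 this is then rounded down to a whole number of
 * segments (6 * 1448 = 8688), so the size goal stays a multiple of
 * mss_now while still tracking the pacing rate.
 */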

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
				size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;
	}

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		int copy, i;
		bool can_coalesce;

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		if (!(size -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, page, offset, size, flags);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);

static inline int select_size(const struct sock *sk, bool sg)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk)) {
			/* Small frames won't use a full page:
			 * Payload will immediately follow tcp header.
			 */
			tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
		} else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req != NULL) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int err, flags;

	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
		return -EOPNOTSUPP;
	if (tp->fastopen_req != NULL)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(tp->fastopen_req == NULL))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;

	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
				    msg->msg_namelen, flags);
	*size = tp->fastopen_req->copied;
	tcp_free_fastopen_req(tp);
	return err;
}
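
/* Illustrative client-side usage (a sketch, not part of the original
 * file): a Fast Open client skips connect() and passes the destination
 * address together with the first payload bytes via sendto() with
 * MSG_FASTOPEN, so the data can ride on the SYN:
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * This path requires the tcp_fastopen sysctl to have the client bit
 * (TFO_CLIENT_ENABLE) set, as checked above.
 */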

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
	bool sg;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	if (flags & MSG_FASTOPEN) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
		offset = copied_syn;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = !!(sk->sk_route_caps & NETIF_F_SG);

	while (--iovlen >= 0) {
		size_t seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;
		if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
			if (offset >= seglen) {
				offset -= seglen;
				continue;
			}
			seglen -= offset;
			from += offset;
			offset = 0;
		}

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * All packets are restored as if they have
				 * already been sent.
				 */
				if (tp->repair)
					TCP_SKB_CB(skb)->when = tcp_time_stamp;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_availroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				copy = min_t(int, copy, skb_availroom(skb));
				err = skb_add_data_nocache(sk, skb, from, copy);
				if (err)
					goto do_fault;
			} else {
				bool merge = true;
				int i = skb_shinfo(skb)->nr_frags;
				struct page_frag *pfrag = sk_page_frag(sk);

				if (!sk_page_frag_refill(sk, pfrag))
					goto wait_for_memory;

				if (!skb_can_coalesce(skb, i, pfrag->page,
						      pfrag->offset)) {
					if (i == MAX_SKB_FRAGS || !sg) {
						tcp_mark_push(tp, skb);
						goto new_segment;
					}
					merge = false;
				}

				copy = min_t(int, copy, pfrag->size - pfrag->offset);

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				err = skb_copy_to_page_nocache(sk, from, skb,
							       pfrag->page,
							       pfrag->offset,
							       copy);
				if (err)
					goto do_error;

				/* Update the skb. */
				if (merge) {
					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
				} else {
					skb_fill_page_desc(skb, i, pfrag->page,
							   pfrag->offset, copy);
					get_page(pfrag->page);
				}
				pfrag->offset += copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now,
					 TCP_NAGLE_PUSH, size_goal);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	release_sock(sk);
	return copied + copied_syn;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied + copied_syn)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
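
/* Illustrative userspace view (a sketch, not part of the original file):
 * the single byte of urgent data is fetched out of band with
 *
 *	char c;
 *	recv(fd, &c, 1, MSG_OOB);
 *
 * which, per the comment above, fails with EAGAIN rather than blocking
 * when no urgent byte is pending. With SO_OOBINLINE set, the byte is
 * instead delivered in the normal data stream and this path rejects
 * MSG_OOB reads with -EINVAL.
 */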
1348 | ||
c0e88ff0 PE |
1349 | static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) |
1350 | { | |
1351 | struct sk_buff *skb; | |
1352 | int copied = 0, err = 0; | |
1353 | ||
1354 | /* XXX -- need to support SO_PEEK_OFF */ | |
1355 | ||
1356 | skb_queue_walk(&sk->sk_write_queue, skb) { | |
1357 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); | |
1358 | if (err) | |
1359 | break; | |
1360 | ||
1361 | copied += skb->len; | |
1362 | } | |
1363 | ||
1364 | return err ?: copied; | |
1365 | } | |
1366 | ||
1da177e4 LT |
1367 | /* Clean up the receive buffer for full frames taken by the user, |
1368 | * then send an ACK if necessary. COPIED is the number of bytes | |
1369 | * tcp_recvmsg has given to the user so far, it speeds up the | |
1370 | * calculation of whether or not we must ACK for the sake of | |
1371 | * a window update. | |
1372 | */ | |
0e4b4992 | 1373 | void tcp_cleanup_rbuf(struct sock *sk, int copied) |
1da177e4 LT |
1374 | { |
1375 | struct tcp_sock *tp = tcp_sk(sk); | |
a2a385d6 | 1376 | bool time_to_ack = false; |
1da177e4 | 1377 | |
1da177e4 LT |
1378 | struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); |
1379 | ||
d792c100 | 1380 | WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), |
2af6fd8b | 1381 | "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", |
d792c100 | 1382 | tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); |
1da177e4 | 1383 | |
463c84b9 ACM |
1384 | if (inet_csk_ack_scheduled(sk)) { |
1385 | const struct inet_connection_sock *icsk = inet_csk(sk); | |
1da177e4 LT |
1386 | /* Delayed ACKs frequently hit locked sockets during bulk |
1387 | * receive. */ | |
463c84b9 | 1388 | if (icsk->icsk_ack.blocked || |
1da177e4 | 1389 | /* Once-per-two-segments ACK was not sent by tcp_input.c */ |
463c84b9 | 1390 | tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || |
1da177e4 LT |
1391 | /* |
1392 | * If this read emptied read buffer, we send ACK, if | |
1393 | * connection is not bidirectional, user drained | |
1394 | * receive buffer and there was a small segment | |
1395 | * in queue. | |
1396 | */ | |
1ef9696c AK |
1397 | (copied > 0 && |
1398 | ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || | |
1399 | ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && | |
1400 | !icsk->icsk_ack.pingpong)) && | |
1401 | !atomic_read(&sk->sk_rmem_alloc))) | |
a2a385d6 | 1402 | time_to_ack = true; |
1da177e4 LT |
1403 | } |
1404 | ||
1405 | /* We send an ACK if we can now advertise a non-zero window | |
1406 | * which has been raised "significantly". | |
1407 | * | |
1408 | * Even if window raised up to infinity, do not send window open ACK | |
1409 | * in states, where we will not receive more. It is useless. | |
1410 | */ | |
1411 | if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { | |
1412 | __u32 rcv_window_now = tcp_receive_window(tp); | |
1413 | ||
1414 | /* Optimize, __tcp_select_window() is not cheap. */ | |
1415 | if (2*rcv_window_now <= tp->window_clamp) { | |
1416 | __u32 new_window = __tcp_select_window(sk); | |
1417 | ||
1418 | /* Send ACK now, if this read freed lots of space | |
1419 | * in our buffer. Certainly, new_window is new window. | |
1420 | * We can advertise it now, if it is not less than current one. | |
1421 | * "Lots" means "at least twice" here. | |
1422 | */ | |
1423 | if (new_window && new_window >= 2 * rcv_window_now) | |
a2a385d6 | 1424 | time_to_ack = true; |
1da177e4 LT |
1425 | } |
1426 | } | |
1427 | if (time_to_ack) | |
1428 | tcp_send_ack(sk); | |
1429 | } | |
1430 | ||
1431 | static void tcp_prequeue_process(struct sock *sk) | |
1432 | { | |
1433 | struct sk_buff *skb; | |
1434 | struct tcp_sock *tp = tcp_sk(sk); | |
1435 | ||
6f67c817 | 1436 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); |
1da177e4 LT |
1437 | |
1438 | /* RX process wants to run with disabled BHs, though it is not | |
1439 | * necessary */ | |
1440 | local_bh_disable(); | |
1441 | while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) | |
c57943a1 | 1442 | sk_backlog_rcv(sk, skb); |
1da177e4 LT |
1443 | local_bh_enable(); |
1444 | ||
1445 | /* Clear memory counter. */ | |
1446 | tp->ucopy.memory = 0; | |
1447 | } | |
1448 | ||
73852e81 SM |
1449 | #ifdef CONFIG_NET_DMA |
1450 | static void tcp_service_net_dma(struct sock *sk, bool wait) | |
1451 | { | |
1452 | dma_cookie_t done, used; | |
1453 | dma_cookie_t last_issued; | |
1454 | struct tcp_sock *tp = tcp_sk(sk); | |
1455 | ||
1456 | if (!tp->ucopy.dma_chan) | |
1457 | return; | |
1458 | ||
1459 | last_issued = tp->ucopy.dma_cookie; | |
b9ee8683 | 1460 | dma_async_issue_pending(tp->ucopy.dma_chan); |
73852e81 SM |
1461 | |
1462 | do { | |
e239345f | 1463 | if (dma_async_is_tx_complete(tp->ucopy.dma_chan, |
73852e81 | 1464 | last_issued, &done, |
27bf6970 | 1465 | &used) == DMA_COMPLETE) { |
73852e81 SM |
1466 | /* Safe to free early-copied skbs now */ |
1467 | __skb_queue_purge(&sk->sk_async_wait_queue); | |
1468 | break; | |
1469 | } else { | |
1470 | struct sk_buff *skb; | |
1471 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | |
1472 | (dma_async_is_complete(skb->dma_cookie, done, | |
27bf6970 | 1473 | used) == DMA_COMPLETE)) { |
73852e81 SM |
1474 | __skb_dequeue(&sk->sk_async_wait_queue); |
1475 | kfree_skb(skb); | |
1476 | } | |
1477 | } | |
1478 | } while (wait); | |
1479 | } | |
1480 | #endif | |
1481 | ||
f26845b4 | 1482 | static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) |
1da177e4 LT |
1483 | { |
1484 | struct sk_buff *skb; | |
1485 | u32 offset; | |
1486 | ||
f26845b4 | 1487 | while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { |
1da177e4 | 1488 | offset = seq - TCP_SKB_CB(skb)->seq; |
aa8223c7 | 1489 | if (tcp_hdr(skb)->syn) |
1da177e4 | 1490 | offset--; |
aa8223c7 | 1491 | if (offset < skb->len || tcp_hdr(skb)->fin) { |
1da177e4 LT |
1492 | *off = offset; |
1493 | return skb; | |
1494 | } | |
f26845b4 ED |
1495 | /* This looks weird, but this can happen if TCP collapsing |
1496 | * split a fat GRO packet while we had released the socket lock | |
1497 | * in skb_splice_bits() | |
1498 | */ | |
1499 | sk_eat_skb(sk, skb, false); | |
1da177e4 LT |
1500 | } |
1501 | return NULL; | |
1502 | } | |
1503 | ||
1504 | /* | |
1505 | * This routine provides an alternative to tcp_recvmsg() for routines | |
1506 | * that would like to handle copying from skbuffs directly in 'sendfile' | |
1507 | * fashion. | |
1508 | * Note: | |
1509 | * - It is assumed that the socket was locked by the caller. | |
1510 | * - The routine does not block. | |
1511 | * - At present, there is no support for reading OOB data | |
1512 | * or for 'peeking' the socket using this routine | |
1513 | * (although both would be easy to implement). | |
1514 | */ | |
1515 | int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, | |
1516 | sk_read_actor_t recv_actor) | |
1517 | { | |
1518 | struct sk_buff *skb; | |
1519 | struct tcp_sock *tp = tcp_sk(sk); | |
1520 | u32 seq = tp->copied_seq; | |
1521 | u32 offset; | |
1522 | int copied = 0; | |
1523 | ||
1524 | if (sk->sk_state == TCP_LISTEN) | |
1525 | return -ENOTCONN; | |
1526 | while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { | |
1527 | if (offset < skb->len) { | |
374e7b59 OP |
1528 | int used; |
1529 | size_t len; | |
1da177e4 LT |
1530 | |
1531 | len = skb->len - offset; | |
1532 | /* Stop reading if we hit a patch of urgent data */ | |
1533 | if (tp->urg_data) { | |
1534 | u32 urg_offset = tp->urg_seq - seq; | |
1535 | if (urg_offset < len) | |
1536 | len = urg_offset; | |
1537 | if (!len) | |
1538 | break; | |
1539 | } | |
1540 | used = recv_actor(desc, skb, offset, len); | |
ff905b1e | 1541 | if (used <= 0) { |
ddb61a57 JA |
1542 | if (!copied) |
1543 | copied = used; | |
1544 | break; | |
1545 | } else if (used <= len) { | |
1da177e4 LT |
1546 | seq += used; |
1547 | copied += used; | |
1548 | offset += used; | |
1549 | } | |
02275a2e | 1550 | /* If recv_actor drops the lock (e.g. TCP splice |
293ad604 OP |
1551 | * receive) the skb pointer might be invalid when |
1552 | * getting here: tcp_collapse might have deleted it | |
1553 | * while aggregating skbs from the socket queue. | |
1554 | */ | |
02275a2e WT |
1555 | skb = tcp_recv_skb(sk, seq - 1, &offset); |
1556 | if (!skb) | |
1da177e4 | 1557 | break; |
02275a2e WT |
1558 | /* TCP coalescing might have appended data to the skb. |
1559 | * Try to splice more frags | |
1560 | */ | |
1561 | if (offset + 1 != skb->len) | |
1562 | continue; | |
1da177e4 | 1563 | } |
aa8223c7 | 1564 | if (tcp_hdr(skb)->fin) { |
dc6b9b78 | 1565 | sk_eat_skb(sk, skb, false); |
1da177e4 LT |
1566 | ++seq; |
1567 | break; | |
1568 | } | |
dc6b9b78 | 1569 | sk_eat_skb(sk, skb, false); |
1da177e4 LT |
1570 | if (!desc->count) |
1571 | break; | |
baff42ab | 1572 | tp->copied_seq = seq; |
1da177e4 LT |
1573 | } |
1574 | tp->copied_seq = seq; | |
1575 | ||
1576 | tcp_rcv_space_adjust(sk); | |
1577 | ||
1578 | /* Clean up data we have read: This will do ACK frames. */ | |
f26845b4 ED |
1579 | if (copied > 0) { |
1580 | tcp_recv_skb(sk, seq, &offset); | |
0e4b4992 | 1581 | tcp_cleanup_rbuf(sk, copied); |
f26845b4 | 1582 | } |
1da177e4 LT |
1583 | return copied; |
1584 | } | |
4bc2f18b | 1585 | EXPORT_SYMBOL(tcp_read_sock); |
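/* A minimal sketch, not part of this file: a hypothetical recv_actor
 * showing the contract documented above. tcp_read_sock() runs with the
 * socket already locked; an actor return of <= 0 stops the walk, and
 * the loop also stops once desc->count reaches zero.
 */
static int example_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t used = min_t(size_t, len, desc->count);

	/* consume "used" bytes of payload starting at "offset" here */
	desc->count -= used;
	desc->written += used;
	return used;		/* bytes actually consumed */
}

static int example_read(struct sock *sk, size_t budget)
{
	read_descriptor_t desc = { .count = budget };
	int ret;

	lock_sock(sk);
	ret = tcp_read_sock(sk, &desc, example_recv_actor);
	release_sock(sk);
	return ret;		/* bytes copied, or a negative error */
}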
1da177e4 LT |
1586 | |
1587 | /* | |
1588 | * This routine copies from a sock struct into the user buffer. | |
1589 | * | |
1590 | * Technical note: since 2.3 we work on a _locked_ socket, so | |
1591 | * tricks with *seq access order and skb->users are not required. | |
1592 | * The code could probably be improved even further. | |
1593 | */ | |
1594 | ||
1595 | int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |
1596 | size_t len, int nonblock, int flags, int *addr_len) | |
1597 | { | |
1598 | struct tcp_sock *tp = tcp_sk(sk); | |
1599 | int copied = 0; | |
1600 | u32 peek_seq; | |
1601 | u32 *seq; | |
1602 | unsigned long used; | |
1603 | int err; | |
1604 | int target; /* Read at least this many bytes */ | |
1605 | long timeo; | |
1606 | struct task_struct *user_recv = NULL; | |
dc6b9b78 | 1607 | bool copied_early = false; |
2b1244a4 | 1608 | struct sk_buff *skb; |
77527313 | 1609 | u32 urg_hole = 0; |
1da177e4 | 1610 | |
cbf55001 ET |
1611 | if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && |
1612 | (sk->sk_state == TCP_ESTABLISHED)) | |
1613 | sk_busy_loop(sk, nonblock); | |
d30e383b | 1614 | |
1da177e4 LT |
1615 | lock_sock(sk); |
1616 | ||
1da177e4 LT |
1617 | err = -ENOTCONN; |
1618 | if (sk->sk_state == TCP_LISTEN) | |
1619 | goto out; | |
1620 | ||
1621 | timeo = sock_rcvtimeo(sk, nonblock); | |
1622 | ||
1623 | /* Urgent data needs to be handled specially. */ | |
1624 | if (flags & MSG_OOB) | |
1625 | goto recv_urg; | |
1626 | ||
c0e88ff0 PE |
1627 | if (unlikely(tp->repair)) { |
1628 | err = -EPERM; | |
1629 | if (!(flags & MSG_PEEK)) | |
1630 | goto out; | |
1631 | ||
1632 | if (tp->repair_queue == TCP_SEND_QUEUE) | |
1633 | goto recv_sndq; | |
1634 | ||
1635 | err = -EINVAL; | |
1636 | if (tp->repair_queue == TCP_NO_QUEUE) | |
1637 | goto out; | |
1638 | ||
1639 | /* 'common' recv queue MSG_PEEK-ing */ | |
1640 | } | |
1641 | ||
1da177e4 LT |
1642 | seq = &tp->copied_seq; |
1643 | if (flags & MSG_PEEK) { | |
1644 | peek_seq = tp->copied_seq; | |
1645 | seq = &peek_seq; | |
1646 | } | |
1647 | ||
1648 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | |
1649 | ||
1a2449a8 CL |
1650 | #ifdef CONFIG_NET_DMA |
1651 | tp->ucopy.dma_chan = NULL; | |
1652 | preempt_disable(); | |
2b1244a4 | 1653 | skb = skb_peek_tail(&sk->sk_receive_queue); |
e00c5d8b AM |
1654 | { |
1655 | int available = 0; | |
1656 | ||
1657 | if (skb) | |
1658 | available = TCP_SKB_CB(skb)->seq + skb->len - (*seq); | |
1659 | if ((available < target) && | |
1660 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && | |
1661 | !sysctl_tcp_low_latency && | |
a2bd1140 | 1662 | net_dma_find_channel()) { |
e00c5d8b AM |
1663 | preempt_enable_no_resched(); |
1664 | tp->ucopy.pinned_list = | |
1665 | dma_pin_iovec_pages(msg->msg_iov, len); | |
1666 | } else { | |
1667 | preempt_enable_no_resched(); | |
1668 | } | |
1669 | } | |
1a2449a8 CL |
1670 | #endif |
1671 | ||
1da177e4 | 1672 | do { |
1da177e4 LT |
1673 | u32 offset; |
1674 | ||
1675 | /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ | |
1676 | if (tp->urg_data && tp->urg_seq == *seq) { | |
1677 | if (copied) | |
1678 | break; | |
1679 | if (signal_pending(current)) { | |
1680 | copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; | |
1681 | break; | |
1682 | } | |
1683 | } | |
1684 | ||
1685 | /* Next get a buffer. */ | |
1686 | ||
91521944 | 1687 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
1da177e4 LT |
1688 | /* Now that we have two receive queues this |
1689 | * shouldn't happen. | |
1690 | */ | |
d792c100 | 1691 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), |
2af6fd8b JP |
1692 | "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", |
1693 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, | |
1694 | flags)) | |
1da177e4 | 1695 | break; |
d792c100 | 1696 | |
1da177e4 | 1697 | offset = *seq - TCP_SKB_CB(skb)->seq; |
aa8223c7 | 1698 | if (tcp_hdr(skb)->syn) |
1da177e4 LT |
1699 | offset--; |
1700 | if (offset < skb->len) | |
1701 | goto found_ok_skb; | |
aa8223c7 | 1702 | if (tcp_hdr(skb)->fin) |
1da177e4 | 1703 | goto found_fin_ok; |
2af6fd8b JP |
1704 | WARN(!(flags & MSG_PEEK), |
1705 | "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", | |
1706 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); | |
91521944 | 1707 | } |
1da177e4 LT |
1708 | |
1709 | /* Well, if we have a backlog, try to process it now. */ | |
1710 | ||
1711 | if (copied >= target && !sk->sk_backlog.tail) | |
1712 | break; | |
1713 | ||
1714 | if (copied) { | |
1715 | if (sk->sk_err || | |
1716 | sk->sk_state == TCP_CLOSE || | |
1717 | (sk->sk_shutdown & RCV_SHUTDOWN) || | |
1718 | !timeo || | |
518a09ef | 1719 | signal_pending(current)) |
1da177e4 LT |
1720 | break; |
1721 | } else { | |
1722 | if (sock_flag(sk, SOCK_DONE)) | |
1723 | break; | |
1724 | ||
1725 | if (sk->sk_err) { | |
1726 | copied = sock_error(sk); | |
1727 | break; | |
1728 | } | |
1729 | ||
1730 | if (sk->sk_shutdown & RCV_SHUTDOWN) | |
1731 | break; | |
1732 | ||
1733 | if (sk->sk_state == TCP_CLOSE) { | |
1734 | if (!sock_flag(sk, SOCK_DONE)) { | |
1735 | /* This occurs when the user tries to read | |
1736 | * from a never-connected socket. | |
1737 | */ | |
1738 | copied = -ENOTCONN; | |
1739 | break; | |
1740 | } | |
1741 | break; | |
1742 | } | |
1743 | ||
1744 | if (!timeo) { | |
1745 | copied = -EAGAIN; | |
1746 | break; | |
1747 | } | |
1748 | ||
1749 | if (signal_pending(current)) { | |
1750 | copied = sock_intr_errno(timeo); | |
1751 | break; | |
1752 | } | |
1753 | } | |
1754 | ||
0e4b4992 | 1755 | tcp_cleanup_rbuf(sk, copied); |
1da177e4 | 1756 | |
7df55125 | 1757 | if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { |
1da177e4 LT |
1758 | /* Install new reader */ |
1759 | if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { | |
1760 | user_recv = current; | |
1761 | tp->ucopy.task = user_recv; | |
1762 | tp->ucopy.iov = msg->msg_iov; | |
1763 | } | |
1764 | ||
1765 | tp->ucopy.len = len; | |
1766 | ||
547b792c IJ |
1767 | WARN_ON(tp->copied_seq != tp->rcv_nxt && |
1768 | !(flags & (MSG_PEEK | MSG_TRUNC))); | |
1da177e4 LT |
1769 | |
1770 | /* Ugly... If the prequeue is not empty, we have to | |
1771 | * process it before releasing the socket; otherwise | |
1772 | * the ordering will be broken on the second iteration. | |
1773 | * A more elegant solution is required!!! | |
1774 | * | |
1775 | * Look: we have the following (pseudo)queues: | |
1776 | * | |
1777 | * 1. packets in flight | |
1778 | * 2. backlog | |
1779 | * 3. prequeue | |
1780 | * 4. receive_queue | |
1781 | * | |
1782 | * Each queue can be processed only if the next ones | |
1783 | * are empty. At this point we have an empty receive_queue. | |
1784 | * But the prequeue _can_ be non-empty after the 2nd iteration, | |
1785 | * when we jumped to the start of the loop because backlog | |
1786 | * processing added something to the receive_queue. | |
1787 | * We cannot release_sock(), because the backlog contains | |
1788 | * packets that arrived _after_ the prequeued ones. | |
1789 | * | |
1790 | * In short, the algorithm is clear: process all | |
1791 | * the queues in order. We could do it more directly, | |
1792 | * requeueing packets from the backlog to the prequeue if it | |
1793 | * is not empty. That would be more elegant, but eats cycles, | |
1794 | * unfortunately. | |
1795 | */ | |
b03efcfb | 1796 | if (!skb_queue_empty(&tp->ucopy.prequeue)) |
1da177e4 LT |
1797 | goto do_prequeue; |
1798 | ||
1799 | /* __ Set realtime policy in scheduler __ */ | |
1800 | } | |
1801 | ||
73852e81 | 1802 | #ifdef CONFIG_NET_DMA |
15c04175 MK |
1803 | if (tp->ucopy.dma_chan) { |
1804 | if (tp->rcv_wnd == 0 && | |
1805 | !skb_queue_empty(&sk->sk_async_wait_queue)) { | |
1806 | tcp_service_net_dma(sk, true); | |
1807 | tcp_cleanup_rbuf(sk, copied); | |
1808 | } else | |
b9ee8683 | 1809 | dma_async_issue_pending(tp->ucopy.dma_chan); |
15c04175 | 1810 | } |
73852e81 | 1811 | #endif |
1da177e4 LT |
1812 | if (copied >= target) { |
1813 | /* Do not sleep, just process backlog. */ | |
1814 | release_sock(sk); | |
1815 | lock_sock(sk); | |
1816 | } else | |
1817 | sk_wait_data(sk, &timeo); | |
1818 | ||
1a2449a8 | 1819 | #ifdef CONFIG_NET_DMA |
73852e81 | 1820 | tcp_service_net_dma(sk, false); /* Don't block */ |
1a2449a8 CL |
1821 | tp->ucopy.wakeup = 0; |
1822 | #endif | |
1823 | ||
1da177e4 LT |
1824 | if (user_recv) { |
1825 | int chunk; | |
1826 | ||
1827 | /* __ Restore normal policy in scheduler __ */ | |
1828 | ||
1829 | if ((chunk = len - tp->ucopy.len) != 0) { | |
ed88098e | 1830 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); |
1da177e4 LT |
1831 | len -= chunk; |
1832 | copied += chunk; | |
1833 | } | |
1834 | ||
1835 | if (tp->rcv_nxt == tp->copied_seq && | |
b03efcfb | 1836 | !skb_queue_empty(&tp->ucopy.prequeue)) { |
1da177e4 LT |
1837 | do_prequeue: |
1838 | tcp_prequeue_process(sk); | |
1839 | ||
1840 | if ((chunk = len - tp->ucopy.len) != 0) { | |
ed88098e | 1841 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1da177e4 LT |
1842 | len -= chunk; |
1843 | copied += chunk; | |
1844 | } | |
1845 | } | |
1846 | } | |
77527313 IJ |
1847 | if ((flags & MSG_PEEK) && |
1848 | (peek_seq - copied - urg_hole != tp->copied_seq)) { | |
e87cc472 JP |
1849 | net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", |
1850 | current->comm, | |
1851 | task_pid_nr(current)); | |
1da177e4 LT |
1852 | peek_seq = tp->copied_seq; |
1853 | } | |
1854 | continue; | |
1855 | ||
1856 | found_ok_skb: | |
1857 | /* Ok so how much can we use? */ | |
1858 | used = skb->len - offset; | |
1859 | if (len < used) | |
1860 | used = len; | |
1861 | ||
1862 | /* Do we have urgent data here? */ | |
1863 | if (tp->urg_data) { | |
1864 | u32 urg_offset = tp->urg_seq - *seq; | |
1865 | if (urg_offset < used) { | |
1866 | if (!urg_offset) { | |
1867 | if (!sock_flag(sk, SOCK_URGINLINE)) { | |
1868 | ++*seq; | |
77527313 | 1869 | urg_hole++; |
1da177e4 LT |
1870 | offset++; |
1871 | used--; | |
1872 | if (!used) | |
1873 | goto skip_copy; | |
1874 | } | |
1875 | } else | |
1876 | used = urg_offset; | |
1877 | } | |
1878 | } | |
1879 | ||
1880 | if (!(flags & MSG_TRUNC)) { | |
1a2449a8 CL |
1881 | #ifdef CONFIG_NET_DMA |
1882 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | |
a2bd1140 | 1883 | tp->ucopy.dma_chan = net_dma_find_channel(); |
1a2449a8 CL |
1884 | |
1885 | if (tp->ucopy.dma_chan) { | |
1886 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( | |
1887 | tp->ucopy.dma_chan, skb, offset, | |
1888 | msg->msg_iov, used, | |
1889 | tp->ucopy.pinned_list); | |
1890 | ||
1891 | if (tp->ucopy.dma_cookie < 0) { | |
1892 | ||
afd46503 JP |
1893 | pr_alert("%s: dma_cookie < 0\n", |
1894 | __func__); | |
1a2449a8 CL |
1895 | |
1896 | /* Exception. Bailout! */ | |
1897 | if (!copied) | |
1898 | copied = -EFAULT; | |
1899 | break; | |
1900 | } | |
73852e81 | 1901 | |
b9ee8683 | 1902 | dma_async_issue_pending(tp->ucopy.dma_chan); |
73852e81 | 1903 | |
1a2449a8 | 1904 | if ((offset + used) == skb->len) |
dc6b9b78 | 1905 | copied_early = true; |
1a2449a8 CL |
1906 | |
1907 | } else | |
1908 | #endif | |
1909 | { | |
1910 | err = skb_copy_datagram_iovec(skb, offset, | |
1911 | msg->msg_iov, used); | |
1912 | if (err) { | |
1913 | /* Exception. Bailout! */ | |
1914 | if (!copied) | |
1915 | copied = -EFAULT; | |
1916 | break; | |
1917 | } | |
1da177e4 LT |
1918 | } |
1919 | } | |
1920 | ||
1921 | *seq += used; | |
1922 | copied += used; | |
1923 | len -= used; | |
1924 | ||
1925 | tcp_rcv_space_adjust(sk); | |
1926 | ||
1927 | skip_copy: | |
1928 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { | |
1929 | tp->urg_data = 0; | |
9e412ba7 | 1930 | tcp_fast_path_check(sk); |
1da177e4 LT |
1931 | } |
1932 | if (used + offset < skb->len) | |
1933 | continue; | |
1934 | ||
aa8223c7 | 1935 | if (tcp_hdr(skb)->fin) |
1da177e4 | 1936 | goto found_fin_ok; |
1a2449a8 CL |
1937 | if (!(flags & MSG_PEEK)) { |
1938 | sk_eat_skb(sk, skb, copied_early); | |
dc6b9b78 | 1939 | copied_early = false; |
1a2449a8 | 1940 | } |
1da177e4 LT |
1941 | continue; |
1942 | ||
1943 | found_fin_ok: | |
1944 | /* Process the FIN. */ | |
1945 | ++*seq; | |
1a2449a8 CL |
1946 | if (!(flags & MSG_PEEK)) { |
1947 | sk_eat_skb(sk, skb, copied_early); | |
dc6b9b78 | 1948 | copied_early = false; |
1a2449a8 | 1949 | } |
1da177e4 LT |
1950 | break; |
1951 | } while (len > 0); | |
1952 | ||
1953 | if (user_recv) { | |
b03efcfb | 1954 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { |
1da177e4 LT |
1955 | int chunk; |
1956 | ||
1957 | tp->ucopy.len = copied > 0 ? len : 0; | |
1958 | ||
1959 | tcp_prequeue_process(sk); | |
1960 | ||
1961 | if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { | |
ed88098e | 1962 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1da177e4 LT |
1963 | len -= chunk; |
1964 | copied += chunk; | |
1965 | } | |
1966 | } | |
1967 | ||
1968 | tp->ucopy.task = NULL; | |
1969 | tp->ucopy.len = 0; | |
1970 | } | |
1971 | ||
1a2449a8 | 1972 | #ifdef CONFIG_NET_DMA |
73852e81 SM |
1973 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ |
1974 | tp->ucopy.dma_chan = NULL; | |
1a2449a8 | 1975 | |
1a2449a8 CL |
1976 | if (tp->ucopy.pinned_list) { |
1977 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); | |
1978 | tp->ucopy.pinned_list = NULL; | |
1979 | } | |
1980 | #endif | |
1981 | ||
1da177e4 LT |
1982 | /* According to UNIX98, msg_name/msg_namelen are ignored |
1983 | * on a connected socket. I was just happy when I found this 8) --ANK | |
1984 | */ | |
1985 | ||
1986 | /* Clean up data we have read: This will do ACK frames. */ | |
0e4b4992 | 1987 | tcp_cleanup_rbuf(sk, copied); |
1da177e4 | 1988 | |
1da177e4 LT |
1989 | release_sock(sk); |
1990 | return copied; | |
1991 | ||
1992 | out: | |
1da177e4 LT |
1993 | release_sock(sk); |
1994 | return err; | |
1995 | ||
1996 | recv_urg: | |
377f0a08 | 1997 | err = tcp_recv_urg(sk, msg, len, flags); |
1da177e4 | 1998 | goto out; |
c0e88ff0 PE |
1999 | |
2000 | recv_sndq: | |
2001 | err = tcp_peek_sndq(sk, msg, len); | |
2002 | goto out; | |
1da177e4 | 2003 | } |
4bc2f18b | 2004 | EXPORT_SYMBOL(tcp_recvmsg); |
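/* Userspace sketch, not part of this file: the "target" computed above
 * via sock_rcvlowat() becomes the full buffer length under MSG_WAITALL,
 * so tcp_recvmsg() keeps waiting until it has copied that much (or an
 * error, EOF or signal cuts the read short).
 */
#include <sys/socket.h>

static ssize_t read_exact(int fd, void *buf, size_t len)
{
	return recv(fd, buf, len, MSG_WAITALL);
}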
1da177e4 | 2005 | |
490d5046 IJ |
2006 | void tcp_set_state(struct sock *sk, int state) |
2007 | { | |
2008 | int oldstate = sk->sk_state; | |
2009 | ||
2010 | switch (state) { | |
2011 | case TCP_ESTABLISHED: | |
2012 | if (oldstate != TCP_ESTABLISHED) | |
81cc8a75 | 2013 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
490d5046 IJ |
2014 | break; |
2015 | ||
2016 | case TCP_CLOSE: | |
2017 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) | |
81cc8a75 | 2018 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); |
490d5046 IJ |
2019 | |
2020 | sk->sk_prot->unhash(sk); | |
2021 | if (inet_csk(sk)->icsk_bind_hash && | |
2022 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) | |
ab1e0a13 | 2023 | inet_put_port(sk); |
490d5046 IJ |
2024 | /* fall through */ |
2025 | default: | |
5a5f3a8d | 2026 | if (oldstate == TCP_ESTABLISHED) |
74688e48 | 2027 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
490d5046 IJ |
2028 | } |
2029 | ||
2030 | /* Change state AFTER socket is unhashed to avoid closed | |
2031 | * socket sitting in hash tables. | |
2032 | */ | |
2033 | sk->sk_state = state; | |
2034 | ||
2035 | #ifdef STATE_TRACE | |
5a5f3a8d | 2036 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); |
490d5046 IJ |
2037 | #endif |
2038 | } | |
2039 | EXPORT_SYMBOL_GPL(tcp_set_state); | |
2040 | ||
1da177e4 LT |
2041 | /* |
2042 | * State processing on a close. This implements the state shift for | |
2043 | * sending our FIN frame. Note that we only send a FIN for some | |
2044 | * states. A shutdown() may have already sent the FIN, or we may be | |
2045 | * closed. | |
2046 | */ | |
2047 | ||
9b5b5cff | 2048 | static const unsigned char new_state[16] = { |
1da177e4 LT |
2049 | /* current state: new state: action: */ |
2050 | /* (Invalid) */ TCP_CLOSE, | |
2051 | /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
2052 | /* TCP_SYN_SENT */ TCP_CLOSE, | |
2053 | /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
2054 | /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, | |
2055 | /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, | |
2056 | /* TCP_TIME_WAIT */ TCP_CLOSE, | |
2057 | /* TCP_CLOSE */ TCP_CLOSE, | |
2058 | /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, | |
2059 | /* TCP_LAST_ACK */ TCP_LAST_ACK, | |
2060 | /* TCP_LISTEN */ TCP_CLOSE, | |
2061 | /* TCP_CLOSING */ TCP_CLOSING, | |
2062 | }; | |
2063 | ||
2064 | static int tcp_close_state(struct sock *sk) | |
2065 | { | |
2066 | int next = (int)new_state[sk->sk_state]; | |
2067 | int ns = next & TCP_STATE_MASK; | |
2068 | ||
2069 | tcp_set_state(sk, ns); | |
2070 | ||
2071 | return next & TCP_ACTION_FIN; | |
2072 | } | |
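/* Worked example of the table above: new_state[TCP_ESTABLISHED] is
 * (TCP_FIN_WAIT1 | TCP_ACTION_FIN), so tcp_close_state() moves the
 * socket to FIN_WAIT1 and returns non-zero, telling the caller that a
 * FIN must be transmitted.
 */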
2073 | ||
2074 | /* | |
2075 | * Shut down the sending side of a connection. Much like close except | |
1f29b058 | 2076 | * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD). |
1da177e4 LT |
2077 | */ |
2078 | ||
2079 | void tcp_shutdown(struct sock *sk, int how) | |
2080 | { | |
2081 | /* We need to grab some memory, and put together a FIN, | |
2082 | * and then put it into the queue to be sent. | |
2083 | * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. | |
2084 | */ | |
2085 | if (!(how & SEND_SHUTDOWN)) | |
2086 | return; | |
2087 | ||
2088 | /* If we've already sent a FIN, or it's a closed state, skip this. */ | |
2089 | if ((1 << sk->sk_state) & | |
2090 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | | |
2091 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { | |
2092 | /* Clear out any half completed packets. FIN if needed. */ | |
2093 | if (tcp_close_state(sk)) | |
2094 | tcp_send_fin(sk); | |
2095 | } | |
2096 | } | |
4bc2f18b | 2097 | EXPORT_SYMBOL(tcp_shutdown); |
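/* Userspace sketch, not part of this file: a half-close. SHUT_WR is
 * translated to SEND_SHUTDOWN by the socket layer, so tcp_shutdown()
 * queues a FIN while the receive side stays usable.
 */
#include <sys/socket.h>
#include <unistd.h>

static void half_close_and_drain(int fd)
{
	char buf[4096];

	shutdown(fd, SHUT_WR);			/* done sending */
	while (read(fd, buf, sizeof(buf)) > 0)
		;				/* peer data still flows */
}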
1da177e4 | 2098 | |
efcdbf24 AS |
2099 | bool tcp_check_oom(struct sock *sk, int shift) |
2100 | { | |
2101 | bool too_many_orphans, out_of_socket_memory; | |
2102 | ||
2103 | too_many_orphans = tcp_too_many_orphans(sk, shift); | |
2104 | out_of_socket_memory = tcp_out_of_memory(sk); | |
2105 | ||
e87cc472 JP |
2106 | if (too_many_orphans) |
2107 | net_info_ratelimited("too many orphaned sockets\n"); | |
2108 | if (out_of_socket_memory) | |
2109 | net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); | |
efcdbf24 AS |
2110 | return too_many_orphans || out_of_socket_memory; |
2111 | } | |
2112 | ||
1da177e4 LT |
2113 | void tcp_close(struct sock *sk, long timeout) |
2114 | { | |
2115 | struct sk_buff *skb; | |
2116 | int data_was_unread = 0; | |
75c2d907 | 2117 | int state; |
1da177e4 LT |
2118 | |
2119 | lock_sock(sk); | |
2120 | sk->sk_shutdown = SHUTDOWN_MASK; | |
2121 | ||
2122 | if (sk->sk_state == TCP_LISTEN) { | |
2123 | tcp_set_state(sk, TCP_CLOSE); | |
2124 | ||
2125 | /* Special case. */ | |
0a5578cf | 2126 | inet_csk_listen_stop(sk); |
1da177e4 LT |
2127 | |
2128 | goto adjudge_to_death; | |
2129 | } | |
2130 | ||
2131 | /* We need to flush the recv. buffs. We do this only on the | |
2132 | * descriptor close, not protocol-sourced closes, because the | |
2133 | * reader process may not have drained the data yet! | |
2134 | */ | |
2135 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { | |
2136 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - | |
aa8223c7 | 2137 | tcp_hdr(skb)->fin; |
1da177e4 LT |
2138 | data_was_unread += len; |
2139 | __kfree_skb(skb); | |
2140 | } | |
2141 | ||
3ab224be | 2142 | sk_mem_reclaim(sk); |
1da177e4 | 2143 | |
565b7b2d KK |
2144 | /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ |
2145 | if (sk->sk_state == TCP_CLOSE) | |
2146 | goto adjudge_to_death; | |
2147 | ||
65bb723c GR |
2148 | /* As outlined in RFC 2525, section 2.17, we send a RST here because |
2149 | * data was lost. To witness the awful effects of the old behavior of | |
2150 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk | |
2151 | * GET in an FTP client, suspend the process, wait for the client to | |
2152 | * advertise a zero window, then kill -9 the FTP client, wheee... | |
2153 | * Note: timeout is always zero in such a case. | |
1da177e4 | 2154 | */ |
ee995283 PE |
2155 | if (unlikely(tcp_sk(sk)->repair)) { |
2156 | sk->sk_prot->disconnect(sk, 0); | |
2157 | } else if (data_was_unread) { | |
1da177e4 | 2158 | /* Unread data was tossed, zap the connection. */ |
6f67c817 | 2159 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
1da177e4 | 2160 | tcp_set_state(sk, TCP_CLOSE); |
aa133076 | 2161 | tcp_send_active_reset(sk, sk->sk_allocation); |
1da177e4 LT |
2162 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { |
2163 | /* Check zero linger _after_ checking for unread data. */ | |
2164 | sk->sk_prot->disconnect(sk, 0); | |
6f67c817 | 2165 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
1da177e4 LT |
2166 | } else if (tcp_close_state(sk)) { |
2167 | /* We FIN if the application ate all the data before | |
2168 | * zapping the connection. | |
2169 | */ | |
2170 | ||
2171 | /* RED-PEN. Formally speaking, we have broken the TCP state | |
2172 | * machine. State transitions: | |
2173 | * | |
2174 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 | |
2175 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) | |
2176 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK | |
2177 | * | |
2178 | * are legal only when the FIN has been sent (i.e. in window), | |
2179 | * rather than queued out of window. Purists may blame us. | |
2180 | * | |
2181 | * E.g. the "RFC state" is ESTABLISHED | |
2182 | * if the Linux state is FIN-WAIT-1 but the FIN has not been sent yet. | |
2183 | * | |
2184 | * The visible deviations are that we sometimes | |
2185 | * enter the time-wait state when it is not really required | |
2186 | * (harmless), and do not send active resets when the specs | |
2187 | * require them (TCP_ESTABLISHED and TCP_CLOSE_WAIT look | |
2188 | * like CLOSING or LAST_ACK to Linux). | |
2189 | * I probably missed some more loopholes. | |
2190 | * --ANK | |
8336886f JC |
2191 | * XXX (TFO) - To start off we don't support SYN+ACK+FIN |
2192 | * in a single packet! (May consider it later but will | |
2193 | * probably need API support or TCP_CORK SYN-ACK until | |
2194 | * data is written and socket is closed.) | |
1da177e4 LT |
2195 | */ |
2196 | tcp_send_fin(sk); | |
2197 | } | |
2198 | ||
2199 | sk_stream_wait_close(sk, timeout); | |
2200 | ||
2201 | adjudge_to_death: | |
75c2d907 HX |
2202 | state = sk->sk_state; |
2203 | sock_hold(sk); | |
2204 | sock_orphan(sk); | |
75c2d907 | 2205 | |
1da177e4 LT |
2206 | /* It is the last release_sock in its life. It will remove backlog. */ |
2207 | release_sock(sk); | |
2208 | ||
2209 | ||
2210 | /* Now the socket is owned by the kernel and we acquire the BH lock |
2211 | to finish the close. No need to check for user refs. | |
2212 | */ | |
2213 | local_bh_disable(); | |
2214 | bh_lock_sock(sk); | |
547b792c | 2215 | WARN_ON(sock_owned_by_user(sk)); |
1da177e4 | 2216 | |
eb4dea58 HX |
2217 | percpu_counter_inc(sk->sk_prot->orphan_count); |
2218 | ||
75c2d907 HX |
2219 | /* Have we already been destroyed by a softirq or backlog? */ |
2220 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) | |
2221 | goto out; | |
1da177e4 LT |
2222 | |
2223 | /* This is a (useful) BSD violation of the RFC. There is a | |
2224 | * problem with TCP as specified: the other end could | |
2225 | * keep a socket open forever with no application left on this end. | |
2226 | * We use a 3 minute timeout (about the same as BSD) then kill | |
2227 | * our end. If they send after that then tough - BUT it is long enough | |
2228 | * that we won't repeat the old "4*rto = almost no time - whoops | |
2229 | * reset" mistake. | |
2230 | * | |
2231 | * Nope, it was not a mistake. It is really desired behaviour, | |
2232 | * e.g. on HTTP servers, where such sockets are useless but | |
2233 | * consume significant resources. Let's do it with the special | |
2234 | * linger2 option. --ANK | |
2235 | */ | |
2236 | ||
2237 | if (sk->sk_state == TCP_FIN_WAIT2) { | |
2238 | struct tcp_sock *tp = tcp_sk(sk); | |
2239 | if (tp->linger2 < 0) { | |
2240 | tcp_set_state(sk, TCP_CLOSE); | |
2241 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
de0744af PE |
2242 | NET_INC_STATS_BH(sock_net(sk), |
2243 | LINUX_MIB_TCPABORTONLINGER); | |
1da177e4 | 2244 | } else { |
463c84b9 | 2245 | const int tmo = tcp_fin_time(sk); |
1da177e4 LT |
2246 | |
2247 | if (tmo > TCP_TIMEWAIT_LEN) { | |
52499afe DM |
2248 | inet_csk_reset_keepalive_timer(sk, |
2249 | tmo - TCP_TIMEWAIT_LEN); | |
1da177e4 | 2250 | } else { |
1da177e4 LT |
2251 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
2252 | goto out; | |
2253 | } | |
2254 | } | |
2255 | } | |
2256 | if (sk->sk_state != TCP_CLOSE) { | |
3ab224be | 2257 | sk_mem_reclaim(sk); |
efcdbf24 | 2258 | if (tcp_check_oom(sk, 0)) { |
1da177e4 LT |
2259 | tcp_set_state(sk, TCP_CLOSE); |
2260 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
de0744af PE |
2261 | NET_INC_STATS_BH(sock_net(sk), |
2262 | LINUX_MIB_TCPABORTONMEMORY); | |
1da177e4 LT |
2263 | } |
2264 | } | |
1da177e4 | 2265 | |
8336886f JC |
2266 | if (sk->sk_state == TCP_CLOSE) { |
2267 | struct request_sock *req = tcp_sk(sk)->fastopen_rsk; | |
2268 | /* We could get here with a non-NULL req if the socket is | |
2269 | * aborted (e.g., closed with unread data) before 3WHS | |
2270 | * finishes. | |
2271 | */ | |
2272 | if (req != NULL) | |
2273 | reqsk_fastopen_remove(sk, req, false); | |
0a5578cf | 2274 | inet_csk_destroy_sock(sk); |
8336886f | 2275 | } |
1da177e4 LT |
2276 | /* Otherwise, socket is reprieved until protocol close. */ |
2277 | ||
2278 | out: | |
2279 | bh_unlock_sock(sk); | |
2280 | local_bh_enable(); | |
2281 | sock_put(sk); | |
2282 | } | |
4bc2f18b | 2283 | EXPORT_SYMBOL(tcp_close); |
1da177e4 LT |
2284 | |
2285 | /* These states need RST on ABORT according to RFC793 */ | |
2286 | ||
a2a385d6 | 2287 | static inline bool tcp_need_reset(int state) |
1da177e4 LT |
2288 | { |
2289 | return (1 << state) & | |
2290 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | | |
2291 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); | |
2292 | } | |
2293 | ||
2294 | int tcp_disconnect(struct sock *sk, int flags) | |
2295 | { | |
2296 | struct inet_sock *inet = inet_sk(sk); | |
463c84b9 | 2297 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2298 | struct tcp_sock *tp = tcp_sk(sk); |
2299 | int err = 0; | |
2300 | int old_state = sk->sk_state; | |
2301 | ||
2302 | if (old_state != TCP_CLOSE) | |
2303 | tcp_set_state(sk, TCP_CLOSE); | |
2304 | ||
2305 | /* ABORT function of RFC793 */ | |
2306 | if (old_state == TCP_LISTEN) { | |
0a5578cf | 2307 | inet_csk_listen_stop(sk); |
ee995283 PE |
2308 | } else if (unlikely(tp->repair)) { |
2309 | sk->sk_err = ECONNABORTED; | |
1da177e4 LT |
2310 | } else if (tcp_need_reset(old_state) || |
2311 | (tp->snd_nxt != tp->write_seq && | |
2312 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { | |
caa20d9a | 2313 | /* The last check adjusts for the discrepancy between Linux and the RFC |
1da177e4 LT |
2314 | * states | |
2315 | */ | |
2316 | tcp_send_active_reset(sk, gfp_any()); | |
2317 | sk->sk_err = ECONNRESET; | |
2318 | } else if (old_state == TCP_SYN_SENT) | |
2319 | sk->sk_err = ECONNRESET; | |
2320 | ||
2321 | tcp_clear_xmit_timers(sk); | |
2322 | __skb_queue_purge(&sk->sk_receive_queue); | |
fe067e8a | 2323 | tcp_write_queue_purge(sk); |
1da177e4 | 2324 | __skb_queue_purge(&tp->out_of_order_queue); |
1a2449a8 CL |
2325 | #ifdef CONFIG_NET_DMA |
2326 | __skb_queue_purge(&sk->sk_async_wait_queue); | |
2327 | #endif | |
1da177e4 | 2328 | |
c720c7e8 | 2329 | inet->inet_dport = 0; |
1da177e4 LT |
2330 | |
2331 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | |
2332 | inet_reset_saddr(sk); | |
2333 | ||
2334 | sk->sk_shutdown = 0; | |
2335 | sock_reset_flag(sk, SOCK_DONE); | |
2336 | tp->srtt = 0; | |
2337 | if ((tp->write_seq += tp->max_window + 2) == 0) | |
2338 | tp->write_seq = 1; | |
463c84b9 | 2339 | icsk->icsk_backoff = 0; |
1da177e4 | 2340 | tp->snd_cwnd = 2; |
6687e988 | 2341 | icsk->icsk_probes_out = 0; |
1da177e4 | 2342 | tp->packets_out = 0; |
0b6a05c1 | 2343 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
1da177e4 | 2344 | tp->snd_cwnd_cnt = 0; |
1fdf475a | 2345 | tp->window_clamp = 0; |
6687e988 | 2346 | tcp_set_ca_state(sk, TCP_CA_Open); |
1da177e4 | 2347 | tcp_clear_retrans(tp); |
463c84b9 | 2348 | inet_csk_delack_init(sk); |
fe067e8a | 2349 | tcp_init_send_head(sk); |
b40b4f79 | 2350 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
1da177e4 LT |
2351 | __sk_dst_reset(sk); |
2352 | ||
c720c7e8 | 2353 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
1da177e4 LT |
2354 | |
2355 | sk->sk_error_report(sk); | |
2356 | return err; | |
2357 | } | |
4bc2f18b | 2358 | EXPORT_SYMBOL(tcp_disconnect); |
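/* Userspace sketch, not part of this file: connect(2) with AF_UNSPEC
 * is the documented way to reach tcp_disconnect(), returning the socket
 * to an unconnected state subject to the RST rules above.
 */
#include <sys/socket.h>

static int tcp_userspace_disconnect(int fd)
{
	struct sockaddr sa = { .sa_family = AF_UNSPEC };

	return connect(fd, &sa, sizeof(sa));
}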
1da177e4 | 2359 | |
bb68b647 CP |
2360 | void tcp_sock_destruct(struct sock *sk) |
2361 | { | |
2362 | inet_sock_destruct(sk); | |
2363 | ||
2364 | kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); | |
2365 | } | |
2366 | ||
a2a385d6 | 2367 | static inline bool tcp_can_repair_sock(const struct sock *sk) |
ee995283 | 2368 | { |
52e804c6 | 2369 | return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && |
ee995283 PE |
2370 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); |
2371 | } | |
2372 | ||
de248a75 PE |
2373 | static int tcp_repair_options_est(struct tcp_sock *tp, |
2374 | struct tcp_repair_opt __user *optbuf, unsigned int len) | |
b139ba4e | 2375 | { |
de248a75 | 2376 | struct tcp_repair_opt opt; |
b139ba4e | 2377 | |
de248a75 PE |
2378 | while (len >= sizeof(opt)) { |
2379 | if (copy_from_user(&opt, optbuf, sizeof(opt))) | |
b139ba4e PE |
2380 | return -EFAULT; |
2381 | ||
2382 | optbuf++; | |
de248a75 | 2383 | len -= sizeof(opt); |
b139ba4e | 2384 | |
de248a75 PE |
2385 | switch (opt.opt_code) { |
2386 | case TCPOPT_MSS: | |
2387 | tp->rx_opt.mss_clamp = opt.opt_val; | |
b139ba4e | 2388 | break; |
de248a75 | 2389 | case TCPOPT_WINDOW: |
bc26ccd8 AV |
2390 | { |
2391 | u16 snd_wscale = opt.opt_val & 0xFFFF; | |
2392 | u16 rcv_wscale = opt.opt_val >> 16; | |
2393 | ||
2394 | if (snd_wscale > 14 || rcv_wscale > 14) | |
2395 | return -EFBIG; | |
b139ba4e | 2396 | |
bc26ccd8 AV |
2397 | tp->rx_opt.snd_wscale = snd_wscale; |
2398 | tp->rx_opt.rcv_wscale = rcv_wscale; | |
2399 | tp->rx_opt.wscale_ok = 1; | |
2400 | } | |
b139ba4e | 2401 | break; |
b139ba4e | 2402 | case TCPOPT_SACK_PERM: |
de248a75 PE |
2403 | if (opt.opt_val != 0) |
2404 | return -EINVAL; | |
2405 | ||
b139ba4e PE |
2406 | tp->rx_opt.sack_ok |= TCP_SACK_SEEN; |
2407 | if (sysctl_tcp_fack) | |
2408 | tcp_enable_fack(tp); | |
2409 | break; | |
2410 | case TCPOPT_TIMESTAMP: | |
de248a75 PE |
2411 | if (opt.opt_val != 0) |
2412 | return -EINVAL; | |
2413 | ||
b139ba4e PE |
2414 | tp->rx_opt.tstamp_ok = 1; |
2415 | break; | |
2416 | } | |
2417 | } | |
2418 | ||
2419 | return 0; | |
2420 | } | |
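/* Userspace sketch (checkpoint/restore style, not part of this file):
 * replay previously negotiated options into a socket that is already in
 * repair mode and ESTABLISHED, matching the parsing loop above. The
 * struct and TCPOPT_* constants come from the uapi <linux/tcp.h>; the
 * values are illustrative.
 */
static int repair_replay_opts(int fd)
{
	struct tcp_repair_opt opts[] = {
		{ TCPOPT_MSS,       1460 },
		{ TCPOPT_WINDOW,    7 | (7 << 16) },	/* snd | rcv << 16 */
		{ TCPOPT_SACK_PERM, 0 },
	};

	return setsockopt(fd, SOL_TCP, TCP_REPAIR_OPTIONS,
			  opts, sizeof(opts));
}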
2421 | ||
1da177e4 LT |
2422 | /* |
2423 | * Socket option code for TCP. | |
2424 | */ | |
3fdadf7d | 2425 | static int do_tcp_setsockopt(struct sock *sk, int level, |
b7058842 | 2426 | int optname, char __user *optval, unsigned int optlen) |
1da177e4 LT |
2427 | { |
2428 | struct tcp_sock *tp = tcp_sk(sk); | |
463c84b9 | 2429 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2430 | int val; |
2431 | int err = 0; | |
2432 | ||
e56fb50f WAS |
2433 | /* These are data/string values, all the others are ints */ |
2434 | switch (optname) { | |
2435 | case TCP_CONGESTION: { | |
5f8ef48d SH |
2436 | char name[TCP_CA_NAME_MAX]; |
2437 | ||
2438 | if (optlen < 1) | |
2439 | return -EINVAL; | |
2440 | ||
2441 | val = strncpy_from_user(name, optval, | |
4fdb78d3 | 2442 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); |
5f8ef48d SH |
2443 | if (val < 0) |
2444 | return -EFAULT; | |
2445 | name[val] = 0; | |
2446 | ||
2447 | lock_sock(sk); | |
6687e988 | 2448 | err = tcp_set_congestion_control(sk, name); |
5f8ef48d SH |
2449 | release_sock(sk); |
2450 | return err; | |
2451 | } | |
e56fb50f WAS |
2452 | default: |
2453 | /* fallthru */ | |
2454 | break; | |
ccbd6a5a | 2455 | } |
5f8ef48d | 2456 | |
1da177e4 LT |
2457 | if (optlen < sizeof(int)) |
2458 | return -EINVAL; | |
2459 | ||
2460 | if (get_user(val, (int __user *)optval)) | |
2461 | return -EFAULT; | |
2462 | ||
2463 | lock_sock(sk); | |
2464 | ||
2465 | switch (optname) { | |
2466 | case TCP_MAXSEG: | |
2467 | /* Values greater than the interface MTU won't take effect. However, | |
2468 | * at the point when this call is done we typically don't yet | |
2469 | * know which interface is going to be used. */ | |
c39508d6 | 2470 | if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { |
1da177e4 LT |
2471 | err = -EINVAL; |
2472 | break; | |
2473 | } | |
2474 | tp->rx_opt.user_mss = val; | |
2475 | break; | |
2476 | ||
2477 | case TCP_NODELAY: | |
2478 | if (val) { | |
2479 | /* TCP_NODELAY is weaker than TCP_CORK, so that | |
2480 | * this option on corked socket is remembered, but | |
2481 | * it is not activated until cork is cleared. | |
2482 | * | |
2483 | * However, when TCP_NODELAY is set we make | |
2484 | * an explicit push, which overrides even TCP_CORK | |
2485 | * for currently queued segments. | |
2486 | */ | |
2487 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; | |
9e412ba7 | 2488 | tcp_push_pending_frames(sk); |
1da177e4 LT |
2489 | } else { |
2490 | tp->nonagle &= ~TCP_NAGLE_OFF; | |
2491 | } | |
2492 | break; | |
2493 | ||
36e31b0a AP |
2494 | case TCP_THIN_LINEAR_TIMEOUTS: |
2495 | if (val < 0 || val > 1) | |
2496 | err = -EINVAL; | |
2497 | else | |
2498 | tp->thin_lto = val; | |
2499 | break; | |
2500 | ||
7e380175 AP |
2501 | case TCP_THIN_DUPACK: |
2502 | if (val < 0 || val > 1) | |
2503 | err = -EINVAL; | |
e2e5c4c0 | 2504 | else { |
7e380175 | 2505 | tp->thin_dupack = val; |
eed530b6 YC |
2506 | if (tp->thin_dupack) |
2507 | tcp_disable_early_retrans(tp); | |
e2e5c4c0 | 2508 | } |
7e380175 AP |
2509 | break; |
2510 | ||
ee995283 PE |
2511 | case TCP_REPAIR: |
2512 | if (!tcp_can_repair_sock(sk)) | |
2513 | err = -EPERM; | |
2514 | else if (val == 1) { | |
2515 | tp->repair = 1; | |
2516 | sk->sk_reuse = SK_FORCE_REUSE; | |
2517 | tp->repair_queue = TCP_NO_QUEUE; | |
2518 | } else if (val == 0) { | |
2519 | tp->repair = 0; | |
2520 | sk->sk_reuse = SK_NO_REUSE; | |
2521 | tcp_send_window_probe(sk); | |
2522 | } else | |
2523 | err = -EINVAL; | |
2524 | ||
2525 | break; | |
2526 | ||
2527 | case TCP_REPAIR_QUEUE: | |
2528 | if (!tp->repair) | |
2529 | err = -EPERM; | |
2530 | else if (val < TCP_QUEUES_NR) | |
2531 | tp->repair_queue = val; | |
2532 | else | |
2533 | err = -EINVAL; | |
2534 | break; | |
2535 | ||
2536 | case TCP_QUEUE_SEQ: | |
2537 | if (sk->sk_state != TCP_CLOSE) | |
2538 | err = -EPERM; | |
2539 | else if (tp->repair_queue == TCP_SEND_QUEUE) | |
2540 | tp->write_seq = val; | |
2541 | else if (tp->repair_queue == TCP_RECV_QUEUE) | |
2542 | tp->rcv_nxt = val; | |
2543 | else | |
2544 | err = -EINVAL; | |
2545 | break; | |
2546 | ||
b139ba4e PE |
2547 | case TCP_REPAIR_OPTIONS: |
2548 | if (!tp->repair) | |
2549 | err = -EINVAL; | |
2550 | else if (sk->sk_state == TCP_ESTABLISHED) | |
de248a75 PE |
2551 | err = tcp_repair_options_est(tp, |
2552 | (struct tcp_repair_opt __user *)optval, | |
2553 | optlen); | |
b139ba4e PE |
2554 | else |
2555 | err = -EPERM; | |
2556 | break; | |
2557 | ||
1da177e4 LT |
2558 | case TCP_CORK: |
2559 | /* When set, this indicates that we should always queue non-full frames. | |
2560 | * Later the user clears this option and we transmit | |
2561 | * any pending partial frames in the queue. This is | |
2562 | * meant to be used alongside sendfile() to get properly | |
2563 | * filled frames when the user (for example) must write | |
2564 | * out headers with a write() call first and then use | |
2565 | * sendfile to send out the data parts. | |
2566 | * | |
2567 | * TCP_CORK can be set together with TCP_NODELAY and it is | |
2568 | * stronger than TCP_NODELAY; a usage sketch follows tcp_setsockopt() below. | |
2569 | */ | |
2570 | if (val) { | |
2571 | tp->nonagle |= TCP_NAGLE_CORK; | |
2572 | } else { | |
2573 | tp->nonagle &= ~TCP_NAGLE_CORK; | |
2574 | if (tp->nonagle&TCP_NAGLE_OFF) | |
2575 | tp->nonagle |= TCP_NAGLE_PUSH; | |
9e412ba7 | 2576 | tcp_push_pending_frames(sk); |
1da177e4 LT |
2577 | } |
2578 | break; | |
2579 | ||
2580 | case TCP_KEEPIDLE: | |
2581 | if (val < 1 || val > MAX_TCP_KEEPIDLE) | |
2582 | err = -EINVAL; | |
2583 | else { | |
2584 | tp->keepalive_time = val * HZ; | |
2585 | if (sock_flag(sk, SOCK_KEEPOPEN) && | |
2586 | !((1 << sk->sk_state) & | |
2587 | (TCPF_CLOSE | TCPF_LISTEN))) { | |
6c37e5de | 2588 | u32 elapsed = keepalive_time_elapsed(tp); |
1da177e4 LT |
2589 | if (tp->keepalive_time > elapsed) |
2590 | elapsed = tp->keepalive_time - elapsed; | |
2591 | else | |
2592 | elapsed = 0; | |
463c84b9 | 2593 | inet_csk_reset_keepalive_timer(sk, elapsed); |
1da177e4 LT |
2594 | } |
2595 | } | |
2596 | break; | |
2597 | case TCP_KEEPINTVL: | |
2598 | if (val < 1 || val > MAX_TCP_KEEPINTVL) | |
2599 | err = -EINVAL; | |
2600 | else | |
2601 | tp->keepalive_intvl = val * HZ; | |
2602 | break; | |
2603 | case TCP_KEEPCNT: | |
2604 | if (val < 1 || val > MAX_TCP_KEEPCNT) | |
2605 | err = -EINVAL; | |
2606 | else | |
2607 | tp->keepalive_probes = val; | |
2608 | break; | |
2609 | case TCP_SYNCNT: | |
2610 | if (val < 1 || val > MAX_TCP_SYNCNT) | |
2611 | err = -EINVAL; | |
2612 | else | |
463c84b9 | 2613 | icsk->icsk_syn_retries = val; |
1da177e4 LT |
2614 | break; |
2615 | ||
2616 | case TCP_LINGER2: | |
2617 | if (val < 0) | |
2618 | tp->linger2 = -1; | |
2619 | else if (val > sysctl_tcp_fin_timeout / HZ) | |
2620 | tp->linger2 = 0; | |
2621 | else | |
2622 | tp->linger2 = val * HZ; | |
2623 | break; | |
2624 | ||
2625 | case TCP_DEFER_ACCEPT: | |
b103cf34 JA |
2626 | /* Translate value in seconds to number of retransmits */ |
2627 | icsk->icsk_accept_queue.rskq_defer_accept = | |
2628 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, | |
2629 | TCP_RTO_MAX / HZ); | |
1da177e4 LT |
2630 | break; |
2631 | ||
2632 | case TCP_WINDOW_CLAMP: | |
2633 | if (!val) { | |
2634 | if (sk->sk_state != TCP_CLOSE) { | |
2635 | err = -EINVAL; | |
2636 | break; | |
2637 | } | |
2638 | tp->window_clamp = 0; | |
2639 | } else | |
2640 | tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? | |
2641 | SOCK_MIN_RCVBUF / 2 : val; | |
2642 | break; | |
2643 | ||
2644 | case TCP_QUICKACK: | |
2645 | if (!val) { | |
463c84b9 | 2646 | icsk->icsk_ack.pingpong = 1; |
1da177e4 | 2647 | } else { |
463c84b9 | 2648 | icsk->icsk_ack.pingpong = 0; |
1da177e4 LT |
2649 | if ((1 << sk->sk_state) & |
2650 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && | |
463c84b9 ACM |
2651 | inet_csk_ack_scheduled(sk)) { |
2652 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; | |
0e4b4992 | 2653 | tcp_cleanup_rbuf(sk, 1); |
1da177e4 | 2654 | if (!(val & 1)) |
463c84b9 | 2655 | icsk->icsk_ack.pingpong = 1; |
1da177e4 LT |
2656 | } |
2657 | } | |
2658 | break; | |
2659 | ||
cfb6eeb4 YH |
2660 | #ifdef CONFIG_TCP_MD5SIG |
2661 | case TCP_MD5SIG: | |
2662 | /* Read the IP->Key mappings from userspace */ | |
2663 | err = tp->af_specific->md5_parse(sk, optval, optlen); | |
2664 | break; | |
2665 | #endif | |
dca43c75 JC |
2666 | case TCP_USER_TIMEOUT: |
2667 | /* Cap the max timeout in ms TCP will retry/retrans | |
2668 | * before giving up and aborting (ETIMEDOUT) a connection. | |
2669 | */ | |
42493570 HL |
2670 | if (val < 0) |
2671 | err = -EINVAL; | |
2672 | else | |
2673 | icsk->icsk_user_timeout = msecs_to_jiffies(val); | |
dca43c75 | 2674 | break; |
8336886f JC |
2675 | |
2676 | case TCP_FASTOPEN: | |
2677 | if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | | |
2678 | TCPF_LISTEN))) | |
2679 | err = fastopen_init_queue(sk, val); | |
2680 | else | |
2681 | err = -EINVAL; | |
2682 | break; | |
93be6ce0 AV |
2683 | case TCP_TIMESTAMP: |
2684 | if (!tp->repair) | |
2685 | err = -EPERM; | |
2686 | else | |
2687 | tp->tsoffset = val - tcp_time_stamp; | |
2688 | break; | |
c9bee3b7 ED |
2689 | case TCP_NOTSENT_LOWAT: |
2690 | tp->notsent_lowat = val; | |
2691 | sk->sk_write_space(sk); | |
2692 | break; | |
1da177e4 LT |
2693 | default: |
2694 | err = -ENOPROTOOPT; | |
2695 | break; | |
3ff50b79 SH |
2696 | } |
2697 | ||
1da177e4 LT |
2698 | release_sock(sk); |
2699 | return err; | |
2700 | } | |
2701 | ||
3fdadf7d | 2702 | int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, |
b7058842 | 2703 | unsigned int optlen) |
3fdadf7d | 2704 | { |
cf533ea5 | 2705 | const struct inet_connection_sock *icsk = inet_csk(sk); |
3fdadf7d DM |
2706 | |
2707 | if (level != SOL_TCP) | |
2708 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, | |
2709 | optval, optlen); | |
2710 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); | |
2711 | } | |
4bc2f18b | 2712 | EXPORT_SYMBOL(tcp_setsockopt); |
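/* Userspace sketch, not part of this file, of the TCP_CORK pattern
 * described in do_tcp_setsockopt() above: cork, write the headers,
 * sendfile the body, then uncork so the final partial frame is pushed.
 */
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void corked_response(int sock, int file, const void *hdr,
			    size_t hdr_len, size_t body_len)
{
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, hdr_len);
	sendfile(sock, file, NULL, body_len);
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}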
3fdadf7d DM |
2713 | |
2714 | #ifdef CONFIG_COMPAT | |
543d9cfe | 2715 | int compat_tcp_setsockopt(struct sock *sk, int level, int optname, |
b7058842 | 2716 | char __user *optval, unsigned int optlen) |
3fdadf7d | 2717 | { |
dec73ff0 ACM |
2718 | if (level != SOL_TCP) |
2719 | return inet_csk_compat_setsockopt(sk, level, optname, | |
2720 | optval, optlen); | |
3fdadf7d DM |
2721 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
2722 | } | |
543d9cfe | 2723 | EXPORT_SYMBOL(compat_tcp_setsockopt); |
3fdadf7d DM |
2724 | #endif |
2725 | ||
1da177e4 | 2726 | /* Return information about state of tcp endpoint in API format. */ |
cf533ea5 | 2727 | void tcp_get_info(const struct sock *sk, struct tcp_info *info) |
1da177e4 | 2728 | { |
cf533ea5 | 2729 | const struct tcp_sock *tp = tcp_sk(sk); |
463c84b9 | 2730 | const struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2731 | u32 now = tcp_time_stamp; |
2732 | ||
2733 | memset(info, 0, sizeof(*info)); | |
2734 | ||
2735 | info->tcpi_state = sk->sk_state; | |
6687e988 | 2736 | info->tcpi_ca_state = icsk->icsk_ca_state; |
463c84b9 | 2737 | info->tcpi_retransmits = icsk->icsk_retransmits; |
6687e988 | 2738 | info->tcpi_probes = icsk->icsk_probes_out; |
463c84b9 | 2739 | info->tcpi_backoff = icsk->icsk_backoff; |
1da177e4 LT |
2740 | |
2741 | if (tp->rx_opt.tstamp_ok) | |
2742 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | |
e60402d0 | 2743 | if (tcp_is_sack(tp)) |
1da177e4 LT |
2744 | info->tcpi_options |= TCPI_OPT_SACK; |
2745 | if (tp->rx_opt.wscale_ok) { | |
2746 | info->tcpi_options |= TCPI_OPT_WSCALE; | |
2747 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; | |
2748 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; | |
e905a9ed | 2749 | } |
1da177e4 | 2750 | |
b5c5693b | 2751 | if (tp->ecn_flags & TCP_ECN_OK) |
1da177e4 | 2752 | info->tcpi_options |= TCPI_OPT_ECN; |
b5c5693b ED |
2753 | if (tp->ecn_flags & TCP_ECN_SEEN) |
2754 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; | |
6f73601e YC |
2755 | if (tp->syn_data_acked) |
2756 | info->tcpi_options |= TCPI_OPT_SYN_DATA; | |
1da177e4 | 2757 | |
463c84b9 ACM |
2758 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
2759 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); | |
c1b4a7e6 | 2760 | info->tcpi_snd_mss = tp->mss_cache; |
463c84b9 | 2761 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
1da177e4 | 2762 | |
5ee3afba RJ |
2763 | if (sk->sk_state == TCP_LISTEN) { |
2764 | info->tcpi_unacked = sk->sk_ack_backlog; | |
2765 | info->tcpi_sacked = sk->sk_max_ack_backlog; | |
2766 | } else { | |
2767 | info->tcpi_unacked = tp->packets_out; | |
2768 | info->tcpi_sacked = tp->sacked_out; | |
2769 | } | |
1da177e4 LT |
2770 | info->tcpi_lost = tp->lost_out; |
2771 | info->tcpi_retrans = tp->retrans_out; | |
2772 | info->tcpi_fackets = tp->fackets_out; | |
2773 | ||
2774 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | |
463c84b9 | 2775 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
1da177e4 LT |
2776 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
2777 | ||
d83d8461 | 2778 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; |
1da177e4 LT |
2779 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; |
2780 | info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; | |
2781 | info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; | |
2782 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; | |
2783 | info->tcpi_snd_cwnd = tp->snd_cwnd; | |
2784 | info->tcpi_advmss = tp->advmss; | |
2785 | info->tcpi_reordering = tp->reordering; | |
2786 | ||
2787 | info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; | |
2788 | info->tcpi_rcv_space = tp->rcvq_space.space; | |
2789 | ||
2790 | info->tcpi_total_retrans = tp->total_retrans; | |
2791 | } | |
1da177e4 LT |
2792 | EXPORT_SYMBOL_GPL(tcp_get_info); |
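/* Userspace sketch, not part of this file: the TCP_INFO case in
 * do_tcp_getsockopt() below copies out the structure that
 * tcp_get_info() fills in here.
 */
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("rtt=%uus cwnd=%u retrans=%u\n",
		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}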
2793 | ||
3fdadf7d DM |
2794 | static int do_tcp_getsockopt(struct sock *sk, int level, |
2795 | int optname, char __user *optval, int __user *optlen) | |
1da177e4 | 2796 | { |
295f7324 | 2797 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2798 | struct tcp_sock *tp = tcp_sk(sk); |
2799 | int val, len; | |
2800 | ||
1da177e4 LT |
2801 | if (get_user(len, optlen)) |
2802 | return -EFAULT; | |
2803 | ||
2804 | len = min_t(unsigned int, len, sizeof(int)); | |
2805 | ||
2806 | if (len < 0) | |
2807 | return -EINVAL; | |
2808 | ||
2809 | switch (optname) { | |
2810 | case TCP_MAXSEG: | |
c1b4a7e6 | 2811 | val = tp->mss_cache; |
1da177e4 LT |
2812 | if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
2813 | val = tp->rx_opt.user_mss; | |
5e6a3ce6 PE |
2814 | if (tp->repair) |
2815 | val = tp->rx_opt.mss_clamp; | |
1da177e4 LT |
2816 | break; |
2817 | case TCP_NODELAY: | |
2818 | val = !!(tp->nonagle&TCP_NAGLE_OFF); | |
2819 | break; | |
2820 | case TCP_CORK: | |
2821 | val = !!(tp->nonagle&TCP_NAGLE_CORK); | |
2822 | break; | |
2823 | case TCP_KEEPIDLE: | |
df19a626 | 2824 | val = keepalive_time_when(tp) / HZ; |
1da177e4 LT |
2825 | break; |
2826 | case TCP_KEEPINTVL: | |
df19a626 | 2827 | val = keepalive_intvl_when(tp) / HZ; |
1da177e4 LT |
2828 | break; |
2829 | case TCP_KEEPCNT: | |
df19a626 | 2830 | val = keepalive_probes(tp); |
1da177e4 LT |
2831 | break; |
2832 | case TCP_SYNCNT: | |
295f7324 | 2833 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
1da177e4 LT |
2834 | break; |
2835 | case TCP_LINGER2: | |
2836 | val = tp->linger2; | |
2837 | if (val >= 0) | |
2838 | val = (val ? : sysctl_tcp_fin_timeout) / HZ; | |
2839 | break; | |
2840 | case TCP_DEFER_ACCEPT: | |
b103cf34 JA |
2841 | val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, |
2842 | TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); | |
1da177e4 LT |
2843 | break; |
2844 | case TCP_WINDOW_CLAMP: | |
2845 | val = tp->window_clamp; | |
2846 | break; | |
2847 | case TCP_INFO: { | |
2848 | struct tcp_info info; | |
2849 | ||
2850 | if (get_user(len, optlen)) | |
2851 | return -EFAULT; | |
2852 | ||
2853 | tcp_get_info(sk, &info); | |
2854 | ||
2855 | len = min_t(unsigned int, len, sizeof(info)); | |
2856 | if (put_user(len, optlen)) | |
2857 | return -EFAULT; | |
2858 | if (copy_to_user(optval, &info, len)) | |
2859 | return -EFAULT; | |
2860 | return 0; | |
2861 | } | |
2862 | case TCP_QUICKACK: | |
295f7324 | 2863 | val = !icsk->icsk_ack.pingpong; |
1da177e4 | 2864 | break; |
5f8ef48d SH |
2865 | |
2866 | case TCP_CONGESTION: | |
2867 | if (get_user(len, optlen)) | |
2868 | return -EFAULT; | |
2869 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); | |
2870 | if (put_user(len, optlen)) | |
2871 | return -EFAULT; | |
6687e988 | 2872 | if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) |
5f8ef48d SH |
2873 | return -EFAULT; |
2874 | return 0; | |
e56fb50f | 2875 | |
3c0fef0b JH |
2876 | case TCP_THIN_LINEAR_TIMEOUTS: |
2877 | val = tp->thin_lto; | |
2878 | break; | |
2879 | case TCP_THIN_DUPACK: | |
2880 | val = tp->thin_dupack; | |
2881 | break; | |
dca43c75 | 2882 | |
ee995283 PE |
2883 | case TCP_REPAIR: |
2884 | val = tp->repair; | |
2885 | break; | |
2886 | ||
2887 | case TCP_REPAIR_QUEUE: | |
2888 | if (tp->repair) | |
2889 | val = tp->repair_queue; | |
2890 | else | |
2891 | return -EINVAL; | |
2892 | break; | |
2893 | ||
2894 | case TCP_QUEUE_SEQ: | |
2895 | if (tp->repair_queue == TCP_SEND_QUEUE) | |
2896 | val = tp->write_seq; | |
2897 | else if (tp->repair_queue == TCP_RECV_QUEUE) | |
2898 | val = tp->rcv_nxt; | |
2899 | else | |
2900 | return -EINVAL; | |
2901 | break; | |
2902 | ||
dca43c75 JC |
2903 | case TCP_USER_TIMEOUT: |
2904 | val = jiffies_to_msecs(icsk->icsk_user_timeout); | |
2905 | break; | |
93be6ce0 AV |
2906 | case TCP_TIMESTAMP: |
2907 | val = tcp_time_stamp + tp->tsoffset; | |
2908 | break; | |
c9bee3b7 ED |
2909 | case TCP_NOTSENT_LOWAT: |
2910 | val = tp->notsent_lowat; | |
2911 | break; | |
1da177e4 LT |
2912 | default: |
2913 | return -ENOPROTOOPT; | |
3ff50b79 | 2914 | } |
1da177e4 LT |
2915 | |
2916 | if (put_user(len, optlen)) | |
2917 | return -EFAULT; | |
2918 | if (copy_to_user(optval, &val, len)) | |
2919 | return -EFAULT; | |
2920 | return 0; | |
2921 | } | |
2922 | ||
3fdadf7d DM |
2923 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, |
2924 | int __user *optlen) | |
2925 | { | |
2926 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2927 | ||
2928 | if (level != SOL_TCP) | |
2929 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, | |
2930 | optval, optlen); | |
2931 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); | |
2932 | } | |
4bc2f18b | 2933 | EXPORT_SYMBOL(tcp_getsockopt); |
3fdadf7d DM |
2934 | |
2935 | #ifdef CONFIG_COMPAT | |
543d9cfe ACM |
2936 | int compat_tcp_getsockopt(struct sock *sk, int level, int optname, |
2937 | char __user *optval, int __user *optlen) | |
3fdadf7d | 2938 | { |
dec73ff0 ACM |
2939 | if (level != SOL_TCP) |
2940 | return inet_csk_compat_getsockopt(sk, level, optname, | |
2941 | optval, optlen); | |
3fdadf7d DM |
2942 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); |
2943 | } | |
543d9cfe | 2944 | EXPORT_SYMBOL(compat_tcp_getsockopt); |
3fdadf7d | 2945 | #endif |
1da177e4 | 2946 | |
cfb6eeb4 | 2947 | #ifdef CONFIG_TCP_MD5SIG |
71cea17e ED |
2948 | static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; |
2949 | static DEFINE_MUTEX(tcp_md5sig_mutex); | |
cfb6eeb4 | 2950 | |
765cf997 | 2951 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) |
cfb6eeb4 YH |
2952 | { |
2953 | int cpu; | |
765cf997 | 2954 | |
cfb6eeb4 | 2955 | for_each_possible_cpu(cpu) { |
765cf997 ED |
2956 | struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); |
2957 | ||
2958 | if (p->md5_desc.tfm) | |
2959 | crypto_free_hash(p->md5_desc.tfm); | |
cfb6eeb4 YH |
2960 | } |
2961 | free_percpu(pool); | |
2962 | } | |
2963 | ||
71cea17e | 2964 | static void __tcp_alloc_md5sig_pool(void) |
cfb6eeb4 YH |
2965 | { |
2966 | int cpu; | |
765cf997 | 2967 | struct tcp_md5sig_pool __percpu *pool; |
cfb6eeb4 | 2968 | |
765cf997 | 2969 | pool = alloc_percpu(struct tcp_md5sig_pool); |
cfb6eeb4 | 2970 | if (!pool) |
71cea17e | 2971 | return; |
cfb6eeb4 YH |
2972 | |
2973 | for_each_possible_cpu(cpu) { | |
cfb6eeb4 YH |
2974 | struct crypto_hash *hash; |
2975 | ||
cfb6eeb4 | 2976 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); |
50c3a487 | 2977 | if (IS_ERR_OR_NULL(hash)) |
cfb6eeb4 YH |
2978 | goto out_free; |
2979 | ||
765cf997 | 2980 | per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; |
cfb6eeb4 | 2981 | } |
71cea17e ED |
2982 | /* before setting tcp_md5sig_pool, we must commit all writes |
2983 | * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() | |
2984 | */ | |
2985 | smp_wmb(); | |
2986 | tcp_md5sig_pool = pool; | |
2987 | return; | |
cfb6eeb4 YH |
2988 | out_free: |
2989 | __tcp_free_md5sig_pool(pool); | |
cfb6eeb4 YH |
2990 | } |
2991 | ||
71cea17e | 2992 | bool tcp_alloc_md5sig_pool(void) |
cfb6eeb4 | 2993 | { |
71cea17e ED |
2994 | if (unlikely(!tcp_md5sig_pool)) { |
2995 | mutex_lock(&tcp_md5sig_mutex); | |
2996 | ||
2997 | if (!tcp_md5sig_pool) | |
2998 | __tcp_alloc_md5sig_pool(); | |
2999 | ||
3000 | mutex_unlock(&tcp_md5sig_mutex); | |
cfb6eeb4 | 3001 | } |
71cea17e | 3002 | return tcp_md5sig_pool != NULL; |
cfb6eeb4 | 3003 | } |
cfb6eeb4 YH |
3004 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
3005 | ||
35790c04 ED |
3006 | |
3007 | /** | |
3008 | * tcp_get_md5sig_pool - get md5sig_pool for this user | |
3009 | * | |
3011 | * We use a percpu structure, so if we succeed, we exit with preemption | |
3012 | * and BHs disabled, to make sure another thread or softirq handler | |
3013 | * won't try to get the same context. | |
3013 | */ | |
3014 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | |
cfb6eeb4 | 3015 | { |
765cf997 | 3016 | struct tcp_md5sig_pool __percpu *p; |
35790c04 ED |
3017 | |
3018 | local_bh_disable(); | |
71cea17e | 3019 | p = ACCESS_ONCE(tcp_md5sig_pool); |
35790c04 | 3020 | if (p) |
71cea17e | 3021 | return __this_cpu_ptr(p); |
cfb6eeb4 | 3022 | |
35790c04 ED |
3023 | local_bh_enable(); |
3024 | return NULL; | |
3025 | } | |
3026 | EXPORT_SYMBOL(tcp_get_md5sig_pool); | |
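/* Callers must drop the implicit BH protection when they are done; in
 * this tree that is tcp_put_md5sig_pool() (a local_bh_enable() wrapper
 * in include/net/tcp.h). Sketch of the expected pairing:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (!hp)
 *		return 1;
 *	... use &hp->md5_desc ...
 *	tcp_put_md5sig_pool();
 */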
cfb6eeb4 | 3027 | |
49a72dfb | 3028 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, |
ca35a0ef | 3029 | const struct tcphdr *th) |
49a72dfb AL |
3030 | { |
3031 | struct scatterlist sg; | |
ca35a0ef | 3032 | struct tcphdr hdr; |
49a72dfb AL |
3033 | int err; |
3034 | ||
ca35a0ef ED |
3035 | /* We are not allowed to change the tcphdr; make a local copy. */ |
3036 | memcpy(&hdr, th, sizeof(hdr)); | |
3037 | hdr.check = 0; | |
3038 | ||
49a72dfb | 3039 | /* options aren't included in the hash */ |
ca35a0ef ED |
3040 | sg_init_one(&sg, &hdr, sizeof(hdr)); |
3041 | err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr)); | |
49a72dfb AL |
3042 | return err; |
3043 | } | |
49a72dfb AL |
3044 | EXPORT_SYMBOL(tcp_md5_hash_header); |
3045 | ||
3046 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | |
cf533ea5 | 3047 | const struct sk_buff *skb, unsigned int header_len) |
49a72dfb AL |
3048 | { |
3049 | struct scatterlist sg; | |
3050 | const struct tcphdr *tp = tcp_hdr(skb); | |
3051 | struct hash_desc *desc = &hp->md5_desc; | |
95c96174 ED |
3052 | unsigned int i; |
3053 | const unsigned int head_data_len = skb_headlen(skb) > header_len ? | |
3054 | skb_headlen(skb) - header_len : 0; | |
49a72dfb | 3055 | const struct skb_shared_info *shi = skb_shinfo(skb); |
d7fd1b57 | 3056 | struct sk_buff *frag_iter; |
49a72dfb AL |
3057 | |
3058 | sg_init_table(&sg, 1); | |
3059 | ||
3060 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); | |
3061 | if (crypto_hash_update(desc, &sg, head_data_len)) | |
3062 | return 1; | |
3063 | ||
3064 | for (i = 0; i < shi->nr_frags; ++i) { | |
3065 | const struct skb_frag_struct *f = &shi->frags[i]; | |
54d27fcb ED |
3066 | unsigned int offset = f->page_offset; |
3067 | struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); | |
3068 | ||
3069 | sg_set_page(&sg, page, skb_frag_size(f), | |
3070 | offset_in_page(offset)); | |
9e903e08 | 3071 | if (crypto_hash_update(desc, &sg, skb_frag_size(f))) |
49a72dfb AL |
3072 | return 1; |
3073 | } | |
3074 | ||
d7fd1b57 ED |
3075 | skb_walk_frags(skb, frag_iter) |
3076 | if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) | |
3077 | return 1; | |
3078 | ||
49a72dfb AL |
3079 | return 0; |
3080 | } | |
49a72dfb AL |
3081 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); |
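/* Note on the frag loop above: f->page_offset may point past the first
 * page when the frag lives in a compound page, so the page pointer is
 * advanced by (offset >> PAGE_SHIFT) and only offset_in_page(offset)
 * is handed to sg_set_page(). The skb's frag list is then hashed
 * recursively with header_len == 0, since only the head skb carries
 * a TCP header to skip.
 */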
3082 | ||
cf533ea5 | 3083 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) |
49a72dfb AL |
3084 | { |
3085 | struct scatterlist sg; | |
3086 | ||
3087 | sg_init_one(&sg, key->key, key->keylen); | |
3088 | return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); | |
3089 | } | |
49a72dfb AL |
3090 | EXPORT_SYMBOL(tcp_md5_hash_key); |
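/* The three helpers above are combined per segment by the address
 * family code (e.g. tcp_v4_md5_hash_skb() in tcp_ipv4.c), roughly:
 *
 *	hp = tcp_get_md5sig_pool();
 *	crypto_hash_init(&hp->md5_desc);
 *	... hash the pseudo-header (addresses, protocol, length) ...
 *	tcp_md5_hash_header(hp, th);
 *	tcp_md5_hash_skb_data(hp, skb, th->doff << 2);
 *	tcp_md5_hash_key(hp, key);
 *	crypto_hash_final(&hp->md5_desc, md5_hash);
 *	tcp_put_md5sig_pool();
 *
 * matching the digest order RFC 2385 prescribes; error handling is
 * omitted in this sketch.
 */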
3091 | ||
cfb6eeb4 YH |
3092 | #endif |
3093 | ||
4ac02bab AK |
3094 | void tcp_done(struct sock *sk) |
3095 | { | |
8336886f JC |
3096 | struct request_sock *req = tcp_sk(sk)->fastopen_rsk; |
3097 | ||
5a5f3a8d | 3098 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
63231bdd | 3099 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
4ac02bab AK |
3100 | |
3101 | tcp_set_state(sk, TCP_CLOSE); | |
3102 | tcp_clear_xmit_timers(sk); | |
8336886f JC |
3103 | if (req != NULL) |
3104 | reqsk_fastopen_remove(sk, req, false); | |
4ac02bab AK |
3105 | |
3106 | sk->sk_shutdown = SHUTDOWN_MASK; | |
3107 | ||
3108 | if (!sock_flag(sk, SOCK_DEAD)) | |
3109 | sk->sk_state_change(sk); | |
3110 | else | |
3111 | inet_csk_destroy_sock(sk); | |
3112 | } | |
3113 | EXPORT_SYMBOL_GPL(tcp_done); | |
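/* tcp_done() moves a socket to TCP_CLOSE on error and completion paths
 * (e.g. tcp_reset() on an incoming RST, tcp_write_err() on a fatal
 * retransmission timeout). If user space still holds a reference, the
 * socket is only woken via sk_state_change(); an orphaned socket is
 * torn down immediately through inet_csk_destroy_sock().
 */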
3114 | ||
5f8ef48d | 3115 | extern struct tcp_congestion_ops tcp_reno; |
1da177e4 LT |
3116 | |
3117 | static __initdata unsigned long thash_entries; | |
3118 | static int __init set_thash_entries(char *str) | |
3119 | { | |
413c27d8 EZ |
3120 | ssize_t ret; |
3121 | ||
1da177e4 LT |
3122 | if (!str) |
3123 | return 0; | |
413c27d8 EZ |
3124 | |
3125 | ret = kstrtoul(str, 0, &thash_entries); | |
3126 | if (ret) | |
3127 | return 0; | |
3128 | ||
1da177e4 LT |
3129 | return 1; |
3130 | } | |
3131 | __setup("thash_entries=", set_thash_entries); | |
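/* "thash_entries" is a kernel boot parameter; booting with e.g.
 *
 *	thash_entries=131072
 *
 * pins the established-hash size instead of letting tcp_init() scale
 * it to available memory. On a parse failure set_thash_entries()
 * returns 0, so the unrecognized string is passed on rather than
 * silently consumed.
 */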
3132 | ||
a4fe34bf | 3133 | static void tcp_init_mem(void) |
4acb4190 | 3134 | { |
4acb4190 GC |
3135 | unsigned long limit = nr_free_buffer_pages() / 8; |
3136 | limit = max(limit, 128UL); | |
a4fe34bf EB |
3137 | sysctl_tcp_mem[0] = limit / 4 * 3; |
3138 | sysctl_tcp_mem[1] = limit; | |
3139 | sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; | |
4acb4190 GC |
3140 | } |
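/* Worked example: with 8 GB of free 4 KB pages (nr_free_buffer_pages()
 * = 2097152), limit = 262144 pages, giving sysctl_tcp_mem =
 * { 196608, 262144, 393216 }: memory pressure starts at 1 GB of TCP
 * buffer pages and the hard limit is 1.5 GB. Figures are illustrative;
 * the real values depend on the machine.
 */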
3141 | ||
1da177e4 LT |
3142 | void __init tcp_init(void) |
3143 | { | |
3144 | struct sk_buff *skb = NULL; | |
f03d78db | 3145 | unsigned long limit; |
b49960a0 | 3146 | int max_rshare, max_wshare, cnt; |
074b8517 | 3147 | unsigned int i; |
1da177e4 | 3148 | |
1f9e636e | 3149 | BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); |
1da177e4 | 3150 | |
1748376b | 3151 | percpu_counter_init(&tcp_sockets_allocated, 0); |
dd24c001 | 3152 | percpu_counter_init(&tcp_orphan_count, 0); |
6e04e021 ACM |
3153 | tcp_hashinfo.bind_bucket_cachep = |
3154 | kmem_cache_create("tcp_bind_bucket", | |
3155 | sizeof(struct inet_bind_bucket), 0, | |
20c2df83 | 3156 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1da177e4 | 3157 | |
1da177e4 LT |
3158 | /* Size and allocate the main established and bind bucket |
3159 | * hash tables. | |
3160 | * | |
3161 | * The methodology is similar to that of the buffer cache. | |
3162 | */ | |
6e04e021 | 3163 | tcp_hashinfo.ehash = |
1da177e4 | 3164 | alloc_large_system_hash("TCP established", |
0f7ff927 | 3165 | sizeof(struct inet_ehash_bucket), |
1da177e4 | 3166 | thash_entries, |
fd90b29d | 3167 | 17, /* one slot per 128 KB of memory */ |
9e950efa | 3168 | 0, |
1da177e4 | 3169 | NULL, |
f373b53b | 3170 | &tcp_hashinfo.ehash_mask, |
31fe62b9 | 3171 | 0, |
0ccfe618 | 3172 | thash_entries ? 0 : 512 * 1024); |
05dbc7b5 | 3173 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) |
3ab5aee7 | 3174 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
05dbc7b5 | 3175 | |
230140cf ED |
3176 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) |
3177 | panic("TCP: failed to alloc ehash_locks"); | |
6e04e021 | 3178 | tcp_hashinfo.bhash = |
1da177e4 | 3179 | alloc_large_system_hash("TCP bind", |
0f7ff927 | 3180 | sizeof(struct inet_bind_hashbucket), |
f373b53b | 3181 | tcp_hashinfo.ehash_mask + 1, |
fd90b29d | 3182 | 17, /* one slot per 128 KB of memory */ |
9e950efa | 3183 | 0, |
6e04e021 | 3184 | &tcp_hashinfo.bhash_size, |
1da177e4 | 3185 | NULL, |
31fe62b9 | 3186 | 0, |
1da177e4 | 3187 | 64 * 1024); |
074b8517 | 3188 | tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; |
6e04e021 ACM |
3189 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { |
3190 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); | |
3191 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); | |
1da177e4 LT |
3192 | } |
3193 | ||
c5ed63d6 ED |
3194 | |
3195 | cnt = tcp_hashinfo.ehash_mask + 1; | |
3196 | ||
3197 | tcp_death_row.sysctl_max_tw_buckets = cnt / 2; | |
3198 | sysctl_tcp_max_orphans = cnt / 2; | |
3199 | sysctl_max_syn_backlog = max(128, cnt / 256); | |
1da177e4 | 3200 | |
a4fe34bf | 3201 | tcp_init_mem(); |
c43b874d | 3202 | /* Set per-socket limits to no more than 1/128 of the pressure threshold */ |
5fb84b14 | 3203 | limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); |
b49960a0 ED |
3204 | max_wshare = min(4UL*1024*1024, limit); |
3205 | max_rshare = min(6UL*1024*1024, limit); | |
7b4f4b5e | 3206 | |
3ab224be | 3207 | sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; |
7b4f4b5e | 3208 | sysctl_tcp_wmem[1] = 16*1024; |
b49960a0 | 3209 | sysctl_tcp_wmem[2] = max(64*1024, max_wshare); |
7b4f4b5e | 3210 | |
3ab224be | 3211 | sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; |
7b4f4b5e | 3212 | sysctl_tcp_rmem[1] = 87380; |
b49960a0 | 3213 | sysctl_tcp_rmem[2] = max(87380, max_rshare); |
1da177e4 | 3214 | |
afd46503 | 3215 | pr_info("Hash tables configured (established %u bind %u)\n", |
058bd4d2 | 3216 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
317a76f9 | 3217 | |
51c5d0c4 DM |
3218 | tcp_metrics_init(); |
3219 | ||
317a76f9 | 3220 | tcp_register_congestion_control(&tcp_reno); |
da5c78c8 | 3221 | |
46d3ceab | 3222 | tcp_tasklet_init(); |
1da177e4 | 3223 | } |
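/* Worked example for the per-socket caps above: with 8 GB free,
 * limit = 2097152 << (12 - 7) = 64 MB, so max_wshare and max_rshare
 * saturate at their 4 MB and 6 MB ceilings and tcp_wmem[2]/tcp_rmem[2]
 * end up at 4 MB and 6 MB respectively; only on machines with well
 * under 512 MB of free buffer pages does "limit" itself bind.
 */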