/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick :	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick :	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries get used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
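
/* Example transitions (per RFC 793): an active close typically walks
 * ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE, while the
 * passive side walks ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE.
 */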

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (tcp_memory_pressure)
		return;
	val = jiffies;

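	/* jiffies can legitimately be zero at wraparound; nudge the value
	 * so that the flag, which doubles as the entry timestamp, still
	 * reads as set.
	 */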
	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!tcp_memory_pressure)
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

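/* Worked example (a sketch; all three arguments share one time unit):
 * with timeout = 1 and rto_max = 8, the cumulative periods grow
 * 1, 3, 7, 15, ... (exponential backoff capped at rto_max), so
 * secs_to_retrans(15, 1, 8) == 4 and retrans_to_secs(4, 1, 8) == 15.
 */
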
static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}

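/* Illustrative numbers: 10 segments delivered with mss_cache = 1448 over
 * a 10,000 usec sampling interval gives
 * (10 * 1448 * USEC_PER_SEC) / 10000 = 1,448,000 bytes/sec.
 */
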
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];

	sk_sockets_allocated_inc(sk);
	sk->sk_route_forced_caps = NETIF_F_GSO;
}
EXPORT_SYMBOL(tcp_init_sock);

void tcp_init_transfer(struct sock *sk, int bpf_op)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_mtup_init(sk);
	icsk->icsk_af_ops->rebuild_header(sk);
	tcp_init_metrics(sk);
	tcp_call_bpf(sk, bpf_op, 0, NULL);
	tcp_init_congestion_control(sk);
	tcp_init_buffer_space(sk);
}

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

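/* Illustrative userspace counterpart (a sketch, assuming the standard
 * SO_TIMESTAMPING API; error handling elided):
 *
 *	int val = SOF_TIMESTAMPING_TX_ACK | SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 */
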
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
					  int target, struct sock *sk)
{
	return (tp->rcv_nxt - tp->copied_seq >= target) ||
		(sk->sk_prot->stream_memory_read ?
		sk->sk_prot->stream_memory_read(sk) : false);
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	int state;

	sock_poll_wait(file, sk_sleep(sk), wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write() in state
	 * CLOSE_WAIT impossible. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		if (tcp_stream_is_readable(tp, target, sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

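/* Illustrative userspace view (a sketch, plain poll(2); error handling
 * elided). POLLIN becomes ready once at least the receive low-water mark
 * (sock_rcvlowat()) bytes are queued:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	poll(&pfd, 1, timeout_ms);
 */
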
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

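/* Illustrative userspace use (a sketch; error handling elided):
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	(bytes received but not yet read)
 *	ioctl(fd, SIOCOUTQ, &unsent);	(bytes sent but not yet acked)
 */
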
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

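/* Force a push once more than half of the largest window ever seen has
 * accumulated past the last pushed byte.
 */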
static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum    = 0;
	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked  = 0;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
}

static void tcp_push(struct sock *sk, int flags, int mss_now,
		     int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	if (unlikely(tcp_under_memory_pressure(sk)))
		sk_mem_reclaim_partial(sk);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

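/* Illustrative numbers (a sketch): with mss_now = 1448 and a ~64KB
 * sk_gso_max_size, new_size_goal lands just under 64KB, so size_goal
 * settles on the largest whole multiple of the MSS below it, about 45
 * segments (the exact count depends on MAX_TCP_HEADER and on
 * tcp_bound_to_half_wnd() clamping).
 */
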
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		int copy, i;
		bool can_coalesce;

		if (!skb || (copy = size_goal - skb->len) <= 0 ||
		    !tcp_skb_can_collapse_to(skb)) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
					tcp_rtx_and_write_queues_empty(sk));
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= sysctl_max_skb_frags) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		if (!(flags & MSG_NO_SHARED_FRAGS))
			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		size -= copy;
		if (!size)
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sk->sk_tsflags);
		if (!(flags & MSG_SENDPAGE_NOTLAST))
			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
		     err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return sk_stream_error(sk, flags, err);
}
EXPORT_SYMBOL_GPL(do_tcp_sendpages);

int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	if (!(sk->sk_route_caps & NETIF_F_SG))
		return sock_no_sendpage_locked(sk, page, offset, size, flags);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	return do_tcp_sendpages(sk, page, offset, size, flags);
}
EXPORT_SYMBOL_GPL(tcp_sendpage_locked);

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(tcp_sendpage);

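/* Illustrative userspace path that lands here (a sketch, assuming
 * sendfile(2) on a connected TCP socket; error handling elided):
 *
 *	off_t off = 0;
 *	sendfile(tcp_fd, file_fd, &off, file_size);
 */
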
/* Do not bother using a page frag for very small frames.
 * But use this heuristic only for the first skb in write queue.
 *
 * Having no payload in skb->head allows better SACK shifting
 * in tcp_shift_skb_data(), reducing sack/rack overhead, because
 * the write queue has fewer skbs.
 * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
 * This also speeds up tso_fragment(), since it won't fall back
 * to tcp_fragment().
 */
static int linear_payload_sz(bool first_skb)
{
	if (first_skb)
		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
	return 0;
}

static int select_size(bool first_skb, bool zc)
{
	if (zc)
		return 0;
	return linear_payload_sz(first_skb);
}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				int *copied, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;

	if (inet->defer_connect) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet->defer_connect = 0;
	}
	return err;
}

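/* Illustrative client-side use (a sketch, per tcp(7); error handling
 * elided). MSG_FASTOPEN carries data in the SYN when a TFO cookie is
 * available:
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 */
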
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct sockcm_cookie sockc;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	bool process_backlog = false;
	bool zc = false;
	long timeo;

	flags = msg->msg_flags;

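	/* Illustrative userspace counterpart of MSG_ZEROCOPY (a sketch;
	 * completion notifications are read from the socket's error queue):
	 *
	 *	int one = 1;
	 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	 *	send(fd, buf, len, MSG_ZEROCOPY);
	 */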
	if (flags & MSG_ZEROCOPY && size) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			err = -EINVAL;
			goto out_err;
		}

		skb = tcp_write_queue_tail(sk);
		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
		if (!uarg) {
			err = -ENOBUFS;
			goto out_err;
		}

		zc = sk->sk_route_caps & NETIF_F_SG;
		if (!zc)
			uarg->zerocopy = 0;
	}

	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
	    !tp->repair) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out_nopush;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err)) {
			err = -EINVAL;
			goto out_err;
		}
	}

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Ok commence sending. */
	copied = 0;

restart:
	mss_now = tcp_send_mss(sk, &size_goal, flags);

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (msg_data_left(msg)) {
		int copy = 0;

		skb = tcp_write_queue_tail(sk);
		if (skb)
			copy = size_goal - skb->len;

		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
			bool first_skb;
			int linear;

new_segment:
			/* Allocate new segment. If the interface is SG,
			 * allocate skb fitting to single page.
			 */
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			if (process_backlog && sk_flush_backlog(sk)) {
				process_backlog = false;
				goto restart;
			}
			first_skb = tcp_rtx_and_write_queues_empty(sk);
			linear = select_size(first_skb, zc);
			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
						  first_skb);
			if (!skb)
				goto wait_for_memory;

			process_backlog = true;
			skb->ip_summed = CHECKSUM_PARTIAL;

			skb_entail(sk, skb);
			copy = size_goal;

			/* All packets are restored as if they have
			 * already been sent. skb_mstamp isn't set to
			 * avoid wrong rtt estimation.
			 */
			if (tp->repair)
				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
		}

		/* Try to append data to the end of skb. */
		if (copy > msg_data_left(msg))
			copy = msg_data_left(msg);

		/* Where to copy to? */
		if (skb_availroom(skb) > 0 && !zc) {
			/* We have some space in skb head. Superb! */
			copy = min_t(int, copy, skb_availroom(skb));
			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else if (!zc) {
			bool merge = true;
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			if (!sk_page_frag_refill(sk, pfrag))
				goto wait_for_memory;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				if (i >= sysctl_max_skb_frags) {
					tcp_mark_push(tp, skb);
					goto new_segment;
				}
				merge = false;
			}

			copy = min_t(int, copy, pfrag->size - pfrag->offset);

			if (!sk_wmem_schedule(sk, copy))
				goto wait_for_memory;

			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
			if (err)
				goto do_error;

			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			} else {
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				page_ref_inc(pfrag->page);
			}
			pfrag->offset += copy;
		} else {
			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
			if (err == -EMSGSIZE || err == -EEXIST) {
				tcp_mark_push(tp, skb);
				goto new_segment;
			}
			if (err < 0)
				goto do_error;
			copy = err;
		}

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		copied += copy;
1377                 if (!msg_data_left(msg)) {
1378                         if (unlikely(flags & MSG_EOR))
1379                                 TCP_SKB_CB(skb)->eor = 1;
1380                         goto out;
1381                 }
1382
1383                 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
1384                         continue;
1385
1386                 if (forced_push(tp)) {
1387                         tcp_mark_push(tp, skb);
1388                         __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1389                 } else if (skb == tcp_send_head(sk))
1390                         tcp_push_one(sk, mss_now);
1391                 continue;
1392
1393 wait_for_sndbuf:
1394                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1395 wait_for_memory:
1396                 if (copied)
1397                         tcp_push(sk, flags & ~MSG_MORE, mss_now,
1398                                  TCP_NAGLE_PUSH, size_goal);
1399
1400                 err = sk_stream_wait_memory(sk, &timeo);
1401                 if (err != 0)
1402                         goto do_error;
1403
1404                 mss_now = tcp_send_mss(sk, &size_goal, flags);
1405         }
1406
1407 out:
1408         if (copied) {
1409                 tcp_tx_timestamp(sk, sockc.tsflags);
1410                 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1411         }
1412 out_nopush:
1413         sock_zerocopy_put(uarg);
1414         return copied + copied_syn;
1415
1416 do_fault:
1417         if (!skb->len) {
1418                 tcp_unlink_write_queue(skb, sk);
1419                 /* It is the one place in all of TCP, except connection
1420                  * reset, where we can be unlinking the send_head.
1421                  */
1422                 tcp_check_send_head(sk, skb);
1423                 sk_wmem_free_skb(sk, skb);
1424         }
1425
1426 do_error:
1427         if (copied + copied_syn)
1428                 goto out;
1429 out_err:
1430         sock_zerocopy_put_abort(uarg);
1431         err = sk_stream_error(sk, flags, err);
1432         /* make sure we wake any epoll edge trigger waiter */
1433         if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
1434                      err == -EAGAIN)) {
1435                 sk->sk_write_space(sk);
1436                 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1437         }
1438         return err;
1439 }
1440 EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1441
1442 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1443 {
1444         int ret;
1445
1446         lock_sock(sk);
1447         ret = tcp_sendmsg_locked(sk, msg, size);
1448         release_sock(sk);
1449
1450         return ret;
1451 }
1452 EXPORT_SYMBOL(tcp_sendmsg);
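
/* Illustrative sketch, not part of this file: a typical userspace caller
 * of the sendmsg path above. send() may accept fewer bytes than requested,
 * so robust callers loop; the helper name below is hypothetical.
 *
 *	static ssize_t send_all(int fd, const char *buf, size_t len)
 *	{
 *		size_t off = 0;
 *
 *		while (off < len) {
 *			ssize_t n = send(fd, buf + off, len - off, 0);
 *
 *			if (n < 0) {
 *				if (errno == EINTR)
 *					continue;	// retry on signal
 *				return -1;
 *			}
 *			off += n;
 *		}
 *		return off;
 *	}
 */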
1453
1454 /*
1455  *      Handle reading urgent data. BSD has very simple semantics for
1456  *      this, no blocking and very strange errors 8)
1457  */
1458
1459 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1460 {
1461         struct tcp_sock *tp = tcp_sk(sk);
1462
1463         /* No URG data to read. */
1464         if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1465             tp->urg_data == TCP_URG_READ)
1466                 return -EINVAL; /* Yes, this is right! */
1467
1468         if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1469                 return -ENOTCONN;
1470
1471         if (tp->urg_data & TCP_URG_VALID) {
1472                 int err = 0;
1473                 char c = tp->urg_data;
1474
1475                 if (!(flags & MSG_PEEK))
1476                         tp->urg_data = TCP_URG_READ;
1477
1478                 /* Read urgent data. */
1479                 msg->msg_flags |= MSG_OOB;
1480
1481                 if (len > 0) {
1482                         if (!(flags & MSG_TRUNC))
1483                                 err = memcpy_to_msg(msg, &c, 1);
1484                         len = 1;
1485                 } else
1486                         msg->msg_flags |= MSG_TRUNC;
1487
1488                 return err ? -EFAULT : len;
1489         }
1490
1491         if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1492                 return 0;
1493
1494         /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1495          * the available implementations agree in this case:
1496          * this call should never block, independent of the
1497          * blocking state of the socket.
1498          * Mike <pall@rz.uni-karlsruhe.de>
1499          */
1500         return -EAGAIN;
1501 }
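
/* Illustrative sketch, an assumption rather than kernel code: how the
 * semantics above look from userspace. With SO_OOBINLINE off, the single
 * urgent byte can be fetched with MSG_OOB; the error returns above map
 * to errno in the usual way.
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *
 *	if (n < 0 && errno == EINVAL)
 *		;	// no urgent data pending, or already consumed
 *	else if (n < 0 && errno == EAGAIN)
 *		;	// urgent data signalled but not yet arrived
 */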
1502
1503 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1504 {
1505         struct sk_buff *skb;
1506         int copied = 0, err = 0;
1507
1508         /* XXX -- need to support SO_PEEK_OFF */
1509
1510         skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
1511                 err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1512                 if (err)
1513                         return err;
1514                 copied += skb->len;
1515         }
1516
1517         skb_queue_walk(&sk->sk_write_queue, skb) {
1518                 err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1519                 if (err)
1520                         break;
1521
1522                 copied += skb->len;
1523         }
1524
1525         return err ?: copied;
1526 }
1527
1528 /* Clean up the receive buffer for full frames taken by the user,
1529  * then send an ACK if necessary.  COPIED is the number of bytes
1530  * tcp_recvmsg has given to the user so far; it speeds up the
1531  * calculation of whether or not we must ACK for the sake of
1532  * a window update.
1533  */
1534 static void tcp_cleanup_rbuf(struct sock *sk, int copied)
1535 {
1536         struct tcp_sock *tp = tcp_sk(sk);
1537         bool time_to_ack = false;
1538
1539         struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1540
1541         WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1542              "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1543              tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1544
1545         if (inet_csk_ack_scheduled(sk)) {
1546                 const struct inet_connection_sock *icsk = inet_csk(sk);
1547                 /* Delayed ACKs frequently hit locked sockets during bulk
1548                  * receive. */
1549                 if (icsk->icsk_ack.blocked ||
1550                     /* Once-per-two-segments ACK was not sent by tcp_input.c */
1551                     tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1552                     /*
1553                      * If this read emptied the read buffer, we send an ACK:
1554                      * the connection is not bidirectional, the user has
1555                      * drained the receive buffer, and there was a small
1556                      * segment in the queue.
1557                      */
1558                     (copied > 0 &&
1559                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1560                       ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1561                        !icsk->icsk_ack.pingpong)) &&
1562                       !atomic_read(&sk->sk_rmem_alloc)))
1563                         time_to_ack = true;
1564         }
1565
1566         /* We send an ACK if we can now advertise a non-zero window
1567          * which has been raised "significantly".
1568          *
1569          * Even if the window is raised up to infinity, do not send a window
1570          * open ACK in states where we will not receive more. It is useless.
1571          */
1572         if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1573                 __u32 rcv_window_now = tcp_receive_window(tp);
1574
1575                 /* Optimize, __tcp_select_window() is not cheap. */
1576                 if (2*rcv_window_now <= tp->window_clamp) {
1577                         __u32 new_window = __tcp_select_window(sk);
1578
1579                         /* Send ACK now, if this read freed lots of space
1580                          * in our buffer. We can advertise the new window now,
1581                          * if it is not less than the current one.
1582                          * "Lots" means "at least twice" here.
1583                          */
1584                         if (new_window && new_window >= 2 * rcv_window_now)
1585                                 time_to_ack = true;
1586                 }
1587         }
1588         if (time_to_ack)
1589                 tcp_send_ack(sk);
1590 }
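
/* Worked example of the window check above, with illustrative numbers:
 * if window_clamp = 65536 and rcv_window_now = 16384, then
 * 2 * 16384 <= 65536 holds, so __tcp_select_window() is consulted; if it
 * returns new_window >= 2 * 16384 = 32768, time_to_ack becomes true and
 * an ACK advertising the larger window is sent immediately.
 */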
1591
1592 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1593 {
1594         struct sk_buff *skb;
1595         u32 offset;
1596
1597         while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1598                 offset = seq - TCP_SKB_CB(skb)->seq;
1599                 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1600                         pr_err_once("%s: found a SYN, please report!\n", __func__);
1601                         offset--;
1602                 }
1603                 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
1604                         *off = offset;
1605                         return skb;
1606                 }
1607                 /* This looks weird, but this can happen if TCP collapsing
1608                  * split a fat GRO packet while we released the socket lock
1609                  * in skb_splice_bits()
1610                  */
1611                 sk_eat_skb(sk, skb);
1612         }
1613         return NULL;
1614 }
1615
1616 /*
1617  * This routine provides an alternative to tcp_recvmsg() for routines
1618  * that would like to handle copying from skbuffs directly in 'sendfile'
1619  * fashion.
1620  * Note:
1621  *      - It is assumed that the socket was locked by the caller.
1622  *      - The routine does not block.
1623  *      - At present, there is no support for reading OOB data
1624  *        or for 'peeking' the socket using this routine
1625  *        (although both would be easy to implement).
1626  */
1627 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1628                   sk_read_actor_t recv_actor)
1629 {
1630         struct sk_buff *skb;
1631         struct tcp_sock *tp = tcp_sk(sk);
1632         u32 seq = tp->copied_seq;
1633         u32 offset;
1634         int copied = 0;
1635
1636         if (sk->sk_state == TCP_LISTEN)
1637                 return -ENOTCONN;
1638         while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1639                 if (offset < skb->len) {
1640                         int used;
1641                         size_t len;
1642
1643                         len = skb->len - offset;
1644                         /* Stop reading if we hit a patch of urgent data */
1645                         if (tp->urg_data) {
1646                                 u32 urg_offset = tp->urg_seq - seq;
1647                                 if (urg_offset < len)
1648                                         len = urg_offset;
1649                                 if (!len)
1650                                         break;
1651                         }
1652                         used = recv_actor(desc, skb, offset, len);
1653                         if (used <= 0) {
1654                                 if (!copied)
1655                                         copied = used;
1656                                 break;
1657                         } else if (used <= len) {
1658                                 seq += used;
1659                                 copied += used;
1660                                 offset += used;
1661                         }
1662                         /* If recv_actor drops the lock (e.g. TCP splice
1663                          * receive) the skb pointer might be invalid when
1664                          * getting here: tcp_collapse might have deleted it
1665                          * while aggregating skbs from the socket queue.
1666                          */
1667                         skb = tcp_recv_skb(sk, seq - 1, &offset);
1668                         if (!skb)
1669                                 break;
1670                         /* TCP coalescing might have appended data to the skb.
1671                          * Try to splice more frags
1672                          */
1673                         if (offset + 1 != skb->len)
1674                                 continue;
1675                 }
1676                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1677                         sk_eat_skb(sk, skb);
1678                         ++seq;
1679                         break;
1680                 }
1681                 sk_eat_skb(sk, skb);
1682                 if (!desc->count)
1683                         break;
1684                 tp->copied_seq = seq;
1685         }
1686         tp->copied_seq = seq;
1687
1688         tcp_rcv_space_adjust(sk);
1689
1690         /* Clean up data we have read: This will do ACK frames. */
1691         if (copied > 0) {
1692                 tcp_recv_skb(sk, seq, &offset);
1693                 tcp_cleanup_rbuf(sk, copied);
1694         }
1695         return copied;
1696 }
1697 EXPORT_SYMBOL(tcp_read_sock);
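
/* Illustrative sketch of a recv_actor for tcp_read_sock(); hypothetical,
 * not used anywhere in this file. The actor returns how many bytes it
 * consumed (<= len); returning 0 or a negative value stops the loop above.
 *
 *	static int count_bytes_actor(read_descriptor_t *desc,
 *				     struct sk_buff *skb,
 *				     unsigned int offset, size_t len)
 *	{
 *		size_t want = min_t(size_t, len, desc->count);
 *
 *		desc->count -= want;	// desc->count == 0 ends the walk
 *		return want;
 *	}
 */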
1698
1699 int tcp_peek_len(struct socket *sock)
1700 {
1701         return tcp_inq(sock->sk);
1702 }
1703 EXPORT_SYMBOL(tcp_peek_len);
1704
1705 static void tcp_update_recv_tstamps(struct sk_buff *skb,
1706                                     struct scm_timestamping *tss)
1707 {
1708         if (skb->tstamp)
1709                 tss->ts[0] = ktime_to_timespec(skb->tstamp);
1710         else
1711                 tss->ts[0] = (struct timespec) {0};
1712
1713         if (skb_hwtstamps(skb)->hwtstamp)
1714                 tss->ts[2] = ktime_to_timespec(skb_hwtstamps(skb)->hwtstamp);
1715         else
1716                 tss->ts[2] = (struct timespec) {0};
1717 }
1718
1719 /* Similar to __sock_recv_timestamp, but does not require an skb */
1720 static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
1721                                struct scm_timestamping *tss)
1722 {
1723         struct timeval tv;
1724         bool has_timestamping = false;
1725
1726         if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
1727                 if (sock_flag(sk, SOCK_RCVTSTAMP)) {
1728                         if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
1729                                 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
1730                                          sizeof(tss->ts[0]), &tss->ts[0]);
1731                         } else {
1732                                 tv.tv_sec = tss->ts[0].tv_sec;
1733                                 tv.tv_usec = tss->ts[0].tv_nsec / 1000;
1734
1735                                 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
1736                                          sizeof(tv), &tv);
1737                         }
1738                 }
1739
1740                 if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
1741                         has_timestamping = true;
1742                 else
1743                         tss->ts[0] = (struct timespec) {0};
1744         }
1745
1746         if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
1747                 if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
1748                         has_timestamping = true;
1749                 else
1750                         tss->ts[2] = (struct timespec) {0};
1751         }
1752
1753         if (has_timestamping) {
1754                 tss->ts[1] = (struct timespec) {0};
1755                 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING,
1756                          sizeof(*tss), tss);
1757         }
1758 }
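
/* Illustrative sketch, an assumption about the userspace side: picking up
 * the SCM_TIMESTAMPING control message emitted above. ts[0] carries the
 * software timestamp, ts[2] the raw hardware one; ts[1] is cleared.
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == SOL_SOCKET &&
 *		    cm->cmsg_type == SCM_TIMESTAMPING) {
 *			struct scm_timestamping *tss =
 *				(struct scm_timestamping *)CMSG_DATA(cm);
 *			// use tss->ts[0] (software), tss->ts[2] (hardware)
 *		}
 *	}
 */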
1759
1760 /*
1761  *      This routine copies from a sock struct into the user buffer.
1762  *
1763  *      Technical note: in 2.3 we work on _locked_ socket, so that
1764  *      tricks with *seq access order and skb->users are not required.
1765  *      Probably, code can be easily improved even more.
1766  */
1767
1768 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1769                 int flags, int *addr_len)
1770 {
1771         struct tcp_sock *tp = tcp_sk(sk);
1772         int copied = 0;
1773         u32 peek_seq;
1774         u32 *seq;
1775         unsigned long used;
1776         int err;
1777         int target;             /* Read at least this many bytes */
1778         long timeo;
1779         struct sk_buff *skb, *last;
1780         u32 urg_hole = 0;
1781         struct scm_timestamping tss;
1782         bool has_tss = false;
1783
1784         if (unlikely(flags & MSG_ERRQUEUE))
1785                 return inet_recv_error(sk, msg, len, addr_len);
1786
1787         if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1788             (sk->sk_state == TCP_ESTABLISHED))
1789                 sk_busy_loop(sk, nonblock);
1790
1791         lock_sock(sk);
1792
1793         err = -ENOTCONN;
1794         if (sk->sk_state == TCP_LISTEN)
1795                 goto out;
1796
1797         timeo = sock_rcvtimeo(sk, nonblock);
1798
1799         /* Urgent data needs to be handled specially. */
1800         if (flags & MSG_OOB)
1801                 goto recv_urg;
1802
1803         if (unlikely(tp->repair)) {
1804                 err = -EPERM;
1805                 if (!(flags & MSG_PEEK))
1806                         goto out;
1807
1808                 if (tp->repair_queue == TCP_SEND_QUEUE)
1809                         goto recv_sndq;
1810
1811                 err = -EINVAL;
1812                 if (tp->repair_queue == TCP_NO_QUEUE)
1813                         goto out;
1814
1815                 /* 'common' recv queue MSG_PEEK-ing */
1816         }
1817
1818         seq = &tp->copied_seq;
1819         if (flags & MSG_PEEK) {
1820                 peek_seq = tp->copied_seq;
1821                 seq = &peek_seq;
1822         }
1823
1824         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1825
1826         do {
1827                 u32 offset;
1828
1829                 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1830                 if (tp->urg_data && tp->urg_seq == *seq) {
1831                         if (copied)
1832                                 break;
1833                         if (signal_pending(current)) {
1834                                 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1835                                 break;
1836                         }
1837                 }
1838
1839                 /* Next get a buffer. */
1840
1841                 last = skb_peek_tail(&sk->sk_receive_queue);
1842                 skb_queue_walk(&sk->sk_receive_queue, skb) {
1843                         last = skb;
1844                         /* Now that we have two receive queues this
1845                          * shouldn't happen.
1846                          */
1847                         if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1848                                  "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1849                                  *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1850                                  flags))
1851                                 break;
1852
1853                         offset = *seq - TCP_SKB_CB(skb)->seq;
1854                         if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1855                                 pr_err_once("%s: found a SYN, please report!\n", __func__);
1856                                 offset--;
1857                         }
1858                         if (offset < skb->len)
1859                                 goto found_ok_skb;
1860                         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1861                                 goto found_fin_ok;
1862                         WARN(!(flags & MSG_PEEK),
1863                              "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1864                              *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1865                 }
1866
1867                 /* Well, if we have backlog, try to process it now. */
1868
1869                 if (copied >= target && !sk->sk_backlog.tail)
1870                         break;
1871
1872                 if (copied) {
1873                         if (sk->sk_err ||
1874                             sk->sk_state == TCP_CLOSE ||
1875                             (sk->sk_shutdown & RCV_SHUTDOWN) ||
1876                             !timeo ||
1877                             signal_pending(current))
1878                                 break;
1879                 } else {
1880                         if (sock_flag(sk, SOCK_DONE))
1881                                 break;
1882
1883                         if (sk->sk_err) {
1884                                 copied = sock_error(sk);
1885                                 break;
1886                         }
1887
1888                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1889                                 break;
1890
1891                         if (sk->sk_state == TCP_CLOSE) {
1892                                 if (!sock_flag(sk, SOCK_DONE)) {
1893                                         /* This occurs when the user tries to read
1894                                          * from a never-connected socket.
1895                                          */
1896                                         copied = -ENOTCONN;
1897                                         break;
1898                                 }
1899                                 break;
1900                         }
1901
1902                         if (!timeo) {
1903                                 copied = -EAGAIN;
1904                                 break;
1905                         }
1906
1907                         if (signal_pending(current)) {
1908                                 copied = sock_intr_errno(timeo);
1909                                 break;
1910                         }
1911                 }
1912
1913                 tcp_cleanup_rbuf(sk, copied);
1914
1915                 if (copied >= target) {
1916                         /* Do not sleep, just process backlog. */
1917                         release_sock(sk);
1918                         lock_sock(sk);
1919                 } else {
1920                         sk_wait_data(sk, &timeo, last);
1921                 }
1922
1923                 if ((flags & MSG_PEEK) &&
1924                     (peek_seq - copied - urg_hole != tp->copied_seq)) {
1925                         net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1926                                             current->comm,
1927                                             task_pid_nr(current));
1928                         peek_seq = tp->copied_seq;
1929                 }
1930                 continue;
1931
1932         found_ok_skb:
1933                 /* Ok so how much can we use? */
1934                 used = skb->len - offset;
1935                 if (len < used)
1936                         used = len;
1937
1938                 /* Do we have urgent data here? */
1939                 if (tp->urg_data) {
1940                         u32 urg_offset = tp->urg_seq - *seq;
1941                         if (urg_offset < used) {
1942                                 if (!urg_offset) {
1943                                         if (!sock_flag(sk, SOCK_URGINLINE)) {
1944                                                 ++*seq;
1945                                                 urg_hole++;
1946                                                 offset++;
1947                                                 used--;
1948                                                 if (!used)
1949                                                         goto skip_copy;
1950                                         }
1951                                 } else
1952                                         used = urg_offset;
1953                         }
1954                 }
1955
1956                 if (!(flags & MSG_TRUNC)) {
1957                         err = skb_copy_datagram_msg(skb, offset, msg, used);
1958                         if (err) {
1959                                 /* Exception. Bailout! */
1960                                 if (!copied)
1961                                         copied = -EFAULT;
1962                                 break;
1963                         }
1964                 }
1965
1966                 *seq += used;
1967                 copied += used;
1968                 len -= used;
1969
1970                 tcp_rcv_space_adjust(sk);
1971
1972 skip_copy:
1973                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1974                         tp->urg_data = 0;
1975                         tcp_fast_path_check(sk);
1976                 }
1977                 if (used + offset < skb->len)
1978                         continue;
1979
1980                 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1981                         tcp_update_recv_tstamps(skb, &tss);
1982                         has_tss = true;
1983                 }
1984                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1985                         goto found_fin_ok;
1986                 if (!(flags & MSG_PEEK))
1987                         sk_eat_skb(sk, skb);
1988                 continue;
1989
1990         found_fin_ok:
1991                 /* Process the FIN. */
1992                 ++*seq;
1993                 if (!(flags & MSG_PEEK))
1994                         sk_eat_skb(sk, skb);
1995                 break;
1996         } while (len > 0);
1997
1998         /* According to UNIX98, msg_name/msg_namelen are ignored
1999          * on a connected socket. I was just happy when I found this 8) --ANK
2000          */
2001
2002         if (has_tss)
2003                 tcp_recv_timestamp(msg, sk, &tss);
2004
2005         /* Clean up data we have read: This will do ACK frames. */
2006         tcp_cleanup_rbuf(sk, copied);
2007
2008         release_sock(sk);
2009         return copied;
2010
2011 out:
2012         release_sock(sk);
2013         return err;
2014
2015 recv_urg:
2016         err = tcp_recv_urg(sk, msg, len, flags);
2017         goto out;
2018
2019 recv_sndq:
2020         err = tcp_peek_sndq(sk, msg, len);
2021         goto out;
2022 }
2023 EXPORT_SYMBOL(tcp_recvmsg);
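
/* Illustrative note, not kernel code: the "target" computed above means
 * that, from userspace, recv(fd, buf, len, MSG_WAITALL) does not normally
 * return until len bytes have arrived (or an error/EOF/signal occurs),
 * while a plain recv() may return once SO_RCVLOWAT bytes (1 by default)
 * are available.
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &(int){ 4096 }, sizeof(int));
 *	n = recv(fd, buf, sizeof(buf), 0);	// typically waits for >= 4096 bytes
 */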
2024
2025 void tcp_set_state(struct sock *sk, int state)
2026 {
2027         int oldstate = sk->sk_state;
2028
2029         /* We defined a new enum for TCP states that are exported in BPF
2030          * so as not to force the internal TCP states to be frozen. The
2031          * following checks will detect if an internal state value ever
2032          * differs from the BPF value. If this ever happens, then we will
2033          * need to remap the internal value to the BPF value before calling
2034          * tcp_call_bpf_2arg.
2035          */
2036         BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2037         BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2038         BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2039         BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2040         BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2041         BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2042         BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2043         BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2044         BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2045         BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2046         BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2047         BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
2048         BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2049
2050         if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2051                 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2052
2053         switch (state) {
2054         case TCP_ESTABLISHED:
2055                 if (oldstate != TCP_ESTABLISHED)
2056                         TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2057                 break;
2058
2059         case TCP_CLOSE:
2060                 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2061                         TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2062
2063                 sk->sk_prot->unhash(sk);
2064                 if (inet_csk(sk)->icsk_bind_hash &&
2065                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2066                         inet_put_port(sk);
2067                 /* fall through */
2068         default:
2069                 if (oldstate == TCP_ESTABLISHED)
2070                         TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2071         }
2072
2073         /* Change state AFTER socket is unhashed to avoid closed
2074          * socket sitting in hash tables.
2075          */
2076         inet_sk_state_store(sk, state);
2077
2078 #ifdef STATE_TRACE
2079         SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2080 #endif
2081 }
2082 EXPORT_SYMBOL_GPL(tcp_set_state);
2083
2084 /*
2085  *      State processing on a close. This implements the state shift for
2086  *      sending our FIN frame. Note that we only send a FIN for some
2087  *      states. A shutdown() may have already sent the FIN, or we may be
2088  *      closed.
2089  */
2090
2091 static const unsigned char new_state[16] = {
2092   /* current state:        new state:      action:      */
2093   [0 /* (Invalid) */]   = TCP_CLOSE,
2094   [TCP_ESTABLISHED]     = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2095   [TCP_SYN_SENT]        = TCP_CLOSE,
2096   [TCP_SYN_RECV]        = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2097   [TCP_FIN_WAIT1]       = TCP_FIN_WAIT1,
2098   [TCP_FIN_WAIT2]       = TCP_FIN_WAIT2,
2099   [TCP_TIME_WAIT]       = TCP_CLOSE,
2100   [TCP_CLOSE]           = TCP_CLOSE,
2101   [TCP_CLOSE_WAIT]      = TCP_LAST_ACK  | TCP_ACTION_FIN,
2102   [TCP_LAST_ACK]        = TCP_LAST_ACK,
2103   [TCP_LISTEN]          = TCP_CLOSE,
2104   [TCP_CLOSING]         = TCP_CLOSING,
2105   [TCP_NEW_SYN_RECV]    = TCP_CLOSE,    /* should not happen! */
2106 };
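
/* Worked example of the table above: close() in TCP_CLOSE_WAIT looks up
 * TCP_LAST_ACK | TCP_ACTION_FIN, so tcp_close_state() below moves the
 * socket to LAST_ACK and returns nonzero, telling the caller to emit a
 * FIN. A close() in TCP_SYN_SENT maps to TCP_CLOSE and sends no FIN.
 */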
2107
2108 static int tcp_close_state(struct sock *sk)
2109 {
2110         int next = (int)new_state[sk->sk_state];
2111         int ns = next & TCP_STATE_MASK;
2112
2113         tcp_set_state(sk, ns);
2114
2115         return next & TCP_ACTION_FIN;
2116 }
2117
2118 /*
2119  *      Shutdown the sending side of a connection. Much like close except
2120  *      that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
2121  */
2122
2123 void tcp_shutdown(struct sock *sk, int how)
2124 {
2125         /*      We need to grab some memory, and put together a FIN,
2126          *      and then put it into the queue to be sent.
2127          *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2128          */
2129         if (!(how & SEND_SHUTDOWN))
2130                 return;
2131
2132         /* If we've already sent a FIN, or it's a closed state, skip this. */
2133         if ((1 << sk->sk_state) &
2134             (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2135              TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2136                 /* Clear out any half completed packets.  FIN if needed. */
2137                 if (tcp_close_state(sk))
2138                         tcp_send_fin(sk);
2139         }
2140 }
2141 EXPORT_SYMBOL(tcp_shutdown);
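
/* Illustrative sketch, an assumption about typical use: the classic
 * half-close pattern that ends up here. SHUT_WR sends our FIN but leaves
 * the receive side open, so the peer's remaining data can still be read
 * until EOF; consume() is a hypothetical placeholder.
 *
 *	shutdown(fd, SHUT_WR);			// FIN via tcp_shutdown()
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		// drain until the peer's FIN
 *	close(fd);
 */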
2142
2143 bool tcp_check_oom(struct sock *sk, int shift)
2144 {
2145         bool too_many_orphans, out_of_socket_memory;
2146
2147         too_many_orphans = tcp_too_many_orphans(sk, shift);
2148         out_of_socket_memory = tcp_out_of_memory(sk);
2149
2150         if (too_many_orphans)
2151                 net_info_ratelimited("too many orphaned sockets\n");
2152         if (out_of_socket_memory)
2153                 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2154         return too_many_orphans || out_of_socket_memory;
2155 }
2156
2157 void tcp_close(struct sock *sk, long timeout)
2158 {
2159         struct sk_buff *skb;
2160         int data_was_unread = 0;
2161         int state;
2162
2163         lock_sock(sk);
2164         sk->sk_shutdown = SHUTDOWN_MASK;
2165
2166         if (sk->sk_state == TCP_LISTEN) {
2167                 tcp_set_state(sk, TCP_CLOSE);
2168
2169                 /* Special case. */
2170                 inet_csk_listen_stop(sk);
2171
2172                 goto adjudge_to_death;
2173         }
2174
2175         /*  We need to flush the receive buffers.  We do this only on the
2176          *  descriptor close, not protocol-sourced closes, because the
2177          *  reader process may not have drained the data yet!
2178          */
2179         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2180                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2181
2182                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2183                         len--;
2184                 data_was_unread += len;
2185                 __kfree_skb(skb);
2186         }
2187
2188         sk_mem_reclaim(sk);
2189
2190         /* If the socket has already been reset (e.g. in tcp_reset()), kill it. */
2191         if (sk->sk_state == TCP_CLOSE)
2192                 goto adjudge_to_death;
2193
2194         /* As outlined in RFC 2525, section 2.17, we send a RST here because
2195          * data was lost. To witness the awful effects of the old behavior of
2196          * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2197          * GET in an FTP client, suspend the process, wait for the client to
2198          * advertise a zero window, then kill -9 the FTP client, wheee...
2199          * Note: timeout is always zero in such a case.
2200          */
2201         if (unlikely(tcp_sk(sk)->repair)) {
2202                 sk->sk_prot->disconnect(sk, 0);
2203         } else if (data_was_unread) {
2204                 /* Unread data was tossed, zap the connection. */
2205                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2206                 tcp_set_state(sk, TCP_CLOSE);
2207                 tcp_send_active_reset(sk, sk->sk_allocation);
2208         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2209                 /* Check zero linger _after_ checking for unread data. */
2210                 sk->sk_prot->disconnect(sk, 0);
2211                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2212         } else if (tcp_close_state(sk)) {
2213                 /* We FIN if the application ate all the data before
2214                  * zapping the connection.
2215                  */
2216
2217                 /* RED-PEN. Formally speaking, we have broken TCP state
2218                  * machine. State transitions:
2219                  *
2220                  * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2221                  * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2222                  * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2223                  *
2224                  * are legal only when a FIN has been sent (i.e. in window),
2225                  * rather than queued out of window. Purists would object.
2226                  *
2227                  * E.g. the "RFC state" is ESTABLISHED
2228                  * if the Linux state is FIN-WAIT-1 but the FIN has not been sent yet.
2229                  *
2230                  * The visible deviations are that we sometimes
2231                  * enter the time-wait state when it is not really required
2232                  * (harmless), and do not send active resets when they are
2233                  * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
2234                  * they look like CLOSING or LAST_ACK to Linux).
2235                  * Probably, I missed some more holes.
2236                  *                                              --ANK
2237                  * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2238                  * in a single packet! (May consider it later but will
2239                  * probably need API support or TCP_CORK SYN-ACK until
2240                  * data is written and socket is closed.)
2241                  */
2242                 tcp_send_fin(sk);
2243         }
2244
2245         sk_stream_wait_close(sk, timeout);
2246
2247 adjudge_to_death:
2248         state = sk->sk_state;
2249         sock_hold(sk);
2250         sock_orphan(sk);
2251
2252         /* It is the last release_sock in its life. It will remove backlog. */
2253         release_sock(sk);
2254
2255
2256         /* Now the socket is owned by the kernel and we acquire the BH lock
2257          * to finish the close. No need to check for user refs.
2258          */
2259         local_bh_disable();
2260         bh_lock_sock(sk);
2261         WARN_ON(sock_owned_by_user(sk));
2262
2263         percpu_counter_inc(sk->sk_prot->orphan_count);
2264
2265         /* Have we already been destroyed by a softirq or backlog? */
2266         if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2267                 goto out;
2268
2269         /*      This is a (useful) BSD violation of the RFC. There is a
2270          *      problem with TCP as specified in that the other end could
2271          *      keep a socket open forever with no application left at this end.
2272          *      We use a 1 minute timeout (about the same as BSD) then kill
2273          *      our end. If they send after that then tough - BUT: long enough
2274          *      that we won't make the old 4*rto = almost no time - whoops
2275          *      reset mistake.
2276          *
2277          *      Nope, it was not a mistake. It is really the desired behaviour,
2278          *      e.g. on HTTP servers, where such sockets are useless but
2279          *      consume significant resources. Let's do it with the special
2280          *      linger2 option.                                 --ANK
2281          */
2282
2283         if (sk->sk_state == TCP_FIN_WAIT2) {
2284                 struct tcp_sock *tp = tcp_sk(sk);
2285                 if (tp->linger2 < 0) {
2286                         tcp_set_state(sk, TCP_CLOSE);
2287                         tcp_send_active_reset(sk, GFP_ATOMIC);
2288                         __NET_INC_STATS(sock_net(sk),
2289                                         LINUX_MIB_TCPABORTONLINGER);
2290                 } else {
2291                         const int tmo = tcp_fin_time(sk);
2292
2293                         if (tmo > TCP_TIMEWAIT_LEN) {
2294                                 inet_csk_reset_keepalive_timer(sk,
2295                                                 tmo - TCP_TIMEWAIT_LEN);
2296                         } else {
2297                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2298                                 goto out;
2299                         }
2300                 }
2301         }
2302         if (sk->sk_state != TCP_CLOSE) {
2303                 sk_mem_reclaim(sk);
2304                 if (tcp_check_oom(sk, 0)) {
2305                         tcp_set_state(sk, TCP_CLOSE);
2306                         tcp_send_active_reset(sk, GFP_ATOMIC);
2307                         __NET_INC_STATS(sock_net(sk),
2308                                         LINUX_MIB_TCPABORTONMEMORY);
2309                 } else if (!check_net(sock_net(sk))) {
2310                         /* Not possible to send reset; just close */
2311                         tcp_set_state(sk, TCP_CLOSE);
2312                 }
2313         }
2314
2315         if (sk->sk_state == TCP_CLOSE) {
2316                 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2317                 /* We could get here with a non-NULL req if the socket is
2318                  * aborted (e.g., closed with unread data) before 3WHS
2319                  * finishes.
2320                  */
2321                 if (req)
2322                         reqsk_fastopen_remove(sk, req, false);
2323                 inet_csk_destroy_sock(sk);
2324         }
2325         /* Otherwise, socket is reprieved until protocol close. */
2326
2327 out:
2328         bh_unlock_sock(sk);
2329         local_bh_enable();
2330         sock_put(sk);
2331 }
2332 EXPORT_SYMBOL(tcp_close);
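
/* Illustrative sketch, an assumption about the userspace side: the
 * zero-linger abort path checked above (SOCK_LINGER set, lingertime 0).
 * The connection is reset instead of going through FIN/TIME-WAIT.
 *
 *	struct linger lng = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng));
 *	close(fd);	// sends RST, skips TIME-WAIT
 */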
2333
2334 /* These states need RST on ABORT according to RFC793 */
2335
2336 static inline bool tcp_need_reset(int state)
2337 {
2338         return (1 << state) &
2339                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2340                 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2341 }
2342
2343 static void tcp_rtx_queue_purge(struct sock *sk)
2344 {
2345         struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
2346
2347         while (p) {
2348                 struct sk_buff *skb = rb_to_skb(p);
2349
2350                 p = rb_next(p);
2351                 /* Since we are deleting the whole queue, there is no need to
2352                  * list_del(&skb->tcp_tsorted_anchor)
2353                  */
2354                 tcp_rtx_queue_unlink(skb, sk);
2355                 sk_wmem_free_skb(sk, skb);
2356         }
2357 }
2358
2359 void tcp_write_queue_purge(struct sock *sk)
2360 {
2361         struct sk_buff *skb;
2362
2363         tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
2364         while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
2365                 tcp_skb_tsorted_anchor_cleanup(skb);
2366                 sk_wmem_free_skb(sk, skb);
2367         }
2368         tcp_rtx_queue_purge(sk);
2369         INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2370         sk_mem_reclaim(sk);
2371         tcp_clear_all_retrans_hints(tcp_sk(sk));
2372         tcp_sk(sk)->packets_out = 0;
2373 }
2374
2375 int tcp_disconnect(struct sock *sk, int flags)
2376 {
2377         struct inet_sock *inet = inet_sk(sk);
2378         struct inet_connection_sock *icsk = inet_csk(sk);
2379         struct tcp_sock *tp = tcp_sk(sk);
2380         int err = 0;
2381         int old_state = sk->sk_state;
2382
2383         if (old_state != TCP_CLOSE)
2384                 tcp_set_state(sk, TCP_CLOSE);
2385
2386         /* ABORT function of RFC793 */
2387         if (old_state == TCP_LISTEN) {
2388                 inet_csk_listen_stop(sk);
2389         } else if (unlikely(tp->repair)) {
2390                 sk->sk_err = ECONNABORTED;
2391         } else if (tcp_need_reset(old_state) ||
2392                    (tp->snd_nxt != tp->write_seq &&
2393                     (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2394                 /* The last check adjusts for the discrepancy between Linux and
2395                  * the RFC states.
2396                  */
2397                 tcp_send_active_reset(sk, gfp_any());
2398                 sk->sk_err = ECONNRESET;
2399         } else if (old_state == TCP_SYN_SENT)
2400                 sk->sk_err = ECONNRESET;
2401
2402         tcp_clear_xmit_timers(sk);
2403         __skb_queue_purge(&sk->sk_receive_queue);
2404         tcp_write_queue_purge(sk);
2405         tcp_fastopen_active_disable_ofo_check(sk);
2406         skb_rbtree_purge(&tp->out_of_order_queue);
2407
2408         inet->inet_dport = 0;
2409
2410         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2411                 inet_reset_saddr(sk);
2412
2413         sk->sk_shutdown = 0;
2414         sock_reset_flag(sk, SOCK_DONE);
2415         tp->srtt_us = 0;
2416         tp->write_seq += tp->max_window + 2;
2417         if (tp->write_seq == 0)
2418                 tp->write_seq = 1;
2419         icsk->icsk_backoff = 0;
2420         tp->snd_cwnd = 2;
2421         icsk->icsk_probes_out = 0;
2422         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2423         tp->snd_cwnd_cnt = 0;
2424         tp->window_clamp = 0;
2425         tcp_set_ca_state(sk, TCP_CA_Open);
2426         tp->is_sack_reneg = 0;
2427         tcp_clear_retrans(tp);
2428         inet_csk_delack_init(sk);
2429         /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division-by-zero
2430          * issue in __tcp_select_window()
2431          */
2432         icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2433         memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2434         __sk_dst_reset(sk);
2435         dst_release(sk->sk_rx_dst);
2436         sk->sk_rx_dst = NULL;
2437         tcp_saved_syn_free(tp);
2438
2439         /* Clean up fastopen related fields */
2440         tcp_free_fastopen_req(tp);
2441         inet->defer_connect = 0;
2442
2443         WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2444
2445         if (sk->sk_frag.page) {
2446                 put_page(sk->sk_frag.page);
2447                 sk->sk_frag.page = NULL;
2448                 sk->sk_frag.offset = 0;
2449         }
2450
2451         sk->sk_error_report(sk);
2452         return err;
2453 }
2454 EXPORT_SYMBOL(tcp_disconnect);
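
/* Illustrative note, an assumption about one common caller: connect()
 * with AF_UNSPEC reaches tcp_disconnect() from userspace, detaching the
 * socket from its peer and resetting it as done above.
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// ends up in tcp_disconnect()
 */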
2455
2456 static inline bool tcp_can_repair_sock(const struct sock *sk)
2457 {
2458         return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2459                 (sk->sk_state != TCP_LISTEN);
2460 }
2461
2462 static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
2463 {
2464         struct tcp_repair_window opt;
2465
2466         if (!tp->repair)
2467                 return -EPERM;
2468
2469         if (len != sizeof(opt))
2470                 return -EINVAL;
2471
2472         if (copy_from_user(&opt, optbuf, sizeof(opt)))
2473                 return -EFAULT;
2474
2475         if (opt.max_window < opt.snd_wnd)
2476                 return -EINVAL;
2477
2478         if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
2479                 return -EINVAL;
2480
2481         if (after(opt.rcv_wup, tp->rcv_nxt))
2482                 return -EINVAL;
2483
2484         tp->snd_wl1     = opt.snd_wl1;
2485         tp->snd_wnd     = opt.snd_wnd;
2486         tp->max_window  = opt.max_window;
2487
2488         tp->rcv_wnd     = opt.rcv_wnd;
2489         tp->rcv_wup     = opt.rcv_wup;
2490
2491         return 0;
2492 }
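
/* Illustrative sketch, an assumption about checkpoint/restore usage:
 * feeding the window state validated above back into a socket. The
 * socket must already be in repair mode for this to succeed.
 *
 *	struct tcp_repair_window w;	// filled from an earlier getsockopt()
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &(int){ 1 }, sizeof(int));
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, &w, sizeof(w));
 */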
2493
2494 static int tcp_repair_options_est(struct sock *sk,
2495                 struct tcp_repair_opt __user *optbuf, unsigned int len)
2496 {
2497         struct tcp_sock *tp = tcp_sk(sk);
2498         struct tcp_repair_opt opt;
2499
2500         while (len >= sizeof(opt)) {
2501                 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2502                         return -EFAULT;
2503
2504                 optbuf++;
2505                 len -= sizeof(opt);
2506
2507                 switch (opt.opt_code) {
2508                 case TCPOPT_MSS:
2509                         tp->rx_opt.mss_clamp = opt.opt_val;
2510                         tcp_mtup_init(sk);
2511                         break;
2512                 case TCPOPT_WINDOW:
2513                         {
2514                                 u16 snd_wscale = opt.opt_val & 0xFFFF;
2515                                 u16 rcv_wscale = opt.opt_val >> 16;
2516
2517                                 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
2518                                         return -EFBIG;
2519
2520                                 tp->rx_opt.snd_wscale = snd_wscale;
2521                                 tp->rx_opt.rcv_wscale = rcv_wscale;
2522                                 tp->rx_opt.wscale_ok = 1;
2523                         }
2524                         break;
2525                 case TCPOPT_SACK_PERM:
2526                         if (opt.opt_val != 0)
2527                                 return -EINVAL;
2528
2529                         tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2530                         break;
2531                 case TCPOPT_TIMESTAMP:
2532                         if (opt.opt_val != 0)
2533                                 return -EINVAL;
2534
2535                         tp->rx_opt.tstamp_ok = 1;
2536                         break;
2537                 }
2538         }
2539
2540         return 0;
2541 }
2542
2543 /*
2544  *      Socket option code for TCP.
2545  */
2546 static int do_tcp_setsockopt(struct sock *sk, int level,
2547                 int optname, char __user *optval, unsigned int optlen)
2548 {
2549         struct tcp_sock *tp = tcp_sk(sk);
2550         struct inet_connection_sock *icsk = inet_csk(sk);
2551         struct net *net = sock_net(sk);
2552         int val;
2553         int err = 0;
2554
2555         /* These are data/string values; all the others are ints */
2556         switch (optname) {
2557         case TCP_CONGESTION: {
2558                 char name[TCP_CA_NAME_MAX];
2559
2560                 if (optlen < 1)
2561                         return -EINVAL;
2562
2563                 val = strncpy_from_user(name, optval,
2564                                         min_t(long, TCP_CA_NAME_MAX-1, optlen));
2565                 if (val < 0)
2566                         return -EFAULT;
2567                 name[val] = 0;
2568
2569                 lock_sock(sk);
2570                 err = tcp_set_congestion_control(sk, name, true, true);
2571                 release_sock(sk);
2572                 return err;
2573         }
2574         case TCP_ULP: {
2575                 char name[TCP_ULP_NAME_MAX];
2576
2577                 if (optlen < 1)
2578                         return -EINVAL;
2579
2580                 val = strncpy_from_user(name, optval,
2581                                         min_t(long, TCP_ULP_NAME_MAX - 1,
2582                                               optlen));
2583                 if (val < 0)
2584                         return -EFAULT;
2585                 name[val] = 0;
2586
2587                 lock_sock(sk);
2588                 err = tcp_set_ulp(sk, name);
2589                 release_sock(sk);
2590                 return err;
2591         }
2592         case TCP_FASTOPEN_KEY: {
2593                 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
2594
2595                 if (optlen != sizeof(key))
2596                         return -EINVAL;
2597
2598                 if (copy_from_user(key, optval, optlen))
2599                         return -EFAULT;
2600
2601                 return tcp_fastopen_reset_cipher(net, sk, key, sizeof(key));
2602         }
2603         default:
2604                 /* fall through to the integer options below */
2605                 break;
2606         }
2607
2608         if (optlen < sizeof(int))
2609                 return -EINVAL;
2610
2611         if (get_user(val, (int __user *)optval))
2612                 return -EFAULT;
2613
2614         lock_sock(sk);
2615
2616         switch (optname) {
2617         case TCP_MAXSEG:
2618                 /* Values greater than interface MTU won't take effect. However
2619                  * at the point when this call is done we typically don't yet
2620                  * know which interface is going to be used.
2621                  */
2622                 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
2623                         err = -EINVAL;
2624                         break;
2625                 }
2626                 tp->rx_opt.user_mss = val;
2627                 break;
2628
2629         case TCP_NODELAY:
2630                 if (val) {
2631                         /* TCP_NODELAY is weaker than TCP_CORK, so that
2632                          * this option on a corked socket is remembered, but
2633                          * it is not activated until cork is cleared.
2634                          *
2635                          * However, when TCP_NODELAY is set we make
2636                          * an explicit push, which overrides even TCP_CORK
2637                          * for currently queued segments.
2638                          */
2639                         tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2640                         tcp_push_pending_frames(sk);
2641                 } else {
2642                         tp->nonagle &= ~TCP_NAGLE_OFF;
2643                 }
2644                 break;
2645
2646         case TCP_THIN_LINEAR_TIMEOUTS:
2647                 if (val < 0 || val > 1)
2648                         err = -EINVAL;
2649                 else
2650                         tp->thin_lto = val;
2651                 break;
2652
2653         case TCP_THIN_DUPACK:
2654                 if (val < 0 || val > 1)
2655                         err = -EINVAL;
2656                 break;
2657
2658         case TCP_REPAIR:
2659                 if (!tcp_can_repair_sock(sk))
2660                         err = -EPERM;
2661                 else if (val == 1) {
2662                         tp->repair = 1;
2663                         sk->sk_reuse = SK_FORCE_REUSE;
2664                         tp->repair_queue = TCP_NO_QUEUE;
2665                 } else if (val == 0) {
2666                         tp->repair = 0;
2667                         sk->sk_reuse = SK_NO_REUSE;
2668                         tcp_send_window_probe(sk);
2669                 } else
2670                         err = -EINVAL;
2671
2672                 break;
2673
2674         case TCP_REPAIR_QUEUE:
2675                 if (!tp->repair)
2676                         err = -EPERM;
2677                 else if ((unsigned int)val < TCP_QUEUES_NR)
2678                         tp->repair_queue = val;
2679                 else
2680                         err = -EINVAL;
2681                 break;
2682
2683         case TCP_QUEUE_SEQ:
2684                 if (sk->sk_state != TCP_CLOSE)
2685                         err = -EPERM;
2686                 else if (tp->repair_queue == TCP_SEND_QUEUE)
2687                         tp->write_seq = val;
2688                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2689                         tp->rcv_nxt = val;
2690                 else
2691                         err = -EINVAL;
2692                 break;
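                /* Hedged sketch of how a checkpoint/restore tool (e.g. CRIU)
                 * might combine the repair knobs above on a CAP_NET_ADMIN
                 * socket; "saved_snd_seq" is an assumed value captured at
                 * checkpoint time:
                 *
                 *      int on = 1, q = TCP_SEND_QUEUE;
                 *      int seq = saved_snd_seq;
                 *
                 *      setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
                 *      setsockopt(fd, SOL_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
                 *      setsockopt(fd, SOL_TCP, TCP_QUEUE_SEQ, &seq, sizeof(seq));
                 */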
2693
2694         case TCP_REPAIR_OPTIONS:
2695                 if (!tp->repair)
2696                         err = -EINVAL;
2697                 else if (sk->sk_state == TCP_ESTABLISHED)
2698                         err = tcp_repair_options_est(sk,
2699                                         (struct tcp_repair_opt __user *)optval,
2700                                         optlen);
2701                 else
2702                         err = -EPERM;
2703                 break;
2704
2705         case TCP_CORK:
2706                 /* When set, this indicates that non-full frames should
2707                  * always be queued.  When the user later clears the option,
2708                  * we transmit any pending partial frames in the queue.  This
2709                  * is meant to be used alongside sendfile() to get properly
2710                  * filled frames when the user (for example) must write
2711                  * out headers with a write() call first and then use
2712                  * sendfile() to send out the data parts.
2713                  *
2714                  * TCP_CORK can be set together with TCP_NODELAY and it is
2715                  * stronger than TCP_NODELAY.
2716                  */
2717                 if (val) {
2718                         tp->nonagle |= TCP_NAGLE_CORK;
2719                 } else {
2720                         tp->nonagle &= ~TCP_NAGLE_CORK;
2721                         if (tp->nonagle&TCP_NAGLE_OFF)
2722                                 tp->nonagle |= TCP_NAGLE_PUSH;
2723                         tcp_push_pending_frames(sk);
2724                 }
2725                 break;
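                /* Illustrative userspace pattern for the comment above
                 * (assumed descriptors, error handling omitted):
                 *
                 *      int on = 1, off = 0;
                 *
                 *      setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
                 *      write(fd, hdr, hdr_len);                // queue headers
                 *      sendfile(fd, file_fd, NULL, file_len);  // queue payload
                 *      setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
                 */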
2726
2727         case TCP_KEEPIDLE:
2728                 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2729                         err = -EINVAL;
2730                 else {
2731                         tp->keepalive_time = val * HZ;
2732                         if (sock_flag(sk, SOCK_KEEPOPEN) &&
2733                             !((1 << sk->sk_state) &
2734                               (TCPF_CLOSE | TCPF_LISTEN))) {
2735                                 u32 elapsed = keepalive_time_elapsed(tp);
2736                                 if (tp->keepalive_time > elapsed)
2737                                         elapsed = tp->keepalive_time - elapsed;
2738                                 else
2739                                         elapsed = 0;
2740                                 inet_csk_reset_keepalive_timer(sk, elapsed);
2741                         }
2742                 }
2743                 break;
2744         case TCP_KEEPINTVL:
2745                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2746                         err = -EINVAL;
2747                 else
2748                         tp->keepalive_intvl = val * HZ;
2749                 break;
2750         case TCP_KEEPCNT:
2751                 if (val < 1 || val > MAX_TCP_KEEPCNT)
2752                         err = -EINVAL;
2753                 else
2754                         tp->keepalive_probes = val;
2755                 break;
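                /* Illustrative tuning of the three keepalive knobs above
                 * (example values): probe after 60s of idle time, every 10s,
                 * giving up after 5 unanswered probes.  SO_KEEPALIVE must be
                 * enabled as well:
                 *
                 *      int on = 1, idle = 60, intvl = 10, cnt = 5;
                 *
                 *      setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
                 *      setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
                 *      setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
                 *      setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
                 */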
2756         case TCP_SYNCNT:
2757                 if (val < 1 || val > MAX_TCP_SYNCNT)
2758                         err = -EINVAL;
2759                 else
2760                         icsk->icsk_syn_retries = val;
2761                 break;
2762
2763         case TCP_SAVE_SYN:
2764                 if (val < 0 || val > 1)
2765                         err = -EINVAL;
2766                 else
2767                         tp->save_syn = val;
2768                 break;
2769
2770         case TCP_LINGER2:
2771                 if (val < 0)
2772                         tp->linger2 = -1;
2773                 else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
2774                         tp->linger2 = 0;
2775                 else
2776                         tp->linger2 = val * HZ;
2777                 break;
2778
2779         case TCP_DEFER_ACCEPT:
2780                 /* Translate a value in seconds into a number of retransmits */
2781                 icsk->icsk_accept_queue.rskq_defer_accept =
2782                         secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2783                                         TCP_RTO_MAX / HZ);
2784                 break;
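                /* Worked example, assuming TCP_TIMEOUT_INIT is 1 second and
                 * exponential backoff: a value of 10 seconds is first covered
                 * by 1s + 2s + 4s + 8s = 15s of cumulative timeouts, so it
                 * maps to 4 retransmits.
                 */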
2785
2786         case TCP_WINDOW_CLAMP:
2787                 if (!val) {
2788                         if (sk->sk_state != TCP_CLOSE) {
2789                                 err = -EINVAL;
2790                                 break;
2791                         }
2792                         tp->window_clamp = 0;
2793                 } else
2794                         tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2795                                                 SOCK_MIN_RCVBUF / 2 : val;
2796                 break;
2797
2798         case TCP_QUICKACK:
2799                 if (!val) {
2800                         icsk->icsk_ack.pingpong = 1;
2801                 } else {
2802                         icsk->icsk_ack.pingpong = 0;
2803                         if ((1 << sk->sk_state) &
2804                             (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2805                             inet_csk_ack_scheduled(sk)) {
2806                                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2807                                 tcp_cleanup_rbuf(sk, 1);
2808                                 if (!(val & 1))
2809                                         icsk->icsk_ack.pingpong = 1;
2810                         }
2811                 }
2812                 break;
2813
2814 #ifdef CONFIG_TCP_MD5SIG
2815         case TCP_MD5SIG:
2816         case TCP_MD5SIG_EXT:
2817                 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
2818                         err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
2819                 else
2820                         err = -EINVAL;
2821                 break;
2822 #endif
2823         case TCP_USER_TIMEOUT:
2824                 /* Cap the max time in ms TCP will retry or probe the window
2825                  * before giving up and aborting (ETIMEDOUT) a connection.
2826                  */
2827                 if (val < 0)
2828                         err = -EINVAL;
2829                 else
2830                         icsk->icsk_user_timeout = msecs_to_jiffies(val);
2831                 break;
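                /* Illustrative: abort if transmitted data stays
                 * unacknowledged for more than 30 seconds (value in ms):
                 *
                 *      unsigned int tmo = 30 * 1000;
                 *
                 *      setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                 *                 &tmo, sizeof(tmo));
                 */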
2832
2833         case TCP_FASTOPEN:
2834                 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2835                     TCPF_LISTEN))) {
2836                         tcp_fastopen_init_key_once(net);
2837
2838                         fastopen_queue_tune(sk, val);
2839                 } else {
2840                         err = -EINVAL;
2841                 }
2842                 break;
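                /* Illustrative server-side use with an assumed queue depth:
                 * allow up to 16 pending Fast Open requests on a listener
                 * (the TFO_SERVER_ENABLE bit of net.ipv4.tcp_fastopen must
                 * also be set):
                 *
                 *      int qlen = 16;
                 *
                 *      setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN,
                 *                 &qlen, sizeof(qlen));
                 */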
2843         case TCP_FASTOPEN_CONNECT:
2844                 if (val > 1 || val < 0) {
2845                         err = -EINVAL;
2846                 } else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
2847                         if (sk->sk_state == TCP_CLOSE)
2848                                 tp->fastopen_connect = val;
2849                         else
2850                                 err = -EINVAL;
2851                 } else {
2852                         err = -EOPNOTSUPP;
2853                 }
2854                 break;
2855         case TCP_FASTOPEN_NO_COOKIE:
2856                 if (val > 1 || val < 0)
2857                         err = -EINVAL;
2858                 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2859                         err = -EINVAL;
2860                 else
2861                         tp->fastopen_no_cookie = val;
2862                 break;
2863         case TCP_TIMESTAMP:
2864                 if (!tp->repair)
2865                         err = -EPERM;
2866                 else
2867                         tp->tsoffset = val - tcp_time_stamp_raw();
2868                 break;
2869         case TCP_REPAIR_WINDOW:
2870                 err = tcp_repair_set_window(tp, optval, optlen);
2871                 break;
2872         case TCP_NOTSENT_LOWAT:
2873                 tp->notsent_lowat = val;
2874                 sk->sk_write_space(sk);
2875                 break;
2876         default:
2877                 err = -ENOPROTOOPT;
2878                 break;
2879         }
2880
2881         release_sock(sk);
2882         return err;
2883 }
2884
2885 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2886                    unsigned int optlen)
2887 {
2888         const struct inet_connection_sock *icsk = inet_csk(sk);
2889
2890         if (level != SOL_TCP)
2891                 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2892                                                      optval, optlen);
2893         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2894 }
2895 EXPORT_SYMBOL(tcp_setsockopt);
2896
2897 #ifdef CONFIG_COMPAT
2898 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2899                           char __user *optval, unsigned int optlen)
2900 {
2901         if (level != SOL_TCP)
2902                 return inet_csk_compat_setsockopt(sk, level, optname,
2903                                                   optval, optlen);
2904         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2905 }
2906 EXPORT_SYMBOL(compat_tcp_setsockopt);
2907 #endif
2908
2909 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
2910                                       struct tcp_info *info)
2911 {
2912         u64 stats[__TCP_CHRONO_MAX], total = 0;
2913         enum tcp_chrono i;
2914
2915         for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
2916                 stats[i] = tp->chrono_stat[i - 1];
2917                 if (i == tp->chrono_type)
2918                         stats[i] += tcp_jiffies32 - tp->chrono_start;
2919                 stats[i] *= USEC_PER_SEC / HZ;
2920                 total += stats[i];
2921         }
2922
2923         info->tcpi_busy_time = total;
2924         info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
2925         info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
2926 }
2927
2928 /* Return information about the state of a TCP endpoint in API format. */
2929 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2930 {
2931         const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
2932         const struct inet_connection_sock *icsk = inet_csk(sk);
2933         u32 now;
2934         u64 rate64;
2935         bool slow;
2936         u32 rate;
2937
2938         memset(info, 0, sizeof(*info));
2939         if (sk->sk_type != SOCK_STREAM)
2940                 return;
2941
2942         info->tcpi_state = inet_sk_state_load(sk);
2943
2944         /* Report meaningful fields for all TCP states, including listeners */
2945         rate = READ_ONCE(sk->sk_pacing_rate);
2946         rate64 = rate != ~0U ? rate : ~0ULL;
2947         info->tcpi_pacing_rate = rate64;
2948
2949         rate = READ_ONCE(sk->sk_max_pacing_rate);
2950         rate64 = rate != ~0U ? rate : ~0ULL;
2951         info->tcpi_max_pacing_rate = rate64;
2952
2953         info->tcpi_reordering = tp->reordering;
2954         info->tcpi_snd_cwnd = tp->snd_cwnd;
2955
2956         if (info->tcpi_state == TCP_LISTEN) {
2957                 /* Listeners alias the following fields:
2958                  * tcpi_unacked -> Number of children ready for accept()
2959                  * tcpi_sacked  -> max backlog
2960                  */
2961                 info->tcpi_unacked = sk->sk_ack_backlog;
2962                 info->tcpi_sacked = sk->sk_max_ack_backlog;
2963                 return;
2964         }
2965
2966         slow = lock_sock_fast(sk);
2967
2968         info->tcpi_ca_state = icsk->icsk_ca_state;
2969         info->tcpi_retransmits = icsk->icsk_retransmits;
2970         info->tcpi_probes = icsk->icsk_probes_out;
2971         info->tcpi_backoff = icsk->icsk_backoff;
2972
2973         if (tp->rx_opt.tstamp_ok)
2974                 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2975         if (tcp_is_sack(tp))
2976                 info->tcpi_options |= TCPI_OPT_SACK;
2977         if (tp->rx_opt.wscale_ok) {
2978                 info->tcpi_options |= TCPI_OPT_WSCALE;
2979                 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2980                 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2981         }
2982
2983         if (tp->ecn_flags & TCP_ECN_OK)
2984                 info->tcpi_options |= TCPI_OPT_ECN;
2985         if (tp->ecn_flags & TCP_ECN_SEEN)
2986                 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2987         if (tp->syn_data_acked)
2988                 info->tcpi_options |= TCPI_OPT_SYN_DATA;
2989
2990         info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2991         info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2992         info->tcpi_snd_mss = tp->mss_cache;
2993         info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2994
2995         info->tcpi_unacked = tp->packets_out;
2996         info->tcpi_sacked = tp->sacked_out;
2997
2998         info->tcpi_lost = tp->lost_out;
2999         info->tcpi_retrans = tp->retrans_out;
3000
3001         now = tcp_jiffies32;
3002         info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
3003         info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
3004         info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
3005
3006         info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
3007         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
3008         info->tcpi_rtt = tp->srtt_us >> 3;
3009         info->tcpi_rttvar = tp->mdev_us >> 2;
3010         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
3011         info->tcpi_advmss = tp->advmss;
3012
3013         info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
3014         info->tcpi_rcv_space = tp->rcvq_space.space;
3015
3016         info->tcpi_total_retrans = tp->total_retrans;
3017
3018         info->tcpi_bytes_acked = tp->bytes_acked;
3019         info->tcpi_bytes_received = tp->bytes_received;
3020         info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
3021         tcp_get_info_chrono_stats(tp, info);
3022
3023         info->tcpi_segs_out = tp->segs_out;
3024         info->tcpi_segs_in = tp->segs_in;
3025
3026         info->tcpi_min_rtt = tcp_min_rtt(tp);
3027         info->tcpi_data_segs_in = tp->data_segs_in;
3028         info->tcpi_data_segs_out = tp->data_segs_out;
3029
3030         info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
3031         rate64 = tcp_compute_delivery_rate(tp);
3032         if (rate64)
3033                 info->tcpi_delivery_rate = rate64;
3034         unlock_sock_fast(sk, slow);
3035 }
3036 EXPORT_SYMBOL_GPL(tcp_get_info);
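
/* Illustrative userspace consumer of the above (assumed socket fd):
 * getsockopt(TCP_INFO) snapshots this structure; a shorter-than-expected
 * length from an older kernel is normal and handled by the len protocol:
 *
 *      struct tcp_info ti;
 *      socklen_t len = sizeof(ti);
 *
 *      if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *              printf("rtt=%uus cwnd=%u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */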
3037
3038 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
3039 {
3040         const struct tcp_sock *tp = tcp_sk(sk);
3041         struct sk_buff *stats;
3042         struct tcp_info info;
3043         u64 rate64;
3044         u32 rate;
3045
3046         stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) +
3047                           5 * nla_total_size(sizeof(u32)) +
3048                           3 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
3049         if (!stats)
3050                 return NULL;
3051
3052         tcp_get_info_chrono_stats(tp, &info);
3053         nla_put_u64_64bit(stats, TCP_NLA_BUSY,
3054                           info.tcpi_busy_time, TCP_NLA_PAD);
3055         nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
3056                           info.tcpi_rwnd_limited, TCP_NLA_PAD);
3057         nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
3058                           info.tcpi_sndbuf_limited, TCP_NLA_PAD);
3059         nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
3060                           tp->data_segs_out, TCP_NLA_PAD);
3061         nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
3062                           tp->total_retrans, TCP_NLA_PAD);
3063
3064         rate = READ_ONCE(sk->sk_pacing_rate);
3065         rate64 = rate != ~0U ? rate : ~0ULL;
3066         nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
3067
3068         rate64 = tcp_compute_delivery_rate(tp);
3069         nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
3070
3071         nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
3072         nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
3073         nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
3074
3075         nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
3076         nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
3077         nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
3078
3079         nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
3080         nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
3081         return stats;
3082 }
3083
3084 static int do_tcp_getsockopt(struct sock *sk, int level,
3085                 int optname, char __user *optval, int __user *optlen)
3086 {
3087         struct inet_connection_sock *icsk = inet_csk(sk);
3088         struct tcp_sock *tp = tcp_sk(sk);
3089         struct net *net = sock_net(sk);
3090         int val, len;
3091
3092         if (get_user(len, optlen))
3093                 return -EFAULT;
3094
3095         if (len < 0)
3096                 return -EINVAL;
3097
3098         len = min_t(unsigned int, len, sizeof(int));
3099
3100         switch (optname) {
3101         case TCP_MAXSEG:
3102                 val = tp->mss_cache;
3103                 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
3104                         val = tp->rx_opt.user_mss;
3105                 if (tp->repair)
3106                         val = tp->rx_opt.mss_clamp;
3107                 break;
3108         case TCP_NODELAY:
3109                 val = !!(tp->nonagle&TCP_NAGLE_OFF);
3110                 break;
3111         case TCP_CORK:
3112                 val = !!(tp->nonagle&TCP_NAGLE_CORK);
3113                 break;
3114         case TCP_KEEPIDLE:
3115                 val = keepalive_time_when(tp) / HZ;
3116                 break;
3117         case TCP_KEEPINTVL:
3118                 val = keepalive_intvl_when(tp) / HZ;
3119                 break;
3120         case TCP_KEEPCNT:
3121                 val = keepalive_probes(tp);
3122                 break;
3123         case TCP_SYNCNT:
3124                 val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
3125                 break;
3126         case TCP_LINGER2:
3127                 val = tp->linger2;
3128                 if (val >= 0)
3129                         val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
3130                 break;
3131         case TCP_DEFER_ACCEPT:
3132                 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
3133                                       TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
3134                 break;
3135         case TCP_WINDOW_CLAMP:
3136                 val = tp->window_clamp;
3137                 break;
3138         case TCP_INFO: {
3139                 struct tcp_info info;
3140
3141                 if (get_user(len, optlen))
3142                         return -EFAULT;
3143
3144                 tcp_get_info(sk, &info);
3145
3146                 len = min_t(unsigned int, len, sizeof(info));
3147                 if (put_user(len, optlen))
3148                         return -EFAULT;
3149                 if (copy_to_user(optval, &info, len))
3150                         return -EFAULT;
3151                 return 0;
3152         }
3153         case TCP_CC_INFO: {
3154                 const struct tcp_congestion_ops *ca_ops;
3155                 union tcp_cc_info info;
3156                 size_t sz = 0;
3157                 int attr;
3158
3159                 if (get_user(len, optlen))
3160                         return -EFAULT;
3161
3162                 ca_ops = icsk->icsk_ca_ops;
3163                 if (ca_ops && ca_ops->get_info)
3164                         sz = ca_ops->get_info(sk, ~0U, &attr, &info);
3165
3166                 len = min_t(unsigned int, len, sz);
3167                 if (put_user(len, optlen))
3168                         return -EFAULT;
3169                 if (copy_to_user(optval, &info, len))
3170                         return -EFAULT;
3171                 return 0;
3172         }
3173         case TCP_QUICKACK:
3174                 val = !icsk->icsk_ack.pingpong;
3175                 break;
3176
3177         case TCP_CONGESTION:
3178                 if (get_user(len, optlen))
3179                         return -EFAULT;
3180                 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
3181                 if (put_user(len, optlen))
3182                         return -EFAULT;
3183                 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
3184                         return -EFAULT;
3185                 return 0;
3186
3187         case TCP_ULP:
3188                 if (get_user(len, optlen))
3189                         return -EFAULT;
3190                 len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
3191                 if (!icsk->icsk_ulp_ops) {
3192                         if (put_user(0, optlen))
3193                                 return -EFAULT;
3194                         return 0;
3195                 }
3196                 if (put_user(len, optlen))
3197                         return -EFAULT;
3198                 if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
3199                         return -EFAULT;
3200                 return 0;
3201
3202         case TCP_FASTOPEN_KEY: {
3203                 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
3204                 struct tcp_fastopen_context *ctx;
3205
3206                 if (get_user(len, optlen))
3207                         return -EFAULT;
3208
3209                 rcu_read_lock();
3210                 ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
3211                 if (ctx)
3212                         memcpy(key, ctx->key, sizeof(key));
3213                 else
3214                         len = 0;
3215                 rcu_read_unlock();
3216
3217                 len = min_t(unsigned int, len, sizeof(key));
3218                 if (put_user(len, optlen))
3219                         return -EFAULT;
3220                 if (copy_to_user(optval, key, len))
3221                         return -EFAULT;
3222                 return 0;
3223         }
3224         case TCP_THIN_LINEAR_TIMEOUTS:
3225                 val = tp->thin_lto;
3226                 break;
3227
3228         case TCP_THIN_DUPACK:
3229                 val = 0;
3230                 break;
3231
3232         case TCP_REPAIR:
3233                 val = tp->repair;
3234                 break;
3235
3236         case TCP_REPAIR_QUEUE:
3237                 if (tp->repair)
3238                         val = tp->repair_queue;
3239                 else
3240                         return -EINVAL;
3241                 break;
3242
3243         case TCP_REPAIR_WINDOW: {
3244                 struct tcp_repair_window opt;
3245
3246                 if (get_user(len, optlen))
3247                         return -EFAULT;
3248
3249                 if (len != sizeof(opt))
3250                         return -EINVAL;
3251
3252                 if (!tp->repair)
3253                         return -EPERM;
3254
3255                 opt.snd_wl1     = tp->snd_wl1;
3256                 opt.snd_wnd     = tp->snd_wnd;
3257                 opt.max_window  = tp->max_window;
3258                 opt.rcv_wnd     = tp->rcv_wnd;
3259                 opt.rcv_wup     = tp->rcv_wup;
3260
3261                 if (copy_to_user(optval, &opt, len))
3262                         return -EFAULT;
3263                 return 0;
3264         }
3265         case TCP_QUEUE_SEQ:
3266                 if (tp->repair_queue == TCP_SEND_QUEUE)
3267                         val = tp->write_seq;
3268                 else if (tp->repair_queue == TCP_RECV_QUEUE)
3269                         val = tp->rcv_nxt;
3270                 else
3271                         return -EINVAL;
3272                 break;
3273
3274         case TCP_USER_TIMEOUT:
3275                 val = jiffies_to_msecs(icsk->icsk_user_timeout);
3276                 break;
3277
3278         case TCP_FASTOPEN:
3279                 val = icsk->icsk_accept_queue.fastopenq.max_qlen;
3280                 break;
3281
3282         case TCP_FASTOPEN_CONNECT:
3283                 val = tp->fastopen_connect;
3284                 break;
3285
3286         case TCP_FASTOPEN_NO_COOKIE:
3287                 val = tp->fastopen_no_cookie;
3288                 break;
3289
3290         case TCP_TIMESTAMP:
3291                 val = tcp_time_stamp_raw() + tp->tsoffset;
3292                 break;
3293         case TCP_NOTSENT_LOWAT:
3294                 val = tp->notsent_lowat;
3295                 break;
3296         case TCP_SAVE_SYN:
3297                 val = tp->save_syn;
3298                 break;
3299         case TCP_SAVED_SYN: {
3300                 if (get_user(len, optlen))
3301                         return -EFAULT;
3302
3303                 lock_sock(sk);
3304                 if (tp->saved_syn) {
3305                         if (len < tp->saved_syn[0]) {
3306                                 if (put_user(tp->saved_syn[0], optlen)) {
3307                                         release_sock(sk);
3308                                         return -EFAULT;
3309                                 }
3310                                 release_sock(sk);
3311                                 return -EINVAL;
3312                         }
3313                         len = tp->saved_syn[0];
3314                         if (put_user(len, optlen)) {
3315                                 release_sock(sk);
3316                                 return -EFAULT;
3317                         }
3318                         if (copy_to_user(optval, tp->saved_syn + 1, len)) {
3319                                 release_sock(sk);
3320                                 return -EFAULT;
3321                         }
3322                         tcp_saved_syn_free(tp);
3323                         release_sock(sk);
3324                 } else {
3325                         release_sock(sk);
3326                         len = 0;
3327                         if (put_user(len, optlen))
3328                                 return -EFAULT;
3329                 }
3330                 return 0;
3331         }
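                /* Illustrative pairing (assumed descriptors): enable
                 * TCP_SAVE_SYN on the listener, then read the saved SYN
                 * headers once from each accepted child:
                 *
                 *      int one = 1;
                 *      char syn[512];
                 *      socklen_t len = sizeof(syn);
                 *
                 *      setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
                 *      cfd = accept(lfd, NULL, NULL);
                 *      getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len);
                 */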
3332         default:
3333                 return -ENOPROTOOPT;
3334         }
3335
3336         if (put_user(len, optlen))
3337                 return -EFAULT;
3338         if (copy_to_user(optval, &val, len))
3339                 return -EFAULT;
3340         return 0;
3341 }
3342
3343 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
3344                    int __user *optlen)
3345 {
3346         struct inet_connection_sock *icsk = inet_csk(sk);
3347
3348         if (level != SOL_TCP)
3349                 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
3350                                                      optval, optlen);
3351         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3352 }
3353 EXPORT_SYMBOL(tcp_getsockopt);
3354
3355 #ifdef CONFIG_COMPAT
3356 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
3357                           char __user *optval, int __user *optlen)
3358 {
3359         if (level != SOL_TCP)
3360                 return inet_csk_compat_getsockopt(sk, level, optname,
3361                                                   optval, optlen);
3362         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3363 }
3364 EXPORT_SYMBOL(compat_tcp_getsockopt);
3365 #endif
3366
3367 #ifdef CONFIG_TCP_MD5SIG
3368 static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
3369 static DEFINE_MUTEX(tcp_md5sig_mutex);
3370 static bool tcp_md5sig_pool_populated = false;
3371
3372 static void __tcp_alloc_md5sig_pool(void)
3373 {
3374         struct crypto_ahash *hash;
3375         int cpu;
3376
3377         hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
3378         if (IS_ERR(hash))
3379                 return;
3380
3381         for_each_possible_cpu(cpu) {
3382                 void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
3383                 struct ahash_request *req;
3384
3385                 if (!scratch) {
3386                         scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
3387                                                sizeof(struct tcphdr),
3388                                                GFP_KERNEL,
3389                                                cpu_to_node(cpu));
3390                         if (!scratch)
3391                                 return;
3392                         per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
3393                 }
3394                 if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
3395                         continue;
3396
3397                 req = ahash_request_alloc(hash, GFP_KERNEL);
3398                 if (!req)
3399                         return;
3400
3401                 ahash_request_set_callback(req, 0, NULL, NULL);
3402
3403                 per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
3404         }
3405         /* Before setting tcp_md5sig_pool_populated, we must commit all writes
3406          * to memory.  See the smp_rmb() in tcp_get_md5sig_pool().
3407          */
3408         smp_wmb();
3409         tcp_md5sig_pool_populated = true;
3410 }
3411
3412 bool tcp_alloc_md5sig_pool(void)
3413 {
3414         if (unlikely(!tcp_md5sig_pool_populated)) {
3415                 mutex_lock(&tcp_md5sig_mutex);
3416
3417                 if (!tcp_md5sig_pool_populated)
3418                         __tcp_alloc_md5sig_pool();
3419
3420                 mutex_unlock(&tcp_md5sig_mutex);
3421         }
3422         return tcp_md5sig_pool_populated;
3423 }
3424 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3425
3427 /**
3428  *      tcp_get_md5sig_pool - get md5sig_pool for this user
3429  *
3430  *      We use a percpu structure, so if we succeed, we exit with preemption
3431  *      and BH disabled, to make sure another thread or a softirq handler
3432  *      won't try to get the same context.
3433  */
3434 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3435 {
3436         local_bh_disable();
3437
3438         if (tcp_md5sig_pool_populated) {
3439                 /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
3440                 smp_rmb();
3441                 return this_cpu_ptr(&tcp_md5sig_pool);
3442         }
3443         local_bh_enable();
3444         return NULL;
3445 }
3446 EXPORT_SYMBOL(tcp_get_md5sig_pool);
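
/* Sketch of the typical in-kernel calling pattern (see tcp_v4_md5_hash_skb()
 * for a real caller); tcp_put_md5sig_pool() simply re-enables BH:
 *
 *      struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *      if (hp) {
 *              // crypto_ahash_init(hp->md5_req), tcp_md5_hash_key(), ...
 *              tcp_put_md5sig_pool();
 *      }
 */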
3447
3448 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3449                           const struct sk_buff *skb, unsigned int header_len)
3450 {
3451         struct scatterlist sg;
3452         const struct tcphdr *tp = tcp_hdr(skb);
3453         struct ahash_request *req = hp->md5_req;
3454         unsigned int i;
3455         const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3456                                            skb_headlen(skb) - header_len : 0;
3457         const struct skb_shared_info *shi = skb_shinfo(skb);
3458         struct sk_buff *frag_iter;
3459
3460         sg_init_table(&sg, 1);
3461
3462         sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3463         ahash_request_set_crypt(req, &sg, NULL, head_data_len);
3464         if (crypto_ahash_update(req))
3465                 return 1;
3466
3467         for (i = 0; i < shi->nr_frags; ++i) {
3468                 const struct skb_frag_struct *f = &shi->frags[i];
3469                 unsigned int offset = f->page_offset;
3470                 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3471
3472                 sg_set_page(&sg, page, skb_frag_size(f),
3473                             offset_in_page(offset));
3474                 ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
3475                 if (crypto_ahash_update(req))
3476                         return 1;
3477         }
3478
3479         skb_walk_frags(skb, frag_iter)
3480                 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3481                         return 1;
3482
3483         return 0;
3484 }
3485 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3486
3487 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3488 {
3489         struct scatterlist sg;
3490
3491         sg_init_one(&sg, key->key, key->keylen);
3492         ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
3493         return crypto_ahash_update(hp->md5_req);
3494 }
3495 EXPORT_SYMBOL(tcp_md5_hash_key);
3496
3497 #endif
3498
3499 void tcp_done(struct sock *sk)
3500 {
3501         struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3502
3503         if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3504                 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3505
3506         tcp_set_state(sk, TCP_CLOSE);
3507         tcp_clear_xmit_timers(sk);
3508         if (req)
3509                 reqsk_fastopen_remove(sk, req, false);
3510
3511         sk->sk_shutdown = SHUTDOWN_MASK;
3512
3513         if (!sock_flag(sk, SOCK_DEAD))
3514                 sk->sk_state_change(sk);
3515         else
3516                 inet_csk_destroy_sock(sk);
3517 }
3518 EXPORT_SYMBOL_GPL(tcp_done);
3519
3520 int tcp_abort(struct sock *sk, int err)
3521 {
3522         if (!sk_fullsock(sk)) {
3523                 if (sk->sk_state == TCP_NEW_SYN_RECV) {
3524                         struct request_sock *req = inet_reqsk(sk);
3525
3526                         local_bh_disable();
3527                         inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
3528                                                           req);
3529                         local_bh_enable();
3530                         return 0;
3531                 }
3532                 return -EOPNOTSUPP;
3533         }
3534
3535         /* Don't race with userspace socket closes such as tcp_close. */
3536         lock_sock(sk);
3537
3538         if (sk->sk_state == TCP_LISTEN) {
3539                 tcp_set_state(sk, TCP_CLOSE);
3540                 inet_csk_listen_stop(sk);
3541         }
3542
3543         /* Don't race with BH socket closes such as inet_csk_listen_stop. */
3544         local_bh_disable();
3545         bh_lock_sock(sk);
3546
3547         if (!sock_flag(sk, SOCK_DEAD)) {
3548                 sk->sk_err = err;
3549                 /* This barrier is coupled with smp_rmb() in tcp_poll() */
3550                 smp_wmb();
3551                 sk->sk_error_report(sk);
3552                 if (tcp_need_reset(sk->sk_state))
3553                         tcp_send_active_reset(sk, GFP_ATOMIC);
3554                 tcp_done(sk);
3555         }
3556
3557         bh_unlock_sock(sk);
3558         local_bh_enable();
3559         tcp_write_queue_purge(sk);
3560         release_sock(sk);
3561         return 0;
3562 }
3563 EXPORT_SYMBOL_GPL(tcp_abort);
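
/* tcp_abort() also backs the inet_diag SOCK_DESTROY operation (used by
 * e.g. "ss -K") when CONFIG_INET_DIAG_DESTROY is enabled.
 */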
3564
3565 extern struct tcp_congestion_ops tcp_reno;
3566
3567 static __initdata unsigned long thash_entries;
3568 static int __init set_thash_entries(char *str)
3569 {
3570         ssize_t ret;
3571
3572         if (!str)
3573                 return 0;
3574
3575         ret = kstrtoul(str, 0, &thash_entries);
3576         if (ret)
3577                 return 0;
3578
3579         return 1;
3580 }
3581 __setup("thash_entries=", set_thash_entries);
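
/* Illustrative: the established hash size can be forced on the kernel
 * command line, e.g. "thash_entries=131072"; zero (the default) lets
 * alloc_large_system_hash() size it from available memory below.
 */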
3582
3583 static void __init tcp_init_mem(void)
3584 {
3585         unsigned long limit = nr_free_buffer_pages() / 16;
3586
3587         limit = max(limit, 128UL);
3588         sysctl_tcp_mem[0] = limit / 4 * 3;              /* 4.68 % */
3589         sysctl_tcp_mem[1] = limit;                      /* 6.25 % */
3590         sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;      /* 9.37 % */
3591 }
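
/* Worked example with assumed numbers: given ~1 GiB of buffer pages,
 * nr_free_buffer_pages() is about 262144 (4 KiB pages), so limit is about
 * 16384 pages and sysctl_tcp_mem becomes roughly { 12288, 16384, 24576 }
 * pages, matching the percentages noted above.
 */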
3592
3593 void __init tcp_init(void)
3594 {
3595         int max_rshare, max_wshare, cnt;
3596         unsigned long limit;
3597         unsigned int i;
3598
3599         BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
3600                      FIELD_SIZEOF(struct sk_buff, cb));
3601
3602         percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
3603         percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
3604         inet_hashinfo_init(&tcp_hashinfo);
3605         inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
3606                             thash_entries, 21,  /* one slot per 2 MB */
3607                             0, 64 * 1024);
3608         tcp_hashinfo.bind_bucket_cachep =
3609                 kmem_cache_create("tcp_bind_bucket",
3610                                   sizeof(struct inet_bind_bucket), 0,
3611                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3612
3613         /* Size and allocate the main established and bind bucket
3614          * hash tables.
3615          *
3616          * The methodology is similar to that of the buffer cache.
3617          */
3618         tcp_hashinfo.ehash =
3619                 alloc_large_system_hash("TCP established",
3620                                         sizeof(struct inet_ehash_bucket),
3621                                         thash_entries,
3622                                         17, /* one slot per 128 KB of memory */
3623                                         0,
3624                                         NULL,
3625                                         &tcp_hashinfo.ehash_mask,
3626                                         0,
3627                                         thash_entries ? 0 : 512 * 1024);
3628         for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3629                 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3630
3631         if (inet_ehash_locks_alloc(&tcp_hashinfo))
3632                 panic("TCP: failed to alloc ehash_locks");
3633         tcp_hashinfo.bhash =
3634                 alloc_large_system_hash("TCP bind",
3635                                         sizeof(struct inet_bind_hashbucket),
3636                                         tcp_hashinfo.ehash_mask + 1,
3637                                         17, /* one slot per 128 KB of memory */
3638                                         0,
3639                                         &tcp_hashinfo.bhash_size,
3640                                         NULL,
3641                                         0,
3642                                         64 * 1024);
3643         tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3644         for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3645                 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3646                 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3647         }
3648
3650         cnt = tcp_hashinfo.ehash_mask + 1;
3651         sysctl_tcp_max_orphans = cnt / 2;
3652
3653         tcp_init_mem();
3654         /* Set per-socket limits to no more than 1/128 of the pressure threshold */
3655         limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3656         max_wshare = min(4UL*1024*1024, limit);
3657         max_rshare = min(6UL*1024*1024, limit);
3658
3659         init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3660         init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
3661         init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3662
3663         init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3664         init_net.ipv4.sysctl_tcp_rmem[1] = 87380;
3665         init_net.ipv4.sysctl_tcp_rmem[2] = max(87380, max_rshare);
3666
3667         pr_info("Hash tables configured (established %u bind %u)\n",
3668                 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3669
3670         tcp_v4_init();
3671         tcp_metrics_init();
3672         BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
3673         tcp_tasklet_init();
3674 }