/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is direct violation of TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively
 *     configured limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

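/* For illustration (not part of the original code): with the minimal
 * RTO of 200 msec, 8 exponentially backed-off retries correspond to a
 * timeout of ((2 << 8) - 1) * 200 msec ~= 102 sec
 * (0.2 + 0.4 + ... + 51.2 sec), which is where the ">100 seconds"
 * above comes from; see retransmits_timed_out() below for the formula.
 */
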
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!net->ipv4.sysctl_tcp_mtu_probing)
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
		mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len);
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

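/* For illustration (not part of the original code, values hypothetical):
 * once probing is enabled, each further timeout halves the MSS derived
 * from the current search floor. E.g. with search_low mapping to an MSS
 * of 1400 and sysctl_tcp_base_mss = 1024, the new floor becomes
 * min(1024, 1400 >> 1) = 700 bytes of MSS, clamped from below by
 * 68 - tcp_header_len, a nod to the historical 68-byte IPv4 minimum MTU.
 */
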
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated from
 *             TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	const unsigned int rto_base = TCP_RTO_MIN;
	unsigned int linear_backoff_thresh, start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts)) {
		struct sk_buff *head = tcp_rtx_queue_head(sk);

		if (!head)
			return false;
		start_ts = tcp_skb_timestamp(head);
	}

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}

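/* For illustration (not part of the original code): with
 * TCP_RTO_MIN = 200 msec and TCP_RTO_MAX = 120 sec,
 * linear_backoff_thresh = ilog2(600) = 9. For boundary <= 9 the
 * timeout stays purely exponential, e.g. boundary = 9 gives
 * (2^10 - 1) * 200 msec ~= 204.6 sec; above that, each extra
 * retransmission adds a flat TCP_RTO_MAX, e.g. boundary = 15 gives
 * 204.6 sec + 6 * 120 sec = 924.6 sec, the familiar ~15.4 minute
 * default of tcp_retries2.
 */
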
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
		} else if (!tp->syn_data && !tp->syn_fastopen) {
			sk_rethink_txhash(sk);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	}
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	return 0;
}

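/* For illustration (not part of the original code, values hypothetical):
 * the icsk_user_timeout consulted above is set from userspace via the
 * TCP_USER_TIMEOUT socket option, in milliseconds:
 *
 *	unsigned int tmo_ms = 30000;	// abort after 30 sec stalled
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 *
 * A non-zero value overrides the boundary-based default that
 * retransmits_timed_out() would otherwise compute.
 */
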
/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t: Pointer to the expired timer. (The socket is recovered via
 *     from_timer() from the enclosing inet_connection_sock.)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		icsk->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(skb);
	if (!start_ts)
		skb->skb_mstamp = tp->tcp_mstamp;
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp(tp) - start_ts) >
		 jiffies_to_msecs(icsk->icsk_user_timeout))
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

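/* For illustration (not part of the original code): on a live,
 * non-orphaned socket without TCP_USER_TIMEOUT, a zero-window probe
 * sequence never aborts by itself: every ACK from the peer (even one
 * still advertising a zero window) resets icsk_probes_out, so only a
 * completely silent peer accumulates probes toward
 * sysctl_tcp_retries2.
 */
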
/*
 * Timer for Fast Open socket to retransmit SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk: Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_rtx_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}

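/* For illustration (not part of the original code): with
 * TCP_THIN_LINEAR_RETRIES = 6 (its value at the time of writing), a
 * thin stream whose RTO is 300 msec retransmits at a flat 300 msec
 * spacing for the first six attempts (1.8 sec in total) rather than
 * the exponential 0.3, 0.6, ..., 9.6 sec (18.9 sec in total), and only
 * then falls back to doubling.
 */
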
/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer()
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

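/* For illustration (not part of the original code, values hypothetical):
 * the keepalive parameters consumed above come from userspace:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * i.e. start probing after 60 sec of idle, probe every 10 sec, and
 * reset the connection after 5 unanswered probes.
 */
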
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack)
			tcp_send_ack(sk);
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}