/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

static void tcp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk->sk_error_report(sk);

        tcp_done(sk);
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is direct violation of TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on orphaned socket.
 *
 * Criteria is still not confirmed experimentally and may change.
 * We kill the socket, if:
 * 1. If number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. If we have strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int shift = 0;

        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
        if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                shift++;

        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
                shift++;

        if (tcp_check_oom(sk, shift)) {
                /* Catch exceptional cases, when connection requires reset.
                 * 1. Last segment was sent recently. */
                if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /* 2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }
        return 0;
}

/* Calculate maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
        int retries = sysctl_tcp_orphan_retries; /* May be zero. */

        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
                retries = 0;

        /* However, if socket sent something recently, select some safe
         * number of retries. 8 corresponds to >100 seconds with minimal
         * RTO of 200msec. */
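        /* Worked example: using the closed form in retransmits_timed_out()
         * below, 8 retries at the minimal 200 msec RTO give
         * ((2 << 8) - 1) * 200 msec, i.e. roughly 102 seconds of retrying
         * before the orphan is finally killed.
         */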
        if (retries == 0 && alive)
                retries = 8;
        return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
        struct net *net = sock_net(sk);

        /* Black hole detection */
        if (net->ipv4.sysctl_tcp_mtu_probing) {
                if (!icsk->icsk_mtup.enabled) {
                        icsk->icsk_mtup.enabled = 1;
                        icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
                        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                } else {
                        struct net *net = sock_net(sk);
                        struct tcp_sock *tp = tcp_sk(sk);
                        int mss;

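                        /* Each timeout shrinks the probe floor: the MSS
                         * implied by search_low is halved, but never raised
                         * above tcp_base_mss and never lowered below
                         * 68 bytes minus the TCP header length.  E.g. an
                         * implied MSS of 1400 becomes min(tcp_base_mss, 700)
                         * before being converted back to an MTU.
                         */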
                        mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                        mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
                        mss = max(mss, 68 - tp->tcp_header_len);
                        icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
                        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                }
        }
}

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or of TCP_TIMEOUT_INIT
 * if the syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
                                  unsigned int boundary,
                                  unsigned int timeout,
                                  bool syn_set)
{
        unsigned int linear_backoff_thresh, start_ts;
        unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

        if (!inet_csk(sk)->icsk_retransmits)
                return false;

        start_ts = tcp_sk(sk)->retrans_stamp;
        if (unlikely(!start_ts))
                start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

        if (likely(timeout == 0)) {
                linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

                if (boundary <= linear_backoff_thresh)
                        timeout = ((2 << boundary) - 1) * rto_base;
                else
                        timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                                (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
        }
        return (tcp_time_stamp - start_ts) >= timeout;
}
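/* Worked examples, assuming the defaults TCP_RTO_MIN = 200 msec,
 * TCP_TIMEOUT_INIT = 1 sec and TCP_RTO_MAX = 120 sec:
 *
 * - Established socket, boundary = tcp_retries2 = 15:
 *   linear_backoff_thresh = ilog2(120 sec / 200 msec) = 9, so the limit is
 *   ((2 << 9) - 1) * 200 msec + (15 - 9) * 120 sec ~= 924.6 sec (~15.4 min).
 *
 * - SYN case (syn_set), boundary = tcp_syn_retries = 6:
 *   linear_backoff_thresh = ilog2(120) = 6, so the limit is
 *   ((2 << 6) - 1) * 1 sec = 127 sec.
 */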

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int retry_until;
        bool do_reset, syn_set = false;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
                        if (tp->syn_fastopen || tp->syn_data)
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
                        if (tp->syn_data && icsk->icsk_retransmits == 1)
                                NET_INC_STATS_BH(sock_net(sk),
                                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
                syn_set = true;
        } else {
                if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
                        /* Some middle-boxes may black-hole Fast Open _after_
                         * the handshake. Therefore we conservatively disable
                         * Fast Open on this path on recurring timeouts with
                         * few or zero bytes acked after Fast Open.
                         */
                        if (tp->syn_data_acked &&
                            tp->bytes_acked <= tp->rx_opt.mss_clamp) {
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
                                if (icsk->icsk_retransmits == sysctl_tcp_retries1)
                                        NET_INC_STATS_BH(sock_net(sk),
                                                         LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                        }
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);

                        dst_negative_advice(sk);
                }

                retry_until = sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
                                !retransmits_timed_out(sk, retry_until, 0, 0);

                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
        }

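        /* For established sockets a non-zero icsk_user_timeout overrides the
         * boundary-derived timeout below.  It comes from the TCP_USER_TIMEOUT
         * socket option (milliseconds); roughly, from user space:
         *
         *	unsigned int ms = 10000;
         *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
         *
         * would cap the retransmission phase at about 10 seconds.
         */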
        if (retransmits_timed_out(sk, retry_until,
                                  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
        }
        return 0;
}

void tcp_delack_timer_handler(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk_mem_reclaim_partial(sk);

        if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;

        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                goto out;
        }
        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;

                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

                while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk_backlog_rcv(sk, skb);

                tp->ucopy.memory = 0;
        }

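        /* A delayed ACK that could not be sent in time adjusts the ATO below:
         * in non-interactive mode it doubles (e.g. starting from TCP_ATO_MIN,
         * 40 msec: 40, 80, 160, ... msec, capped at icsk_rto), while in
         * interactive (pingpong) mode we drop back to TCP_ATO_MIN instead.
         */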
        if (inet_csk_ack_scheduled(sk)) {
                if (!icsk->icsk_ack.pingpong) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        icsk->icsk_ack.pingpong = 0;
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
                tcp_send_ack(sk);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }

out:
        if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
}

static void tcp_delack_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
                inet_csk(sk)->icsk_ack.blocked = 1;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;
        u32 start_ts;

        if (tp->packets_out || !tcp_send_head(sk)) {
                icsk->icsk_probes_out = 0;
                return;
        }

        /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
         * long as the receiver continues to respond to probes. We support this
         * by default and reset icsk_probes_out with incoming ACKs. But if the
         * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
         * kill the socket when the retry count and the time exceed the
         * corresponding system limit. We also implement similar policy when
         * we use RTO to probe window in tcp_retransmit_timer().
         */
        start_ts = tcp_skb_timestamp(tcp_send_head(sk));
        if (!start_ts)
                skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
        else if (icsk->icsk_user_timeout &&
                 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
                goto abort;

        max_probes = sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
                        goto abort;
                if (tcp_out_of_resources(sk, true))
                        return;
        }

        if (icsk->icsk_probes_out > max_probes) {
abort:          tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
                tcp_send_probe0(sk);
        }
}

/*
 * Timer for Fast Open socket to retransmit SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
            sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
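        /* With the default tcp_synack_retries of 5 this allows up to 6 SYN-ACK
         * retransmissions for a Fast Open child; each round below re-arms the
         * timer at TCP_TIMEOUT_INIT << num_timeout, i.e. the interval roughly
         * doubles from 1 sec per round, capped at TCP_RTO_MAX.
         */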
        struct request_sock *req;

        req = tcp_sk(sk)->fastopen_rsk;
        req->rsk_ops->syn_ack_timeout(req);

        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
        }
        /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
         * returned from rtx_syn_ack() to make it more persistent like
         * regular retransmit because if the child socket has been accepted
         * it's not good to give up too easily.
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/*
 *	The TCP retransmit timer.
 */

void tcp_retransmit_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (tp->fastopen_rsk) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
                tcp_fastopen_synack_timer(sk);
                /* Before we receive ACK to our SYN-ACK don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
                return;
        }
        if (!tp->packets_out)
                goto out;

        WARN_ON(tcp_write_queue_empty(sk));

        tp->tlp_high_seq = 0;

        if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
            !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
                /* Receiver dastardly shrinks window. Our retransmits
                 * become zero probes, but we should not timeout this
                 * connection. If the socket is an orphan, time it out,
                 * we cannot allow such beasts to hang infinitely.
                 */
                struct inet_sock *inet = inet_sk(sk);
                if (sk->sk_family == AF_INET) {
                        net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &inet->inet_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &sk->sk_v6_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#endif
                if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
                tcp_enter_loss(sk);
                tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }

        if (tcp_write_timeout(sk))
                goto out;

        if (icsk->icsk_retransmits == 0) {
                int mib_idx;

                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
                        else
                                mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
                } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
                           tp->sacked_out) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
                } else {
                        mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
                NET_INC_STATS_BH(sock_net(sk), mib_idx);
        }

        tcp_enter_loss(sk);

        if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
                /* Retransmission failed because of local congestion,
                 * do not backoff.
                 */
                if (!icsk->icsk_retransmits)
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
                                          TCP_RTO_MAX);
                goto out;
        }

        /* Increase the timeout each time we retransmit.  Note that
         * we do not increase the rtt estimate.  rto is initialized
         * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
         * that doubling rto each time is the least we can get away with.
         * In KA9Q, Karn uses this for the first few times, and then
         * goes to quadratic.  netBSD doubles, but only goes up to *64,
         * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
         * defined in the protocol as the maximum possible RTT.  I guess
         * we'll have to use something other than TCP to talk to the
         * University of Mars.
         *
         * PAWS allows us longer timeouts and large windows, so once
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
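        /* With the doubling in out_reset_timer below and the default
         * TCP_RTO_MIN of 200 msec, an RTO that starts at the minimum grows
         * 0.2, 0.4, 0.8, ... sec and is clamped to TCP_RTO_MAX (120 sec)
         * after roughly ten doublings.
         */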
        icsk->icsk_backoff++;
        icsk->icsk_retransmits++;

out_reset_timer:
        /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
         * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
         * might be increased if the stream oscillates between thin and thick,
         * thus the old value might already be too high compared to the value
         * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
         * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
         * exponential backoff behaviour, to avoid endlessly hammering
         * linear-timeout retransmissions into a black hole.
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
            (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
                icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
        } else {
                /* Use normal (exponential) backoff */
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
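        /* Roughly: tcp_stream_is_thin() (see include/net/tcp.h) treats a flow
         * with only a few segments in flight (fewer than four, past initial
         * slow start) as thin, so the linear branch above keeps the
         * RTT-derived RTO for the first TCP_THIN_LINEAR_RETRIES timeouts;
         * thin_lto is set per socket via the TCP_THIN_LINEAR_TIMEOUTS option.
         */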
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
        if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
                __sk_dst_reset(sk);

out:;
}

void tcp_write_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;

        if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                goto out;
        }

        event = icsk->icsk_pending;

        switch (event) {
        case ICSK_TIME_EARLY_RETRANS:
                tcp_resume_early_retransmit(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
                break;
        case ICSK_TIME_RETRANS:
                icsk->icsk_pending = 0;
                tcp_retransmit_timer(sk);
                break;
        case ICSK_TIME_PROBE0:
                icsk->icsk_pending = 0;
                tcp_probe_timer(sk);
                break;
        }

out:
        sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
        struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

        NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                return;

        if (val && !sock_flag(sk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
        else if (!val)
                inet_csk_delete_keepalive_timer(sk);
}
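/* tcp_set_keepalive() is reached via sock_setsockopt() when SO_KEEPALIVE is
 * toggled; roughly, from user space:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *
 * with per-socket tuning available through TCP_KEEPIDLE, TCP_KEEPINTVL and
 * TCP_KEEPCNT.
 */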


static void tcp_keepalive_timer (unsigned long data)
{
        struct sock *sk = (struct sock *) data;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 elapsed;

        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                inet_csk_reset_keepalive_timer (sk, HZ/20);
                goto out;
        }

        if (sk->sk_state == TCP_LISTEN) {
                pr_err("Hmm... keepalive on a LISTEN ???\n");
                goto out;
        }

        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
                tcp_send_active_reset(sk, GFP_ATOMIC);
                goto death;
        }

        if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
                goto out;

        elapsed = keepalive_time_when(tp);

        /* It is alive without keepalive 8) */
        if (tp->packets_out || tcp_send_head(sk))
                goto resched;

        elapsed = keepalive_time_elapsed(tp);

        if (elapsed >= keepalive_time_when(tp)) {
                /* If the TCP_USER_TIMEOUT option is enabled, use that
                 * to determine when to time out instead.
                 */
                if ((icsk->icsk_user_timeout != 0 &&
                    elapsed >= icsk->icsk_user_timeout &&
                    icsk->icsk_probes_out > 0) ||
                    (icsk->icsk_user_timeout == 0 &&
                    icsk->icsk_probes_out >= keepalive_probes(tp))) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If keepalive was lost due to local congestion,
                         * try harder.
                         */
                        elapsed = TCP_RESOURCE_PROBE_INTERVAL;
                }
        } else {
                /* It is tp->rcv_tstamp + keepalive_time_when(tp) */
                elapsed = keepalive_time_when(tp) - elapsed;
        }

        sk_mem_reclaim(sk);

resched:
        inet_csk_reset_keepalive_timer (sk, elapsed);
        goto out;

death:
        tcp_done(sk);

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
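/* With the default sysctls (tcp_keepalive_time = 7200 sec, tcp_keepalive_intvl
 * = 75 sec, tcp_keepalive_probes = 9), an idle connection whose peer has gone
 * silent is reset roughly 7200 + 9 * 75 = 7875 sec (about 2 hours 11 minutes)
 * after the last activity, unless TCP_USER_TIMEOUT changes the cut-off as
 * handled above.
 */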

void tcp_init_xmit_timers(struct sock *sk)
{
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
}