// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
#include <net/rstreason.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
	if (tp->tcp_usec_ts)
		elapsed /= USEC_PER_MSEC;

	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

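/* Worked example (illustrative, not part of the original source): with
 * TCP_USER_TIMEOUT set to 5000 ms and 3200 ms already elapsed since
 * retrans_stamp, remaining = 1800 ms, so the next RTO is clamped to
 * min(icsk_rto, msecs_to_jiffies(1800)).  Note the unit mix: the user
 * timeout and 'elapsed' are in milliseconds, while icsk_rto and the
 * return value are in jiffies.
 */
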
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

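/* Illustrative example: if TCP_USER_TIMEOUT is 10000 ms and the first
 * zero-window probe went out 9 s ago, only ~1 s of budget is left, so a
 * nominal 'when' of 4 s is cut down to ~1 s.  The max_t() against
 * TCP_TIMEOUT_MIN (2 jiffies) enforces a floor so the timer is never
 * armed with a zero delay.
 */
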
/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  These criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for a long time, or did not transmit
	 * anything for a long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*tcp_rto_max(sk) || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_TCP_ABORT_ON_MEMORY);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

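/* Sanity check on the "8 retries" comment above (illustrative): with
 * exponential backoff from a 200 ms RTO, tcp_model_timeout(sk, 8, 200 ms)
 * yields ((2 << 8) - 1) * 200 ms = 102.2 s, i.e. just over 100 seconds.
 */
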
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

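/* Illustrative walk-through of the shrink step above (assuming the usual
 * sysctl defaults tcp_base_mss = 1024, tcp_mtu_probe_floor = 48,
 * tcp_min_snd_mss = 48): if the current search_low maps to a 1400 byte
 * MSS, halving gives 700; min(1024, 700) and the two floors leave 700,
 * and search_low is lowered to the corresponding MTU for the next round
 * of black hole probing.
 */
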
static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(tcp_rto_max(sk) / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * tcp_rto_max(sk);
	return jiffies_to_msecs(timeout);
}

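/* Worked example (illustrative): with rto_base = TCP_RTO_MIN (200 ms) and
 * the default rto_max of 120 s, linear_backoff_thresh = ilog2(600) = 9.
 * For boundary = 15 (the default tcp_retries2) the model gives
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s ~= 924.6 s,
 * the familiar ~15.4 minute write timeout.
 */
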
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP Connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int start_ts, delta;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tp->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	if (tp->tcp_usec_ts) {
		/* delta may be off by up to a jiffy due to timer granularity. */
		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
	}
	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}

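/* Note on units (illustrative): 'timeout' is kept in milliseconds here
 * (tcp_model_timeout() returns ms, and a caller-supplied value such as
 * icsk_user_timeout is in ms too), so the usec-timestamp branch scales it
 * by USEC_PER_MSEC before comparing against the microsecond delta, while
 * the legacy branch compares millisecond timestamps directly.
 */
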
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < tcp_rto_max(sk);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);
	mptcp_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handling the sack compression case */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk_delack_timeout(icsk), jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk_delack_timeout(icsk));
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}

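/* ATO inflation example (illustrative): starting from TCP_ATO_MIN
 * (HZ / 25 jiffies, i.e. 40 ms), each missed delayed ACK doubles the
 * timeout, 40 -> 80 -> 160 ms and so on, capped at the current icsk_rto.
 */
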
/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			timer_container_of(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	/* Avoid taking socket spinlock if there is no ACK to send.
	 * The compressed_ack check is racy, but a separate hrtimer
	 * will take care of it eventually.
	 */
	if (!(smp_load_acquire(&icsk->icsk_ack.pending) & ICSK_ACK_TIMER) &&
	    !READ_ONCE(tcp_sk(sk)->compressed_ack))
		goto out;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		    msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		unsigned int rto_max = tcp_rto_max(sk);
		const bool alive = inet_csk_rto_backoff(icsk, rto_max) < rto_max;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

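/* Timeline sketch (illustrative): a live, non-orphaned socket with the
 * default tcp_retries2 = 15 sends up to 15 exponentially backed-off
 * zero-window probes before aborting; with TCP_USER_TIMEOUT = 30000 ms
 * it instead aborts once 30 s have passed since icsk_probes_tstamp,
 * whichever limit trips first.
 */
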
static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	icsk->icsk_retransmits++;
	tp->total_rto++;
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp_ts(tp);
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     req->timeout << req->num_timeout, false);
}

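/* Backoff sketch (illustrative, assuming the usual initial req->timeout of
 * TCP_TIMEOUT_INIT, 1 s): the rearm above schedules 1 s << 1 = 2 s after
 * the first timeout, then 4 s, 8 s, ... until num_timeout reaches
 * max_retries (sysctl_tcp_synack_retries + 1 by default, i.e. 6).
 */
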
static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	const struct tcp_sock *tp = tcp_sk(sk);
	int timeout = tcp_rto_max(sk) * 2;
	s32 rcv_delta;

	if (user_timeout) {
		/* If user application specified a TCP_USER_TIMEOUT,
		 * it does not want win 0 packets to 'reset the timer'
		 * while retransmits are not making progress.
		 */
		if (rtx_delta > user_timeout)
			return true;
		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
	}
	/* Note: timer interrupt might have been delayed by at least one jiffy,
	 * and tp->rcv_tstamp might very well have been written recently.
	 * rcv_delta can thus be negative.
	 */
	rcv_delta = icsk_timeout(icsk) - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return msecs_to_jiffies(rtx_delta) > timeout;
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
		if (tp->tcp_usec_ts)
			rtx_delta /= USEC_PER_MSEC;

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &inet->inet_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &sk->sk_v6_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion;
		 * let senders fight for local resources conservatively.
		 */
		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				     TCP_RESOURCE_PROBE_INTERVAL,
				     false);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continued hammering of
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       tcp_rto_max(sk));
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   tp->total_rto >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_backoff++;
		icsk->icsk_rto = min(icsk->icsk_rto << 1, tcp_rto_max(sk));
	}
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     tcp_clamp_rto_to_user_timeout(sk), false);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}

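/* Thin-stream example (illustrative): a flow that is "thin" (roughly,
 * fewer than four segments in flight) keeps retransmitting at the freshly
 * computed RTO, with no icsk_backoff growth, for the first
 * TCP_THIN_LINEAR_RETRIES (6) retransmissions; only after that does the
 * usual doubling up to tcp_rto_max() kick in.
 */
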
/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer() and tcp_release_cb().
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk_timeout(icsk), jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk_timeout(icsk));
		return;
	}
	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		smp_store_release(&icsk->icsk_pending, 0);
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		smp_store_release(&icsk->icsk_pending, 0);
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			timer_container_of(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	/* Avoid locking the socket when there is no pending event. */
	if (!smp_load_acquire(&icsk->icsk_pending))
		goto out;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_IPV6_MOD(tcp_syn_ack_timeout);

void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

static void tcp_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		tcp_delete_keepalive_timer(sk);
}
EXPORT_IPV6_MOD_GPL(tcp_set_keepalive);

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = timer_container_of(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		tcp_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_TCP_STATE);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((user_timeout != 0 &&
		     elapsed >= msecs_to_jiffies(user_timeout) &&
		     icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	tcp_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

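/* Keepalive timeline (illustrative, with the default sysctls
 * tcp_keepalive_time = 7200 s, tcp_keepalive_intvl = 75 s and
 * tcp_keepalive_probes = 9): after 2 hours of idleness the first probe
 * goes out, then one every 75 s; if none of the 9 probes is answered the
 * connection is reset roughly 2 h 11 min after the last data.
 */
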
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_mstamp_refresh(tp);
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_setup(&tcp_sk(sk)->pacing_timer, tcp_pace_kick, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_SOFT);

	hrtimer_setup(&tcp_sk(sk)->compressed_ack_timer, tcp_compressed_ack_kick, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED_SOFT);
}