#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

int sysctl_tcp_nometrics_save __read_mostly;
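/* A small per-destination cache of TCP metrics (RTT, RTTVAR, ssthresh, cwnd,
 * reordering) learned from finished connections, so that later connections
 * to the same peer can start from better estimates.  Entries hang off a
 * per-netns hash table and are looked up under RCU.
 */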
enum tcp_metric_index {
    TCP_METRIC_RTT,
    TCP_METRIC_RTTVAR,
    TCP_METRIC_SSTHRESH,
    TCP_METRIC_CWND,
    TCP_METRIC_REORDERING,

    /* Always last. */
    TCP_METRIC_MAX,
};

struct tcp_metrics_block {
    struct tcp_metrics_block __rcu *tcpm_next;
    struct inetpeer_addr tcpm_addr;
    unsigned long tcpm_stamp;
    u32 tcpm_ts;
    u32 tcpm_ts_stamp;
    u32 tcpm_lock;
    u32 tcpm_vals[TCP_METRIC_MAX];
};
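/* tcpm_lock is a bitmask over enum tcp_metric_index: a set bit means the
 * corresponding metric is locked on the route (RTAX_LOCK) and must not be
 * overwritten.  RTT and RTTVAR are cached in milliseconds, so the helpers
 * below convert to and from jiffies at the access points.
 */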
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
    return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
    return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
    return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
    tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
    tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}
static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
    const struct in6_addr *a6, *b6;

    if (a->family != b->family)
        return false;
    if (a->family == AF_INET)
        return a->addr.a4 == b->addr.a4;

    a6 = (const struct in6_addr *) &a->addr.a6[0];
    b6 = (const struct in6_addr *) &b->addr.a6[0];

    return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
    struct tcp_metrics_block __rcu *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
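/* (Re)seed a metrics block from its routing cache entry: record which RTAX_*
 * values are locked on the route and copy in the raw metric values.  The
 * cached timestamp fields are cleared; only the remember/timewait paths
 * below fill them in.
 */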
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
    u32 val;

    val = 0;
    if (dst_metric_locked(dst, RTAX_RTT))
        val |= 1 << TCP_METRIC_RTT;
    if (dst_metric_locked(dst, RTAX_RTTVAR))
        val |= 1 << TCP_METRIC_RTTVAR;
    if (dst_metric_locked(dst, RTAX_SSTHRESH))
        val |= 1 << TCP_METRIC_SSTHRESH;
    if (dst_metric_locked(dst, RTAX_CWND))
        val |= 1 << TCP_METRIC_CWND;
    if (dst_metric_locked(dst, RTAX_REORDERING))
        val |= 1 << TCP_METRIC_REORDERING;
    tm->tcpm_lock = val;

    tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
    tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
    tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
    tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
    tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
    tm->tcpm_ts = 0;
    tm->tcpm_ts_stamp = 0;
}
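/* Create a metrics block for @addr or, when the caller asks for reclaim
 * (the chain was deeper than the reclaim depth), recycle the entry with the
 * oldest tcpm_stamp in the bucket.  Writers serialize on tcp_metrics_lock;
 * readers only ever walk the chains under RCU.
 */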
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
    struct tcp_metrics_block *tm;
    struct net *net;

    spin_lock_bh(&tcp_metrics_lock);
    net = dev_net(dst->dev);
    if (unlikely(reclaim)) {
        struct tcp_metrics_block *oldest;

        oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
        for (tm = rcu_dereference(oldest->tcpm_next); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
            if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                oldest = tm;
        }
        tm = oldest;
    } else {
        tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
        if (!tm)
            goto out_unlock;
    }
    tm->tcpm_addr = *addr;
    tm->tcpm_stamp = jiffies;

    tcpm_suck_dst(tm, dst);

    if (likely(!reclaim)) {
        tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
        rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
    }

out_unlock:
    spin_unlock_bh(&tcp_metrics_lock);
    return tm;
}
#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
    if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
        tcpm_suck_dst(tm, dst);
}
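/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries without a
 * match returns the TCP_METRICS_RECLAIM_PTR sentinel instead of NULL, which
 * tells tcp_get_metrics() to recycle an existing entry instead of allocating
 * a new one.
 */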
#define TCP_METRICS_RECLAIM_DEPTH 5
#define TCP_METRICS_RECLAIM_PTR   (struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
    if (tm)
        return tm;
    if (depth > TCP_METRICS_RECLAIM_DEPTH)
        return TCP_METRICS_RECLAIM_PTR;
    return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
    struct tcp_metrics_block *tm;
    int depth = 0;

    for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
         tm = rcu_dereference(tm->tcpm_next)) {
        if (addr_same(&tm->tcpm_addr, addr))
            break;
        depth++;
    }
    return tcp_get_encode(tm, depth);
}
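/* The lookup helpers below (request_sock, timewait and full socket flavors)
 * all hash the peer address the same way: the IPv4 address, or the XOR of
 * the four IPv6 words, folded by XORing the high bytes down and masked with
 * the per-netns tcp_metrics_hash_mask.
 */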
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
    struct tcp_metrics_block *tm;
    struct inetpeer_addr addr;
    unsigned int hash;
    struct net *net;

    addr.family = req->rsk_ops->family;
    switch (addr.family) {
    case AF_INET:
        addr.addr.a4 = inet_rsk(req)->rmt_addr;
        hash = (__force unsigned int) addr.addr.a4;
        break;
    case AF_INET6:
        *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
        hash = ((__force unsigned int) addr.addr.a6[0] ^
                (__force unsigned int) addr.addr.a6[1] ^
                (__force unsigned int) addr.addr.a6[2] ^
                (__force unsigned int) addr.addr.a6[3]);
        break;
    default:
        return NULL;
    }

    hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

    net = dev_net(dst->dev);
    hash &= net->ipv4.tcp_metrics_hash_mask;

    for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
         tm = rcu_dereference(tm->tcpm_next)) {
        if (addr_same(&tm->tcpm_addr, &addr))
            break;
    }
    tcpm_check_stamp(tm, dst);
    return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
    struct inet6_timewait_sock *tw6;
    struct tcp_metrics_block *tm;
    struct inetpeer_addr addr;
    unsigned int hash;
    struct net *net;

    addr.family = tw->tw_family;
    switch (addr.family) {
    case AF_INET:
        addr.addr.a4 = tw->tw_daddr;
        hash = (__force unsigned int) addr.addr.a4;
        break;
    case AF_INET6:
        tw6 = inet6_twsk((struct sock *)tw);
        *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
        hash = ((__force unsigned int) addr.addr.a6[0] ^
                (__force unsigned int) addr.addr.a6[1] ^
                (__force unsigned int) addr.addr.a6[2] ^
                (__force unsigned int) addr.addr.a6[3]);
        break;
    default:
        return NULL;
    }

    hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

    net = twsk_net(tw);
    hash &= net->ipv4.tcp_metrics_hash_mask;

    for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
         tm = rcu_dereference(tm->tcpm_next)) {
        if (addr_same(&tm->tcpm_addr, &addr))
            break;
    }
    return tm;
}
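/* Look up (and optionally create) the metrics block for a connected socket's
 * peer.  A TCP_METRICS_RECLAIM_PTR result from the lookup is turned into
 * "not found, recycle on create"; entries older than TCP_METRICS_TIMEOUT are
 * refreshed from the dst instead.
 */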
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
    struct tcp_metrics_block *tm;
    struct inetpeer_addr addr;
    unsigned int hash;
    struct net *net;
    bool reclaim;

    addr.family = sk->sk_family;
    switch (addr.family) {
    case AF_INET:
        addr.addr.a4 = inet_sk(sk)->inet_daddr;
        hash = (__force unsigned int) addr.addr.a4;
        break;
    case AF_INET6:
        *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
        hash = ((__force unsigned int) addr.addr.a6[0] ^
                (__force unsigned int) addr.addr.a6[1] ^
                (__force unsigned int) addr.addr.a6[2] ^
                (__force unsigned int) addr.addr.a6[3]);
        break;
    default:
        return NULL;
    }

    hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

    net = dev_net(dst->dev);
    hash &= net->ipv4.tcp_metrics_hash_mask;

    tm = __tcp_get_metrics(&addr, net, hash);
    reclaim = false;
    if (tm == TCP_METRICS_RECLAIM_PTR) {
        reclaim = true;
        tm = NULL;
    }
    if (!tm && create)
        tm = tcpm_new(dst, &addr, hash, reclaim);
    else
        tcpm_check_stamp(tm, dst);

    return tm;
}
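/* The RTT bookkeeping below mirrors the smoothing in the RTO estimator:
 * a session srtt larger than the cached RTT replaces it outright, while a
 * smaller one pulls the cache down with a gain of 1/8 (rtt -= m >> 3);
 * RTTVAR likewise takes larger deviations as-is and decays toward smaller
 * ones with a gain of 1/4.  Overestimating RTT is safer than underestimating.
 */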
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
    const struct inet_connection_sock *icsk = inet_csk(sk);
    struct dst_entry *dst = __sk_dst_get(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_metrics_block *tm;
    unsigned long rtt;
    u32 val;
    int m;

    if (sysctl_tcp_nometrics_save || !dst)
        return;

    if (dst->flags & DST_HOST)
        dst_confirm(dst);

    rcu_read_lock();
    if (icsk->icsk_backoff || !tp->srtt) {
        /* This session failed to estimate rtt.  Why?
         * Probably, no packets returned in time.  Reset our
         * results.
         */
        tm = tcp_get_metrics(sk, dst, false);
        if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
            tcp_metric_set(tm, TCP_METRIC_RTT, 0);
        goto out_unlock;
    } else
        tm = tcp_get_metrics(sk, dst, true);

    if (!tm)
        goto out_unlock;

    rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
    m = rtt - tp->srtt;

    /* If the newly calculated rtt is larger than the stored one, store the
     * new one.  Otherwise, use EWMA.  Remember, rtt overestimation is
     * always better than underestimation.
     */
    if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
        if (m <= 0)
            rtt = tp->srtt;
        else
            rtt -= (m >> 3);
        tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
    }

    if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
        unsigned long var;

        if (m < 0)
            m = -m;

        /* Scale deviation to rttvar fixed point */
        m >>= 1;
        if (m < tp->mdev)
            m = tp->mdev;

        var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (m >= var)
            var = m;
        else
            var -= (var - m) >> 2;

        tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
    }

    if (tcp_in_initial_slowstart(tp)) {
        /* Slow start has not finished yet. */
        if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
            val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
            if (val && (tp->snd_cwnd >> 1) > val)
                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                               tp->snd_cwnd >> 1);
        }
        if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
            val = tcp_metric_get(tm, TCP_METRIC_CWND);
            if (tp->snd_cwnd > val)
                tcp_metric_set(tm, TCP_METRIC_CWND,
                               tp->snd_cwnd);
        }
    } else if (tp->snd_cwnd > tp->snd_ssthresh &&
               icsk->icsk_ca_state == TCP_CA_Open) {
        /* Congestion avoidance phase; cwnd is reliable. */
        if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
            tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                           max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
        if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
            val = tcp_metric_get(tm, TCP_METRIC_CWND);
            tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
        }
    } else {
        /* Slow start did not finish, so cwnd is not meaningful and
         * ssthresh may be invalid as well.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
            val = tcp_metric_get(tm, TCP_METRIC_CWND);
            tcp_metric_set(tm, TCP_METRIC_CWND,
                           (val + tp->snd_ssthresh) >> 1);
        }
        if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
            val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
            if (val && tp->snd_ssthresh > val)
                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                               tp->snd_ssthresh);
        }
        if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
            val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
            if (val < tp->reordering &&
                tp->reordering != sysctl_tcp_reordering)
                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                               tp->reordering);
        }
    }
    tm->tcpm_stamp = jiffies;
out_unlock:
    rcu_read_unlock();
}
/* Initialize metrics on socket. */
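/* A cached value of zero means "nothing known": ssthresh then falls back to
 * TCP_INFINITE_SSTHRESH and a zero RTT drops straight into the RFC 6298
 * reset path below.  Cached RTT/RTTVAR (milliseconds) only ever raise the
 * socket's initial srtt/mdev, never lower them.
 */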
void tcp_init_metrics(struct sock *sk)
{
    struct dst_entry *dst = __sk_dst_get(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_metrics_block *tm;
    u32 val;

    if (dst == NULL)
        goto reset;

    dst_confirm(dst);

    rcu_read_lock();
    tm = tcp_get_metrics(sk, dst, true);
    if (!tm) {
        rcu_read_unlock();
        goto reset;
    }

    if (tcp_metric_locked(tm, TCP_METRIC_CWND))
        tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

    val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
    if (val) {
        tp->snd_ssthresh = val;
        if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
            tp->snd_ssthresh = tp->snd_cwnd_clamp;
    } else {
        /* ssthresh may have been reduced unnecessarily during
         * the 3WHS.  Restore it back to its initial default.
         */
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
    }
    val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
    if (val && tp->reordering != val) {
        tcp_disable_fack(tp);
        tcp_disable_early_retrans(tp);
        tp->reordering = val;
    }

    val = tcp_metric_get(tm, TCP_METRIC_RTT);
    if (val == 0 || tp->srtt == 0) {
        rcu_read_unlock();
        goto reset;
    }
    /* The initial rtt is determined from the SYN,SYN-ACK exchange.
     * The segment is small and the rtt may appear much
     * less than the real one.  Use per-dst memory
     * to make it more realistic.
     *
     * A bit of theory.  RTT is the time passed after a "normal" sized packet
     * is sent until it is ACKed.  In normal circumstances sending small
     * packets forces the peer to delay ACKs and the calculation is still
     * correct.  The algorithm is adaptive and, provided we follow the specs,
     * it NEVER underestimates RTT.  BUT!  If the peer plays clever
     * tricks, sort of "quick acks" for long enough to drive RTT down
     * to a low value, and then abruptly stops and starts delaying
     * ACKs, expect trouble.
     */
    val = msecs_to_jiffies(val);
    if (val > tp->srtt) {
        tp->srtt = val;
        tp->rtt_seq = tp->snd_nxt;
    }
    val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
    if (val > tp->mdev) {
        tp->mdev = val;
        tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
    }
    rcu_read_unlock();

    tcp_set_rto(sk);
reset:
    if (tp->srtt == 0) {
        /* RFC6298: 5.7 We've failed to get a valid RTT sample from
         * the 3WHS.  This is most likely due to retransmission,
         * including a spurious one.  Reset the RTO back to 3 secs
         * from the more aggressive 1 sec to avoid more spurious
         * retransmission.
         */
        tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
        inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
    }
    /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
     * retransmitted.  In light of RFC6298's more aggressive 1 sec
     * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
     * retransmission has occurred.
     */
    if (tp->total_retrans > 1)
        tp->snd_cwnd = 1;
    else
        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
    tp->snd_cwnd_stamp = tcp_time_stamp;
}
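/* Decide whether the peer behind a request_sock can be trusted.  With
 * paws_check, refuse only when a recently updated cached timestamp is ahead
 * of the one carried in the request (PAWS); otherwise require both a cached
 * RTT and a cached timestamp before treating the peer as proven.
 */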
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
    struct tcp_metrics_block *tm;
    bool ret;

    if (!dst)
        return false;

    rcu_read_lock();
    tm = __tcp_get_metrics_req(req, dst);
    if (paws_check) {
        if (tm &&
            (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
            (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
            ret = false;
        else
            ret = true;
    } else {
        if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
            ret = true;
        else
            ret = false;
    }
    rcu_read_unlock();

    return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
    struct tcp_metrics_block *tm;

    rcu_read_lock();
    tm = tcp_get_metrics(sk, dst, true);
    if (tm) {
        struct tcp_sock *tp = tcp_sk(sk);

        if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
            tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
            tp->rx_opt.ts_recent = tm->tcpm_ts;
        }
    }
    rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea.  Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
    struct dst_entry *dst = __sk_dst_get(sk);
    bool ret = false;

    if (dst) {
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
            struct tcp_sock *tp = tcp_sk(sk);

            if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                 tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                tm->tcpm_ts = tp->rx_opt.ts_recent;
            }
            ret = true;
        }
        rcu_read_unlock();
    }
    return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
    struct tcp_metrics_block *tm;
    bool ret = false;

    rcu_read_lock();
    tm = __tcp_get_metrics_tw(tw);
    if (tm) {
        const struct tcp_timewait_sock *tcptw;
        struct sock *sk = (struct sock *) tw;

        tcptw = tcp_twsk(sk);
        if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
             tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
            tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
            tm->tcpm_ts = tcptw->tw_ts_recent;
        }
        ret = true;
    }
    rcu_read_unlock();

    return ret;
}
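/* The metrics hash is sized once per network namespace: either from the
 * "tcpmhash_entries=" boot parameter or, by default, 16K buckets when
 * totalram_pages >= 128 * 1024 (roughly 512MB with 4KB pages) and 8K
 * buckets otherwise.
 */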
static unsigned long tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
    ssize_t ret;

    if (!str)
        return 0;

    ret = kstrtoul(str, 0, &tcpmhash_entries);
    if (ret)
        return 0;

    return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
    int slots, size;

    slots = tcpmhash_entries;
    if (!slots) {
        if (totalram_pages >= 128 * 1024)
            slots = 16 * 1024;
        else
            slots = 8 * 1024;
    }

    size = slots * sizeof(struct tcpm_hash_bucket);

    net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
    if (!net->ipv4.tcp_metrics_hash)
        return -ENOMEM;

    net->ipv4.tcp_metrics_hash_mask = (slots - 1);

    return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
    kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
    .init = tcp_net_metrics_init,
    .exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
    register_pernet_subsys(&tcp_net_metrics_ops);
}