/* net/ipv4/tcp_metrics.c */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
        u16                     mss;
        u16                     syn_loss:10;    /* Recurring Fast Open SYN losses */
        unsigned long           last_syn_loss;  /* Last Fast Open SYN loss */
        struct tcp_fastopen_cookie      cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

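/* One cache entry per (source address, destination address) pair.  Entries
 * hang off the per-namespace hash table at net->ipv4.tcp_metrics_hash, are
 * chained through tcpm_next under RCU, and record their namespace in
 * tcpm_net so lookups can verify it with net_eq().  tcpm_lock is a bitmask
 * of metrics that are locked in the route and must not be overwritten.
 */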
struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        possible_net_t                  tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
        return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

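/* (Re)initialize an entry from its route: copy the RTAX_* lock bits and raw
 * metric values out of the dst, converting RTT and RTTVAR from msec to usec,
 * and optionally reset the cached Fast Open state.
 */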
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
                          const struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 msval;
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        msval = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

        msval = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}

#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

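/* Create (or recycle) the entry for saddr/daddr in the given hash bucket.
 * Runs under tcp_metrics_lock.  When the lookup returned
 * TCP_METRICS_RECLAIM_PTR (i.e. the chain is already too deep), the oldest
 * entry in the bucket is reused instead of allocating a new block.
 */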
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *saddr,
                                          struct inetpeer_addr *daddr,
                                          unsigned int hash)
{
        struct tcp_metrics_block *tm;
        struct net *net;
        bool reclaim = false;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);

        /* While waiting for the spin-lock the cache might have been populated
         * with this entry and so we have to check again.
         */
        tm = __tcp_get_metrics(saddr, daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (tm) {
                tcpm_check_stamp(tm, dst);
                goto out_unlock;
        }

        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        write_pnet(&tm->tcpm_net, net);
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

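/* Encode the result of a chain walk: a hit returns the entry itself, a miss
 * on an overlong chain (depth > TCP_METRICS_RECLAIM_DEPTH) returns the
 * TCP_METRICS_RECLAIM_PTR sentinel so the caller knows to recycle an entry,
 * and a plain miss returns NULL.
 */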
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
                    addr_same(&tm->tcpm_daddr, daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        saddr.family = req->rsk_ops->family;
        daddr.family = req->rsk_ops->family;
        switch (daddr.family) {
        case AF_INET:
                saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
                daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
                hash = (__force unsigned int) daddr.addr.a4;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                *(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
                *(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
                hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
#endif
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (tw->tw_family == AF_INET) {
                saddr.family = AF_INET;
                saddr.addr.a4 = tw->tw_rcv_saddr;
                daddr.family = AF_INET;
                daddr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) daddr.addr.a4;
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (tw->tw_family == AF_INET6) {
                if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
                        saddr.family = AF_INET;
                        saddr.addr.a4 = tw->tw_rcv_saddr;
                        daddr.family = AF_INET;
                        daddr.addr.a4 = tw->tw_daddr;
                        hash = (__force unsigned int) daddr.addr.a4;
                } else {
                        saddr.family = AF_INET6;
                        *(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
                        daddr.family = AF_INET6;
                        *(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
                        hash = ipv6_addr_hash(&tw->tw_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = twsk_net(tw);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr) &&
                    net_eq(tm_net(tm), net))
                        break;
        }
        return tm;
}

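/* Look up (and optionally create) the entry for a full socket.  IPv4-mapped
 * IPv6 destinations are keyed by their IPv4 addresses, so they share metrics
 * with plain IPv4 connections to the same peer.
 */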
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (sk->sk_family == AF_INET) {
                saddr.family = AF_INET;
                saddr.addr.a4 = inet_sk(sk)->inet_saddr;
                daddr.family = AF_INET;
                daddr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) daddr.addr.a4;
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                        saddr.family = AF_INET;
                        saddr.addr.a4 = inet_sk(sk)->inet_saddr;
                        daddr.family = AF_INET;
                        daddr.addr.a4 = inet_sk(sk)->inet_daddr;
                        hash = (__force unsigned int) daddr.addr.a4;
                } else {
                        saddr.family = AF_INET6;
                        *(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
                        daddr.family = AF_INET6;
                        *(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
                        hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = dev_net(dst->dev);
        hash ^= net_hash_mix(net);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR)
                tm = NULL;
        if (!tm && create)
                tm = tcpm_new(dst, &saddr, &daddr, hash);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt_us) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt_us;

        /* If the newly calculated RTT is larger than the stored one,
         * store the new one.  Otherwise, use EWMA.  Remember, RTT
         * overestimation is always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt_us;
                else
                        rtt -= (m >> 3);
                tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev_us)
                        m = tp->mdev_us;

                var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start has not finished yet. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Congestion avoidance phase: cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Otherwise slow start did not finish: cwnd is meaningless
                 * and ssthresh may be invalid as well.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during the
                 * 3WHS.  Restore it to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
         * to seed the RTO for later data packets because SYN packets are
         * small.  Use the per-dst cached values to seed the RTO but keep
         * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
         * Later the RTO will be updated immediately upon obtaining the first
         * data RTT sample (tcp_rtt_estimator()).  Hence the cached RTT only
         * influences the first RTO but not later RTT estimation.
         *
         * But if RTT is not available from the SYN (due to retransmits or
         * syn cookies) or the cache, force a conservative 3secs timeout.
         *
         * A bit of theory.  RTT is the time elapsed after a "normal" sized
         * packet is sent until it is ACKed.  In normal circumstances sending
         * small packets forces the peer to delay ACKs and the calculation is
         * still correct.  The algorithm is adaptive and, provided we follow
         * the specs, it NEVER underestimates RTT.  BUT!  If the peer plays
         * tricks such as sending "quick acks" for long enough to drive the
         * measured RTT down, and then abruptly stops doing so and starts to
         * delay ACKs, expect trouble.
         */
        if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
                crtt /= 8 * USEC_PER_MSEC;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt_us == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * 3WHS.  This is most likely due to retransmission,
                 * including spurious one.  Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
                tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted.  In light of RFC6298's more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}

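/* With paws_check set, decide whether the request's timestamp is acceptable
 * against the timestamp last cached for this peer: reject if the cached
 * value is recent (within TCP_PAWS_MSL) and ahead of req->ts_recent by more
 * than TCP_PAWS_WINDOW, or if timestamps are not in use.  Without paws_check,
 * the peer is "proven" once we hold both an RTT sample and a timestamp.
 */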
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
                        bool paws_check, bool timestamps)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
                     !timestamps))
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}

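/* TCP Fast Open state is read and written under fastopen_seqlock so that
 * readers always see a consistent (mss, cookie, syn_loss) snapshot, while
 * the metrics block itself is only protected by RCU.
 */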
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_metrics_block *tm;

        if (!dst)
                return;
        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                if (mss)
                        tfom->mss = mss;
                if (cookie && cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

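/* Generic netlink interface (TCP_METRICS_GENL_NAME family):
 * TCP_METRICS_CMD_GET looks up or dumps entries and TCP_METRICS_CMD_DEL
 * removes one entry or, when no destination address is supplied, flushes
 * the whole cache for the requesting namespace.
 */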
static struct genl_family tcp_metrics_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* Following attributes are not received for GET/DEL,
         * we keep them for reference
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_daddr.family) {
        case AF_INET:
                if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                 tm->tcpm_daddr.addr.a4) < 0)
                        goto nla_put_failure;
                if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
                                 tm->tcpm_saddr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
                            tm->tcpm_daddr.addr.a6) < 0)
                        goto nla_put_failure;
                if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
                            tm->tcpm_saddr.addr.a6) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
                                (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
                        goto nla_put_failure;
                if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
                                tm->tcpm_ts) < 0)
                        goto nla_put_failure;
        }

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
                        u32 val = tm->tcpm_vals[i];

                        if (!val)
                                continue;
                        if (i == TCP_METRIC_RTT) {
                                if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                val = max(val / 1000, 1U);
                        }
                        if (i == TCP_METRIC_RTTVAR) {
                                if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
                                                val) < 0)
                                        goto nla_put_failure;
                                n++;
                                val = max(val / 1000, 1U);
                        }
                        if (nla_put_u32(msg, i + 1, val) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                 tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                   jiffies - tfom->last_syn_loss) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (!net_eq(tm_net(tm), net))
                                continue;
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                           unsigned int *hash, int optional, int v4, int v6)
{
        struct nlattr *a;

        a = info->attrs[v4];
        if (a) {
                addr->family = AF_INET;
                addr->addr.a4 = nla_get_be32(a);
                if (hash)
                        *hash = (__force unsigned int) addr->addr.a4;
                return 0;
        }
        a = info->attrs[v6];
        if (a) {
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
                memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
                if (hash)
                        *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        return __parse_nl_addr(info, addr, hash, optional,
                               TCP_METRICS_ATTR_ADDR_IPV4,
                               TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
        return __parse_nl_addr(info, addr, NULL, 0,
                               TCP_METRICS_ATTR_SADDR_IPV4,
                               TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;
        bool src = true;

        ret = parse_nl_addr(info, &daddr, &hash, 0);
        if (ret < 0)
                return ret;

        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}

#define deref_locked_genl(p)    \
        rcu_dereference_protected(p, lockdep_genl_is_held() && \
                                     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)   rcu_dereference_protected(p, lockdep_genl_is_held())

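/* Remove every entry that belongs to this namespace.  Each bucket is walked
 * under tcp_metrics_lock; matching blocks are unlinked in place and freed
 * with kfree_rcu() so that concurrent RCU readers remain safe.
 */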
static void tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                struct tcp_metrics_block __rcu **pp;

                spin_lock_bh(&tcp_metrics_lock);
                pp = &hb->chain;
                for (tm = deref_locked_genl(*pp); tm;
                     tm = deref_locked_genl(*pp)) {
                        if (net_eq(tm_net(tm), net)) {
                                *pp = tm->tcpm_next;
                                kfree_rcu(tm, rcu_head);
                        } else {
                                pp = &tm->tcpm_next;
                        }
                }
                spin_unlock_bh(&tcp_metrics_lock);
        }
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;
        bool src = true, found = false;

        ret = parse_nl_addr(info, &daddr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0) {
                tcp_metrics_flush_all(net);
                return 0;
        }
        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        hash ^= net_hash_mix(net);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        hb = net->ipv4.tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
                    net_eq(tm_net(tm), net)) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;
                } else {
                        pp = &tm->tcpm_next;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!found)
                return -ESRCH;
        return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

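/* Hash table sizing: the "tcpmhash_entries=" boot parameter overrides the
 * default of 16K slots (8K on systems with fewer than 128K pages of RAM).
 */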
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!net->ipv4.tcp_metrics_hash)
                net->ipv4.tcp_metrics_hash = vzalloc(size);

        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

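/* Namespace teardown: no lookups can still be in flight, so the chains are
 * walked with rcu_dereference_protected() and the blocks freed directly
 * with kfree().
 */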
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log); i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        kvfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init = tcp_net_metrics_init,
        .exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                panic("Could not allocate the tcp_metrics hash table\n");

        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops);
        if (ret < 0)
                panic("Could not register tcp_metrics generic netlink\n");
}