/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf-cgroup.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal value of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to 63secs
				 * of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE 4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_recovery;
#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */

extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

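/* Illustrative sketch (not part of this header): how the helpers above
 * behave across sequence-number wraparound. The function name below is
 * hypothetical; only the before()/after()/between() calls are real.
 */
#if 0
static void seq_wraparound_demo(void)
{
	__u32 a = 0xfffffff0U;	/* just below the 2^32 wrap */
	__u32 b = 0x00000010U;	/* 0x20 bytes later, after the wrap */

	/* Unsigned subtraction makes the comparison wrap-safe:
	 * (__s32)(a - b) == (__s32)0xffffffe0 < 0, so a is "before" b.
	 */
	BUG_ON(!before(a, b));
	BUG_ON(!after(b, a));

	/* b lies within the window [a, a + 0x100] despite the wrap */
	BUG_ON(!between(b, a, a + 0x100));
}
#endif
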
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct tcphdr *th);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	unsigned long now = jiffies;

	if (time_after(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
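
/* Illustrative sketch (not part of this header): the counter returned by
 * tcp_cookie_time() advances once per TCP_SYNCOOKIE_PERIOD (60s with the
 * defaults above), so a cookie minted at counter value N stops validating
 * once the counter passes N + MAX_SYNCOOKIE_AGE. The helper name below is
 * hypothetical.
 */
#if 0
static bool cookie_counter_still_valid(u32 cookie_count)
{
	/* valid for at most 2 * 60s, less if minted late in a period */
	return tcp_cookie_time() - cookie_count < MAX_SYNCOOKIE_AGE;
}
#endif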
/* tcp_output.c */

u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
		     int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ 1000

static inline u64 tcp_clock_ns(void)
{
	return local_clock();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}

/* Refresh 1us clock of a TCP socket,
 * ensuring monotonically increasing values.
 */
static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_us();

	if (val > tp->tcp_mstamp)
		tp->tcp_mstamp = val;
}

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

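/* Illustrative sketch (not part of this header): the relationship between
 * the 1us socket clock and the 1ms timestamp clock above. The function
 * name is hypothetical.
 */
#if 0
static void tcp_clock_demo(struct tcp_sock *tp)
{
	tcp_mstamp_refresh(tp);		/* tp->tcp_mstamp = now, in us */

	/* tcp_time_stamp() is simply the us clock scaled to TCP_TS_HZ:
	 * e.g. tcp_mstamp == 5,000,000 us  ->  tcp_time_stamp == 5,000 ms
	 */
	WARN_ON(tcp_time_stamp(tp) !=
		div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ));
}
#endif
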
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

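/* Illustrative sketch (not part of this header): tcp_flag_byte() exposes
 * byte 13 of the TCP header, so the TCPHDR_* masks above can be tested on
 * it directly. The helper name is hypothetical.
 */
#if 0
static bool tcp_is_synack(const struct tcphdr *th)
{
	u8 flags = tcp_flag_byte(th);

	/* a SYN-ACK has both SYN and ACK set and RST clear */
	return (flags & (TCPHDR_SYN | TCPHDR_ACK)) ==
	       (TCPHDR_SYN | TCPHDR_ACK) && !(flags & TCPHDR_RST);
}
#endif
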
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};

		/* Used to stash the receive timestamp while this skb is in the
		 * out of order queue, as skb->tstamp is overwritten by the
		 * rbnode.
		 */
		ktime_t		swtstamp;
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack?	*/
			eor:1,		/* Is skb MSG_EOR marked?	*/
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,	/* Bytes in flight at transmit */
			      is_app_limited:1,	/* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))


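/* Illustrative sketch (not part of this header): typical use of the
 * TCP_SKB_CB() accessor above. The helper name is hypothetical.
 */
#if 0
static u32 tcp_skb_seq_space(const struct sk_buff *skb)
{
	const struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* end_seq counts SYN and FIN as one unit of sequence space each,
	 * so this is datalen plus at most 2.
	 */
	return tcb->end_seq - tcb->seq;
}
#endif
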
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}
#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return true;
#endif
	return false;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

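/* Illustrative sketch (not part of this header): for a TSO skb carrying
 * several segments, the two accessors above combine naturally. The helper
 * name is hypothetical.
 */
#if 0
static unsigned int tcp_skb_approx_bytes(const struct sk_buff *skb)
{
	/* the last segment may be shorter than tcp_skb_mss(), so this
	 * is an upper bound rather than an exact byte count.
	 */
	return tcp_skb_pcount(skb) * tcp_skb_mss(skb);
}
#endif
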
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
};

struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* suggest number of segments for each skb to transmit (optional) */
	u32 (*tso_segs_goal)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

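/* Illustrative sketch (not part of this header): the shape of a minimal
 * congestion control module built on the hooks above, reusing the Reno
 * helpers exported by this header. Only ssthresh, cong_avoid and undo_cwnd
 * are required; "tcp_example" and the function names are hypothetical.
 */
#if 0
static u32 example_ssthresh(struct sock *sk)
{
	return tcp_reno_ssthresh(sk);	/* halve cwnd on loss */
}

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}

static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= example_ssthresh,
	.cong_avoid	= example_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "example",
	.owner		= THIS_MODULE,
};

/* registered from module init:
 *	tcp_register_congestion_control(&tcp_example);
 */
#endif
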
static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

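/* Illustrative worked example (not part of this header): outside of cwnd
 * reduction, tcp_current_ssthresh() returns at least 3/4 of cwnd, since
 * (cwnd >> 1) + (cwnd >> 2) == cwnd/2 + cwnd/4. E.g. with snd_cwnd = 40
 * and snd_ssthresh = 20 it returns max(20, 20 + 10) = 30.
 */
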
/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications from
 * either sending more filler packets or data to artificially blow up the cwnd
 * usage, and allow application-limited processes to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

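/* Illustrative worked example (not part of this header): with icsk_rto as
 * low as ~2ms, tcp_probe0_base() clamps to TCP_RTO_MIN (200ms), and each
 * icsk_backoff increment doubles the delay computed by tcp_probe0_when():
 * 200ms, 400ms, 800ms, ... until capped at max_when.
 */
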
/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
void tcp_set_state(struct sock *sk, int state);

void tcp_done(struct sock *sk);

int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
	    ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;

	return tcp_adv_win_scale <= 0 ?
		(space >> (-tcp_adv_win_scale)) :
		space - (space >> tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

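/* Illustrative worked example (not part of this header): with
 * sysctl_tcp_adv_win_scale = 1 (the default in recent kernels),
 * tcp_win_from_space(s) = s - s/2 = s/2, so half the receive buffer is
 * advertised as window and half is reserved for skb overhead. With a
 * scale of 2 it is s - s/4 = 3s/4; a non-positive scale -n yields s >> n.
 */
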
extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
			  tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

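/* Illustrative worked example (not part of this header): with the defaults
 * defined earlier in this file, an idle connection is first probed after
 * keepalive_time_when() (2 hours), then keepalive_probes() (9) probes are
 * sent keepalive_intvl_when() (75s) apart, so a dead peer is detected
 * after roughly 2h + 9 * 75s.
 */
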
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

a915da9b
ED
1451union tcp_md5_addr {
1452 struct in_addr a4;
1453#if IS_ENABLED(CONFIG_IPV6)
1454 struct in6_addr a6;
1455#endif
1456};
1457
cfb6eeb4
YH
1458/* - key database */
1459struct tcp_md5sig_key {
a915da9b 1460 struct hlist_node node;
cfb6eeb4 1461 u8 keylen;
a915da9b
ED
1462 u8 family; /* AF_INET or AF_INET6 */
1463 union tcp_md5_addr addr;
6797318e 1464 u8 prefixlen;
a915da9b
ED
1465 u8 key[TCP_MD5SIG_MAXKEYLEN];
1466 struct rcu_head rcu;
cfb6eeb4
YH
1467};
1468
1469/* - sock block */
1470struct tcp_md5sig_info {
a915da9b 1471 struct hlist_head head;
a8afca03 1472 struct rcu_head rcu;
cfb6eeb4
YH
1473};
1474
/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};
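
/*
 * Illustrative sketch (hypothetical helper): how the IPv4 pseudo header
 * above gets filled before being folded into the MD5 digest, modeled on
 * what tcp_ipv4.c does ahead of hashing the TCP header.
 */
static inline void tcp_example_fill_pseudohdr(struct tcp4_pseudohdr *bp,
					      __be32 saddr, __be32 daddr,
					      unsigned int tcplen)
{
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(tcplen);
}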

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct ahash_request	*md5_req;
	void			*scratch;
};

/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif
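
/*
 * Illustrative sketch (hypothetical helper): resolving a configured MD5
 * key for an IPv4 peer address; returns NULL when no key is set or when
 * CONFIG_TCP_MD5SIG is compiled out.
 */
static inline struct tcp_md5sig_key *tcp_example_lookup_v4_key(const struct sock *sk,
							       __be32 daddr)
{
	return tcp_md5_do_lookup(sk, (const union tcp_md5_addr *)&daddr,
				 AF_INET);
}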

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);
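
/*
 * Illustrative sketch (hypothetical helper): the get/put pair above
 * brackets a per-CPU pool while BHs stay disabled, so digests must be
 * computed without sleeping in between.
 */
static inline int tcp_example_hash_one_key(const struct tcp_md5sig_key *key)
{
	struct tcp_md5sig_pool *hp;
	int err;

	hp = tcp_get_md5sig_pool();	/* disables BH on success */
	if (!hp)
		return -ENOMEM;
	err = tcp_md5_hash_key(hp, key);
	tcp_put_md5sig_pool();		/* re-enables BH */
	return err;
}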

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
			    unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);

extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc);
void tcp_fastopen_init_key_once(bool publish);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH 16

/* Fastopen key context */
struct tcp_fastopen_context {
	struct crypto_cipher	*tfm;
	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
	struct rcu_head		rcu;
};

extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_timeout_reset(void);
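
/*
 * Illustrative sketch (hypothetical helper): an active opener would gate
 * sending SYN+data on the blackhole detector exposed just above.
 */
static inline bool tcp_example_may_send_syn_data(struct sock *sk)
{
	return !tcp_fastopen_active_should_disable(sk);
}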

/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
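
/*
 * Illustrative sketch (hypothetical helper): bracketing a send-buffer
 * stall with the chronograph API above; the real call sites live in
 * tcp_output.c.
 */
static inline void tcp_example_account_sndbuf_stall(struct sock *sk, bool stalled)
{
	if (stalled)
		tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
	else
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}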

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
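
/*
 * Illustrative sketch (hypothetical helper): walking the write queue with
 * the macros above; the caller is assumed to hold the socket lock.
 */
static inline unsigned int tcp_example_write_queue_len(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int len = 0;

	tcp_for_write_queue(skb, sk)
		len++;
	return len;
}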

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked) {
		sk->sk_send_head = NULL;
		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
	}
	if (tcp_sk(sk)->highest_sack == skb_unlinked)
		tcp_sk(sk)->highest_sack = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}
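
/*
 * Illustrative sketch (hypothetical fragment): a sender enqueues new data
 * with tcp_add_write_queue_tail() and, once the segment has been handed
 * to the IP layer, moves the pointer along with tcp_advance_send_head().
 */
static inline void tcp_example_enqueue_and_mark_sent(struct sock *sk,
						     struct sk_buff *skb)
{
	tcp_add_write_queue_tail(sk, skb);	/* may become the send head */
	/* ... transmit skb here, then advance past it: */
	tcp_advance_send_head(sk, skb);
}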

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);