/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H
#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>

#include <linux/seq_file.h>
extern struct inet_hashinfo tcp_hashinfo;

extern atomic_t tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U
/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */
#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */
#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)
/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}
extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
			    size_t size, int flags);

extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);

extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}
extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
/* TCP_ECN stuff */
#define TCP_ECN_OK		1
#define TCP_ECN_QUEUE_CWR	2
#define TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}
enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
			      int estab);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */
extern void tcp_v4_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);

extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff * tcp_make_synack(struct sock *sk,
					struct dst_entry *dst,
					struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
extern void tcp_cwnd_application_limited(struct sock *sk);
extern void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb);
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);

extern void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
				unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer push more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u16		urg_ptr;	/* Valid w/URG flags is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
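/*
 * Worked example (illustrative, not from the original header): a TSO skb
 * carrying 4380 bytes of data with gso_size = 1460 has gso_segs = 3, so
 * tcp_skb_pcount() reports 3 packets and tcp_skb_mss() the 1460-byte
 * per-segment size.
 */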
static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
{
	if (*count) {
		*count -= decr;
		if ((int)*count < 0)
			*count = 0;
	}
}

static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
}
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};
/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2
struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};
extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
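/*
 * Sketch (hypothetical, for illustration only): a minimal congestion control
 * module built from the Reno helpers declared above. The name "my_reno" and
 * the init/exit functions are invented for this example.
 *
 *	static struct tcp_congestion_ops my_reno = {
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.name		= "my_reno",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *	};
 *
 *	static int __init my_reno_init(void)
 *	{
 *		return tcp_register_congestion_control(&my_reno);
 *	}
 *
 *	static void __exit my_reno_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&my_reno);
 *	}
 */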
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
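/*
 * Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, tcp_left_out() = 2 + 1 = 3 and
 * tcp_packets_in_flight() = 10 - 3 + 1 = 8 segments presumed in the network.
 */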
extern int tcp_limit_reno_sacked(struct tcp_sock *tp);
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
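/*
 * Note (illustrative): (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd, so outside
 * of CWR/Recovery this returns max(ssthresh, 3*cwnd/4); e.g. cwnd = 20 and
 * ssthresh = 10 yields 15.
 */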
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}
/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}
/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!inet_csk_ack_scheduled(sk))
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  (3 * TCP_RTO_MIN) / 4,
							  TCP_RTO_MAX);
		}
		return 1;
	}
	return 0;
}
#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}
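/*
 * Worked example (illustrative): with sysctl_tcp_adv_win_scale = 2 and
 * space = 262144 bytes, the window is 262144 - (262144 >> 2) = 196608 bytes;
 * with a scale of -2 it would instead be 262144 >> 2 = 65536 bytes.
 */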
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
}
extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
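/*
 * Illustrative example (not from the original header): with ts_recent = 100,
 * an arriving rcv_tsval of 99 makes the first test fail ((s32)(99 - 100) < 0),
 * so unless the 24-day window (or, for RSTs, the MSL window) has elapsed the
 * function returns 1 and the segment fails PAWS.
 */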
#define TCP_CHECK_TIMER(sk) do { } while (0)
static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
	tp->retransmit_skb_hint = NULL;
}
/* MD5 Signature */

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};
/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};
#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
			     u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);
#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif
extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);
static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
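/*
 * Typical call pattern (illustrative sketch): tcp_get_md5sig_pool() implies
 * get_cpu(), so the matching tcp_put_md5sig_pool() must run on the same CPU.
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (hp) {
 *		tcp_md5_hash_header(hp, th);
 *		tcp_md5_hash_key(hp, key);
 *		tcp_put_md5sig_pool();
 *	}
 */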
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}
static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_write_queue.next;
	if (skb == (struct sk_buff *) &sk->sk_write_queue)
		return NULL;
	return skb;
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_write_queue.prev;
	if (skb == (struct sk_buff *) &sk->sk_write_queue)
		return NULL;
	return skb;
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb->next;
}
#define tcp_for_write_queue(skb, sk)					\
	for (skb = (sk)->sk_write_queue.next;				\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = skb->next)

#define tcp_for_write_queue_from(skb, sk)				\
	for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
	     skb = skb->next)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	for (tmp = skb->next;						\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = tmp, tmp = skb->next)
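/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * counting queued segments with the iterator above.
 *
 *	static inline unsigned int tcp_write_queue_len(struct sock *sk)
 *	{
 *		struct sk_buff *skb;
 *		unsigned int n = 0;
 *
 *		tcp_for_write_queue(skb, sk)
 *			n++;
 *		return n;
 *	}
 */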
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}
/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}
static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}
/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked > 0 or when the caller has ensured validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}
static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};
extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif
/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};
struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
#endif
};
extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */