[INET]: Generalise the tcp_listen_ lock routines
[linux-2.6-block.git] include/net/tcp.h
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the TCP module.
7 *
8 * Version: @(#)tcp.h 1.0.5 05/23/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifndef _TCP_H
19#define _TCP_H
20
21#define TCP_DEBUG 1
22#define FASTRETRANS_DEBUG 1
23
24/* Cancel timers, when they are not required. */
25#undef TCP_CLEAR_TIMERS
26
27#include <linux/config.h>
28#include <linux/list.h>
29#include <linux/tcp.h>
30#include <linux/slab.h>
31#include <linux/cache.h>
32#include <linux/percpu.h>
33#include <net/inet_hashtables.h>
34#include <net/checksum.h>
35#include <net/request_sock.h>
36#include <net/sock.h>
37#include <net/snmp.h>
38#include <net/ip.h>
39#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
40#include <linux/ipv6.h>
41#endif
42#include <linux/seq_file.h>
43
44extern struct inet_hashinfo tcp_hashinfo;
45
46#if (BITS_PER_LONG == 64)
47#define TCP_ADDRCMP_ALIGN_BYTES 8
48#else
49#define TCP_ADDRCMP_ALIGN_BYTES 4
50#endif
51
52/* This is a TIME_WAIT bucket. It works around the memory consumption
53 * problems of sockets in such a state on heavily loaded servers, but
54 * without violating the protocol specification.
55 */
56struct tcp_tw_bucket {
57 /*
58 * Now struct sock also uses sock_common, so please just
59 * don't add anything before this first member (__tw_common) --acme
60 */
61 struct sock_common __tw_common;
62#define tw_family __tw_common.skc_family
63#define tw_state __tw_common.skc_state
64#define tw_reuse __tw_common.skc_reuse
65#define tw_bound_dev_if __tw_common.skc_bound_dev_if
66#define tw_node __tw_common.skc_node
67#define tw_bind_node __tw_common.skc_bind_node
68#define tw_refcnt __tw_common.skc_refcnt
69 volatile unsigned char tw_substate;
70 unsigned char tw_rcv_wscale;
71 __u16 tw_sport;
72 /* Socket demultiplex comparisons on incoming packets. */
73 /* these five are in inet_sock */
74 __u32 tw_daddr
75 __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
76 __u32 tw_rcv_saddr;
77 __u16 tw_dport;
78 __u16 tw_num;
79 /* And these are ours. */
80 int tw_hashent;
81 int tw_timeout;
82 __u32 tw_rcv_nxt;
83 __u32 tw_snd_nxt;
84 __u32 tw_rcv_wnd;
85 __u32 tw_ts_recent;
86 long tw_ts_recent_stamp;
87 unsigned long tw_ttd;
88 struct inet_bind_bucket *tw_tb;
89 struct hlist_node tw_death_node;
90#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
91 struct in6_addr tw_v6_daddr;
92 struct in6_addr tw_v6_rcv_saddr;
93 int tw_v6_ipv6only;
94#endif
95};
96
97static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
98 struct hlist_head *list)
99{
100 hlist_add_head(&tw->tw_node, list);
101}
102
103static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
104 struct hlist_head *list)
105{
106 hlist_add_head(&tw->tw_bind_node, list);
107}
108
109static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
110{
111 return tw->tw_death_node.pprev != NULL;
112}
113
114static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
115{
116 tw->tw_death_node.pprev = NULL;
117}
118
119static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
120{
121 __hlist_del(&tw->tw_death_node);
122 tw_dead_node_init(tw);
123}
124
125static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
126{
127 if (tw_dead_hashed(tw)) {
128 __tw_del_dead_node(tw);
129 return 1;
130 }
131 return 0;
132}
133
134#define tw_for_each(tw, node, head) \
135 hlist_for_each_entry(tw, node, head, tw_node)
136
137#define tw_for_each_inmate(tw, node, jail) \
138 hlist_for_each_entry(tw, node, jail, tw_death_node)
139
140#define tw_for_each_inmate_safe(tw, node, safe, jail) \
141 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
142
143#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
144
145static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
146{
147 return likely(sk->sk_state != TCP_TIME_WAIT) ?
148 inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
149}
150
151#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
152static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
153{
154 return likely(sk->sk_state != TCP_TIME_WAIT) ?
155 &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
156}
157
158static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
159{
160 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
161}
162
163#define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
164
165static inline int tcp_v6_ipv6only(const struct sock *sk)
166{
167 return likely(sk->sk_state != TCP_TIME_WAIT) ?
168 ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
169}
170#else
171# define __tcp_v6_rcv_saddr(__sk) NULL
172# define tcp_v6_rcv_saddr(__sk) NULL
173# define tcptw_sk_ipv6only(__sk) 0
174# define tcp_v6_ipv6only(__sk) 0
175#endif
176
177extern kmem_cache_t *tcp_timewait_cachep;
178
179static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
180{
181 if (atomic_dec_and_test(&tw->tw_refcnt)) {
182#ifdef SOCK_REFCNT_DEBUG
183 printk(KERN_DEBUG "tw_bucket %p released\n", tw);
184#endif
185 kmem_cache_free(tcp_timewait_cachep, tw);
186 }
187}
188
189extern atomic_t tcp_orphan_count;
190extern int tcp_tw_count;
191extern void tcp_time_wait(struct sock *sk, int state, int timeo);
192extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
193
194
195/* Socket demux engine toys. */
196#ifdef __BIG_ENDIAN
197#define TCP_COMBINED_PORTS(__sport, __dport) \
198 (((__u32)(__sport)<<16) | (__u32)(__dport))
199#else /* __LITTLE_ENDIAN */
200#define TCP_COMBINED_PORTS(__sport, __dport) \
201 (((__u32)(__dport)<<16) | (__u32)(__sport))
202#endif
203
204#if (BITS_PER_LONG == 64)
205#ifdef __BIG_ENDIAN
206#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
207 __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
208#else /* __LITTLE_ENDIAN */
209#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
210 __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
211#endif /* __BIG_ENDIAN */
212#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
213 (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
214 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
215 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
216#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
217 (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
218 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
219 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
220#else /* 32-bit arch */
221#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
222#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
223 ((inet_sk(__sk)->daddr == (__saddr)) && \
224 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
225 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
226 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
227#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
228 ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
229 (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
230 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
231 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
232#endif /* 64-bit arch */
233
234#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
235 (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
236 ((__sk)->sk_family == AF_INET6) && \
237 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
238 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
239 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
240
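/* Illustrative note, not part of the original header: the demux macros above
 * compare both 16-bit ports with a single 32-bit load. For example, on a
 * little-endian machine a lookup with sport=0x1234 and dport=0x0050 builds
 *
 *	__u32 ports = TCP_COMBINED_PORTS(0x1234, 0x0050);   that is (0x0050 << 16) | 0x1234
 *
 * and TCP_IPV4_MATCH() compares it against the 32 bits starting at
 * inet_sk(sk)->dport, relying on dport and num being adjacent __u16 fields.
 * On 64-bit machines TCP_V4_ADDR_COOKIE plays the same trick for the two
 * adjacent 32-bit addresses.
 */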
241#define MAX_TCP_HEADER (128 + MAX_HEADER)
242
243/*
244 * Never offer a window over 32767 without using window scaling. Some
245 * poor stacks do signed 16bit maths!
246 */
247#define MAX_TCP_WINDOW 32767U
248
249/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
250#define TCP_MIN_MSS 88U
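/* One way to read the arithmetic above (illustrative only): a worst-case
 * 60-byte IP header + 60-byte TCP header + 8 bytes of data is 128 bytes;
 * an MSS is quoted relative to the standard 20+20 byte headers, so the
 * smallest MSS we accept is 128 - 40 = 88.
 */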
251
252/* Minimal RCV_MSS. */
253#define TCP_MIN_RCVMSS 536U
254
255/* After receiving this amount of duplicate ACKs fast retransmit starts. */
256#define TCP_FASTRETRANS_THRESH 3
257
258/* Maximal reordering. */
259#define TCP_MAX_REORDERING 127
260
261/* Maximal number of ACKs sent quickly to accelerate slow-start. */
262#define TCP_MAX_QUICKACKS 16U
263
264/* urg_data states */
265#define TCP_URG_VALID 0x0100
266#define TCP_URG_NOTYET 0x0200
267#define TCP_URG_READ 0x0400
268
269#define TCP_RETR1 3 /*
270 * This is how many retries it does before it
271 * tries to figure out if the gateway is
272 * down. Minimal RFC value is 3; it corresponds
273 * to ~3sec-8min depending on RTO.
274 */
275
276#define TCP_RETR2 15 /*
277 * This should take at least
278 * 90 minutes to time out.
279 * RFC1122 says that the limit is 100 sec.
280 * 15 is ~13-30min depending on RTO.
281 */
282
283#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
284 * connection: ~180sec is RFC minimum */
285
286#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
287 * connection: ~180sec is RFC minimum */
288
289
290#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
291 * socket. 7 is ~50sec-16min.
292 */
293
294
295#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
296 * state, about 60 seconds */
297#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
298 /* BSD style FIN_WAIT2 deadlock breaker.
299 * It used to be 3min, new value is 60sec,
300 * to combine FIN-WAIT-2 timeout with
301 * TIME-WAIT timer.
302 */
303
304#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
305#if HZ >= 100
306#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
307#define TCP_ATO_MIN ((unsigned)(HZ/25))
308#else
309#define TCP_DELACK_MIN 4U
310#define TCP_ATO_MIN 4U
311#endif
312#define TCP_RTO_MAX ((unsigned)(120*HZ))
313#define TCP_RTO_MIN ((unsigned)(HZ/5))
314#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
315
316#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
317 * for local resources.
318 */
319
320#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
321#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
322#define TCP_KEEPALIVE_INTVL (75*HZ)
323
324#define MAX_TCP_KEEPIDLE 32767
325#define MAX_TCP_KEEPINTVL 32767
326#define MAX_TCP_KEEPCNT 127
327#define MAX_TCP_SYNCNT 127
328
329#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
330#define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */
331
332#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
333#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
334 * after this time. It should be equal to
335 * (or greater than) TCP_TIMEWAIT_LEN
336 * to provide reliability equal to that
337 * provided by the TIME-WAIT state.
338 */
339#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
340 * timestamps. It must be less than
341 * minimal timewait lifetime.
342 */
343
344#define TCP_TW_RECYCLE_SLOTS_LOG 5
345#define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
346
347/* If the time is > 4sec, it is the "slow" path and no recycling is required,
348 so we select the tick so that the slots cover a range of about 4 seconds.
349 */
350
351#if HZ <= 16 || HZ > 4096
352# error Unsupported: HZ <= 16 or HZ > 4096
353#elif HZ <= 32
354# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
355#elif HZ <= 64
356# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
357#elif HZ <= 128
358# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
359#elif HZ <= 256
360# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
361#elif HZ <= 512
362# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
363#elif HZ <= 1024
364# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
365#elif HZ <= 2048
366# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
367#else
368# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
369#endif
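/* Worked example (illustrative only, assuming HZ=1000): HZ <= 1024 selects
 * TCP_TW_RECYCLE_TICK = 10 + 2 - TCP_TW_RECYCLE_SLOTS_LOG = 7, so each of the
 * 32 recycle slots covers 2^7 = 128 jiffies (~128 ms), and the whole wheel
 * spans roughly 32 * 128 ms ~= 4 seconds, matching the comment above.
 */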
370/*
371 * TCP option
372 */
373
374#define TCPOPT_NOP 1 /* Padding */
375#define TCPOPT_EOL 0 /* End of options */
376#define TCPOPT_MSS 2 /* Segment size negotiating */
377#define TCPOPT_WINDOW 3 /* Window scaling */
378#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
379#define TCPOPT_SACK 5 /* SACK Block */
380#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
381
382/*
383 * TCP option lengths
384 */
385
386#define TCPOLEN_MSS 4
387#define TCPOLEN_WINDOW 3
388#define TCPOLEN_SACK_PERM 2
389#define TCPOLEN_TIMESTAMP 10
390
391/* But this is what stacks really send out. */
392#define TCPOLEN_TSTAMP_ALIGNED 12
393#define TCPOLEN_WSCALE_ALIGNED 4
394#define TCPOLEN_SACKPERM_ALIGNED 4
395#define TCPOLEN_SACK_BASE 2
396#define TCPOLEN_SACK_BASE_ALIGNED 4
397#define TCPOLEN_SACK_PERBLOCK 8
398
399#define TCP_TIME_RETRANS 1 /* Retransmit timer */
400#define TCP_TIME_DACK 2 /* Delayed ack timer */
401#define TCP_TIME_PROBE0 3 /* Zero window probe timer */
402#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
403
404/* Flags in tp->nonagle */
405#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
406#define TCP_NAGLE_CORK 2 /* Socket is corked */
407#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
408
409/* sysctl variables for tcp */
410extern int sysctl_tcp_timestamps;
411extern int sysctl_tcp_window_scaling;
412extern int sysctl_tcp_sack;
413extern int sysctl_tcp_fin_timeout;
414extern int sysctl_tcp_tw_recycle;
415extern int sysctl_tcp_keepalive_time;
416extern int sysctl_tcp_keepalive_probes;
417extern int sysctl_tcp_keepalive_intvl;
418extern int sysctl_tcp_syn_retries;
419extern int sysctl_tcp_synack_retries;
420extern int sysctl_tcp_retries1;
421extern int sysctl_tcp_retries2;
422extern int sysctl_tcp_orphan_retries;
423extern int sysctl_tcp_syncookies;
424extern int sysctl_tcp_retrans_collapse;
425extern int sysctl_tcp_stdurg;
426extern int sysctl_tcp_rfc1337;
427extern int sysctl_tcp_abort_on_overflow;
428extern int sysctl_tcp_max_orphans;
429extern int sysctl_tcp_max_tw_buckets;
430extern int sysctl_tcp_fack;
431extern int sysctl_tcp_reordering;
432extern int sysctl_tcp_ecn;
433extern int sysctl_tcp_dsack;
434extern int sysctl_tcp_mem[3];
435extern int sysctl_tcp_wmem[3];
436extern int sysctl_tcp_rmem[3];
437extern int sysctl_tcp_app_win;
438extern int sysctl_tcp_adv_win_scale;
439extern int sysctl_tcp_tw_reuse;
440extern int sysctl_tcp_frto;
441extern int sysctl_tcp_low_latency;
442extern int sysctl_tcp_nometrics_save;
443extern int sysctl_tcp_moderate_rcvbuf;
444extern int sysctl_tcp_tso_win_divisor;
445
446extern atomic_t tcp_memory_allocated;
447extern atomic_t tcp_sockets_allocated;
448extern int tcp_memory_pressure;
449
450#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
451#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
452#else
453#define TCP_INET_FAMILY(fam) 1
454#endif
455
456/*
457 * Pointers to address related TCP functions
458 * (i.e. things that depend on the address family)
459 */
460
461struct tcp_func {
462 int (*queue_xmit) (struct sk_buff *skb,
463 int ipfragok);
464
465 void (*send_check) (struct sock *sk,
466 struct tcphdr *th,
467 int len,
468 struct sk_buff *skb);
469
470 int (*rebuild_header) (struct sock *sk);
471
472 int (*conn_request) (struct sock *sk,
473 struct sk_buff *skb);
474
475 struct sock * (*syn_recv_sock) (struct sock *sk,
476 struct sk_buff *skb,
477 struct request_sock *req,
478 struct dst_entry *dst);
479
480 int (*remember_stamp) (struct sock *sk);
481
482 __u16 net_header_len;
483
484 int (*setsockopt) (struct sock *sk,
485 int level,
486 int optname,
487 char __user *optval,
488 int optlen);
489
490 int (*getsockopt) (struct sock *sk,
491 int level,
492 int optname,
493 char __user *optval,
494 int __user *optlen);
495
496
497 void (*addr2sockaddr) (struct sock *sk,
498 struct sockaddr *);
499
500 int sockaddr_len;
501};
502
503/*
504 * The next routines deal with comparing 32 bit unsigned ints
505 * and worry about wraparound (automatic with unsigned arithmetic).
506 */
507
508static inline int before(__u32 seq1, __u32 seq2)
509{
510 return (__s32)(seq1-seq2) < 0;
511}
512
513static inline int after(__u32 seq1, __u32 seq2)
514{
515 return (__s32)(seq2-seq1) < 0;
516}
517
518
519/* is s2<=s1<=s3 ? */
520static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
521{
522 return seq3 - seq2 >= seq1 - seq2;
523}
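/* Example of the wrap-safe comparisons above (illustrative only):
 *
 *	before(0xfffffff0, 0x00000010) == 1, because
 *	(__s32)(0xfffffff0 - 0x00000010) == (__s32)0xffffffe0 < 0,
 *	i.e. 0xfffffff0 is "before" 0x10 once the sequence space wraps.
 *
 *	between(0x00000005, 0xfffffff0, 0x00000010) == 1, since
 *	0x10 - 0xfffffff0 == 0x20 and 0x05 - 0xfffffff0 == 0x15 <= 0x20.
 */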
524
525
526extern struct proto tcp_prot;
527
528DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
529#define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
530#define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
531#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
532#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
533#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
534#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
535
536extern void tcp_v4_err(struct sk_buff *skb, u32);
537
538extern void tcp_shutdown (struct sock *sk, int how);
539
540extern int tcp_v4_rcv(struct sk_buff *skb);
541
542extern int tcp_v4_remember_stamp(struct sock *sk);
543
544extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
545
546extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
547 struct msghdr *msg, size_t size);
548extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
549
550extern int tcp_ioctl(struct sock *sk,
551 int cmd,
552 unsigned long arg);
553
554extern int tcp_rcv_state_process(struct sock *sk,
555 struct sk_buff *skb,
556 struct tcphdr *th,
557 unsigned len);
558
559extern int tcp_rcv_established(struct sock *sk,
560 struct sk_buff *skb,
561 struct tcphdr *th,
562 unsigned len);
563
564extern void tcp_rcv_space_adjust(struct sock *sk);
565
566enum tcp_ack_state_t
567{
568 TCP_ACK_SCHED = 1,
569 TCP_ACK_TIMER = 2,
570 TCP_ACK_PUSHED= 4
571};
572
573static inline void tcp_schedule_ack(struct tcp_sock *tp)
574{
575 tp->ack.pending |= TCP_ACK_SCHED;
576}
577
578static inline int tcp_ack_scheduled(struct tcp_sock *tp)
579{
580 return tp->ack.pending&TCP_ACK_SCHED;
581}
582
583static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
584{
585 if (tp->ack.quick) {
586 if (pkts >= tp->ack.quick) {
587 tp->ack.quick = 0;
588
589 /* Leaving quickack mode we deflate ATO. */
590 tp->ack.ato = TCP_ATO_MIN;
591 } else
592 tp->ack.quick -= pkts;
593 }
594}
595
596extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
597
598static __inline__ void tcp_delack_init(struct tcp_sock *tp)
599{
600 memset(&tp->ack, 0, sizeof(tp->ack));
601}
602
603static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
604{
605 rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
606}
607
608enum tcp_tw_status
609{
610 TCP_TW_SUCCESS = 0,
611 TCP_TW_RST = 1,
612 TCP_TW_ACK = 2,
613 TCP_TW_SYN = 3
614};
615
616
617extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
618 struct sk_buff *skb,
619 struct tcphdr *th,
620 unsigned len);
621
622extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
623 struct request_sock *req,
624 struct request_sock **prev);
625extern int tcp_child_process(struct sock *parent,
626 struct sock *child,
627 struct sk_buff *skb);
628extern void tcp_enter_frto(struct sock *sk);
629extern void tcp_enter_loss(struct sock *sk, int how);
630extern void tcp_clear_retrans(struct tcp_sock *tp);
631extern void tcp_update_metrics(struct sock *sk);
632
633extern void tcp_close(struct sock *sk,
634 long timeout);
635extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
636extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
637
638extern int tcp_getsockopt(struct sock *sk, int level,
639 int optname,
640 char __user *optval,
641 int __user *optlen);
642extern int tcp_setsockopt(struct sock *sk, int level,
643 int optname, char __user *optval,
644 int optlen);
645extern void tcp_set_keepalive(struct sock *sk, int val);
646extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
647 struct msghdr *msg,
648 size_t len, int nonblock,
649 int flags, int *addr_len);
650
651extern int tcp_listen_start(struct sock *sk);
652
653extern void tcp_parse_options(struct sk_buff *skb,
654 struct tcp_options_received *opt_rx,
655 int estab);
656
657/*
658 * TCP v4 functions exported for the inet6 API
659 */
660
661extern void tcp_v4_send_check(struct sock *sk,
662 struct tcphdr *th, int len,
663 struct sk_buff *skb);
664
665extern int tcp_v4_conn_request(struct sock *sk,
666 struct sk_buff *skb);
667
668extern struct sock * tcp_create_openreq_child(struct sock *sk,
669 struct request_sock *req,
670 struct sk_buff *skb);
671
672extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
673 struct sk_buff *skb,
674 struct request_sock *req,
675 struct dst_entry *dst);
676
677extern int tcp_v4_do_rcv(struct sock *sk,
678 struct sk_buff *skb);
679
680extern int tcp_v4_connect(struct sock *sk,
681 struct sockaddr *uaddr,
682 int addr_len);
683
684extern int tcp_connect(struct sock *sk);
685
686extern struct sk_buff * tcp_make_synack(struct sock *sk,
687 struct dst_entry *dst,
688 struct request_sock *req);
689
690extern int tcp_disconnect(struct sock *sk, int flags);
691
692extern void tcp_unhash(struct sock *sk);
693
694extern int tcp_v4_hash_connecting(struct sock *sk);
695
696
697/* From syncookies.c */
698extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
699 struct ip_options *opt);
700extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
701 __u16 *mss);
702
703/* tcp_output.c */
704
705extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
706 unsigned int cur_mss, int nonagle);
707extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
708extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
709extern void tcp_xmit_retransmit_queue(struct sock *);
710extern void tcp_simple_retransmit(struct sock *);
711extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
712
713extern void tcp_send_probe0(struct sock *);
714extern void tcp_send_partial(struct sock *);
715extern int tcp_write_wakeup(struct sock *);
716extern void tcp_send_fin(struct sock *sk);
717extern void tcp_send_active_reset(struct sock *sk,
718 unsigned int __nocast priority);
719extern int tcp_send_synack(struct sock *);
720extern void tcp_push_one(struct sock *, unsigned int mss_now);
721extern void tcp_send_ack(struct sock *sk);
722extern void tcp_send_delayed_ack(struct sock *sk);
723
724/* tcp_input.c */
725extern void tcp_cwnd_application_limited(struct sock *sk);
726
727/* tcp_timer.c */
728extern void tcp_init_xmit_timers(struct sock *);
729extern void tcp_clear_xmit_timers(struct sock *);
730
731extern void tcp_delete_keepalive_timer(struct sock *);
732extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
733extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
734extern unsigned int tcp_current_mss(struct sock *sk, int large);
735
736#ifdef TCP_DEBUG
737extern const char tcp_timer_bug_msg[];
738#endif
739
740/* tcp_diag.c */
741extern void tcp_get_info(struct sock *, struct tcp_info *);
742
743/* Read 'sendfile()'-style from a TCP socket */
744typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
745 unsigned int, size_t);
746extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
747 sk_read_actor_t recv_actor);
748
749static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
750{
751 struct tcp_sock *tp = tcp_sk(sk);
752
753 switch (what) {
754 case TCP_TIME_RETRANS:
755 case TCP_TIME_PROBE0:
756 tp->pending = 0;
757
758#ifdef TCP_CLEAR_TIMERS
759 sk_stop_timer(sk, &tp->retransmit_timer);
760#endif
761 break;
762 case TCP_TIME_DACK:
763 tp->ack.blocked = 0;
764 tp->ack.pending = 0;
765
766#ifdef TCP_CLEAR_TIMERS
767 sk_stop_timer(sk, &tp->delack_timer);
768#endif
769 break;
770 default:
771#ifdef TCP_DEBUG
772 printk(tcp_timer_bug_msg);
773#endif
774 return;
775 };
776
777}
778
779/*
780 * Reset the retransmission timer
781 */
782static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
783{
784 struct tcp_sock *tp = tcp_sk(sk);
785
786 if (when > TCP_RTO_MAX) {
787#ifdef TCP_DEBUG
788 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
789#endif
790 when = TCP_RTO_MAX;
791 }
792
793 switch (what) {
794 case TCP_TIME_RETRANS:
795 case TCP_TIME_PROBE0:
796 tp->pending = what;
797 tp->timeout = jiffies+when;
798 sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
799 break;
800
801 case TCP_TIME_DACK:
802 tp->ack.pending |= TCP_ACK_TIMER;
803 tp->ack.timeout = jiffies+when;
804 sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
805 break;
806
807 default:
808#ifdef TCP_DEBUG
809 printk(tcp_timer_bug_msg);
810#endif
811 return;
812 };
813}
814
815/* Initialize RCV_MSS value.
816 * RCV_MSS is our guess about the MSS used by the peer.
817 * We don't have any direct information about the MSS.
818 * It's better to underestimate the RCV_MSS rather than overestimate.
819 * Overestimations make us ACK less frequently than needed.
820 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
821 */
822
823static inline void tcp_initialize_rcv_mss(struct sock *sk)
824{
825 struct tcp_sock *tp = tcp_sk(sk);
826 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
827
828 hint = min(hint, tp->rcv_wnd/2);
829 hint = min(hint, TCP_MIN_RCVMSS);
830 hint = max(hint, TCP_MIN_MSS);
831
832 tp->ack.rcv_mss = hint;
833}
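/* Worked example (illustrative only): with advmss = mss_cache = 1460 and
 * rcv_wnd = 16384, hint starts at 1460, stays 1460 after the rcv_wnd/2
 * clamp, is cut down to TCP_MIN_RCVMSS (536) and stays above TCP_MIN_MSS (88),
 * so the initial peer-MSS guess is the conservative 536 bytes.
 */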
834
835static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
836{
837 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
838 ntohl(TCP_FLAG_ACK) |
839 snd_wnd);
840}
841
842static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
843{
844 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
845}
846
847static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
848{
849 if (skb_queue_empty(&tp->out_of_order_queue) &&
850 tp->rcv_wnd &&
851 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
852 !tp->urg_data)
853 tcp_fast_path_on(tp);
854}
855
856/* Compute the actual receive window we are currently advertising.
857 * Rcv_nxt can be after the window if our peer pushes more data
858 * than the offered window.
859 */
860static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
861{
862 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
863
864 if (win < 0)
865 win = 0;
866 return (u32) win;
867}
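/* Numeric sketch (illustrative only): with rcv_wup = 1000, rcv_wnd = 65535
 * and rcv_nxt = 1500, the advertised window still open to the peer is
 * 1000 + 65535 - 1500 = 65035 bytes. If the peer had pushed past the
 * offered window, the difference would go negative and is clamped to 0.
 */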
868
869/* Choose a new window, without checks for shrinking, and without
870 * scaling applied to the result. The caller does these things
871 * if necessary. This is a "raw" window selection.
872 */
873extern u32 __tcp_select_window(struct sock *sk);
874
875/* TCP timestamps are only 32 bits, which causes a slight
876 * complication on 64-bit systems since we store a snapshot
877 * of jiffies in the buffer control blocks below. We decidedly
878 * only use the low 32 bits of jiffies and hide the ugly
879 * casts with the following macro.
880 */
881#define tcp_time_stamp ((__u32)(jiffies))
882
883/* This is what the send packet queueing engine uses to pass
884 * TCP per-packet control information to the transmission
885 * code. We also store the host-order sequence numbers in
886 * here too. This is 36 bytes on 32-bit architectures,
887 * 40 bytes on 64-bit machines; if this grows, please adjust
888 * skbuff.h:skbuff->cb[xxx] size appropriately.
889 */
890struct tcp_skb_cb {
891 union {
892 struct inet_skb_parm h4;
893#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
894 struct inet6_skb_parm h6;
895#endif
896 } header; /* For incoming frames */
897 __u32 seq; /* Starting sequence number */
898 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
899 __u32 when; /* used to compute rtt's */
900 __u8 flags; /* TCP header flags. */
901
902 /* NOTE: These must match up to the flags byte in a
903 * real TCP header.
904 */
905#define TCPCB_FLAG_FIN 0x01
906#define TCPCB_FLAG_SYN 0x02
907#define TCPCB_FLAG_RST 0x04
908#define TCPCB_FLAG_PSH 0x08
909#define TCPCB_FLAG_ACK 0x10
910#define TCPCB_FLAG_URG 0x20
911#define TCPCB_FLAG_ECE 0x40
912#define TCPCB_FLAG_CWR 0x80
913
914 __u8 sacked; /* State flags for SACK/FACK. */
915#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
916#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
917#define TCPCB_LOST 0x04 /* SKB is lost */
918#define TCPCB_TAGBITS 0x07 /* All tag bits */
919
920#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
921#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
922
923#define TCPCB_URG 0x20 /* Urgent pointer advanced here */
924
925#define TCPCB_AT_TAIL (TCPCB_URG)
926
927 __u16 urg_ptr; /* Valid if the URG flag is set. */
928 __u32 ack_seq; /* Sequence number ACK'd */
929};
930
931#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
932
933#include <net/tcp_ecn.h>
934
935/* Due to TSO, an SKB can be composed of multiple actual
936 * packets. To keep these tracked properly, we use this.
937 */
938static inline int tcp_skb_pcount(const struct sk_buff *skb)
939{
940 return skb_shinfo(skb)->tso_segs;
941}
942
943/* This is valid iff tcp_skb_pcount() > 1. */
944static inline int tcp_skb_mss(const struct sk_buff *skb)
945{
946 return skb_shinfo(skb)->tso_size;
947}
948
949static inline void tcp_dec_pcount_approx(__u32 *count,
950 const struct sk_buff *skb)
951{
952 if (*count) {
953 *count -= tcp_skb_pcount(skb);
954 if ((int)*count < 0)
955 *count = 0;
956 }
957}
958
959static inline void tcp_packets_out_inc(struct sock *sk,
960 struct tcp_sock *tp,
961 const struct sk_buff *skb)
962{
963 int orig = tp->packets_out;
964
965 tp->packets_out += tcp_skb_pcount(skb);
966 if (!orig)
967 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
968}
969
970static inline void tcp_packets_out_dec(struct tcp_sock *tp,
971 const struct sk_buff *skb)
972{
973 tp->packets_out -= tcp_skb_pcount(skb);
974}
975
976/* Events passed to congestion control interface */
977enum tcp_ca_event {
978 CA_EVENT_TX_START, /* first transmit when no packets in flight */
979 CA_EVENT_CWND_RESTART, /* congestion window restart */
980 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
981 CA_EVENT_FRTO, /* fast recovery timeout */
982 CA_EVENT_LOSS, /* loss timeout */
983 CA_EVENT_FAST_ACK, /* in sequence ack */
984 CA_EVENT_SLOW_ACK, /* other ack */
985};
986
987/*
988 * Interface for adding new TCP congestion control handlers
989 */
990#define TCP_CA_NAME_MAX 16
991struct tcp_congestion_ops {
992 struct list_head list;
993
994 /* initialize private data (optional) */
995 void (*init)(struct tcp_sock *tp);
996 /* cleanup private data (optional) */
997 void (*release)(struct tcp_sock *tp);
998
999 /* return slow start threshold (required) */
1000 u32 (*ssthresh)(struct tcp_sock *tp);
1001 /* lower bound for congestion window (optional) */
1002 u32 (*min_cwnd)(struct tcp_sock *tp);
1003 /* do new cwnd calculation (required) */
1004 void (*cong_avoid)(struct tcp_sock *tp, u32 ack,
1005 u32 rtt, u32 in_flight, int good_ack);
1006 /* round trip time sample per acked packet (optional) */
1007 void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt);
1008 /* call before changing ca_state (optional) */
1009 void (*set_state)(struct tcp_sock *tp, u8 new_state);
1010 /* call when cwnd event occurs (optional) */
1011 void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev);
1012 /* new value of cwnd after loss (optional) */
1013 u32 (*undo_cwnd)(struct tcp_sock *tp);
1014 /* hook for packet ack accounting (optional) */
1015 void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked);
1016 /* get info for tcp_diag (optional) */
1017 void (*get_info)(struct tcp_sock *tp, u32 ext, struct sk_buff *skb);
1018
1019 char name[TCP_CA_NAME_MAX];
1020 struct module *owner;
1021};
1022
1023extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1024extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1025
1026extern void tcp_init_congestion_control(struct tcp_sock *tp);
1027extern void tcp_cleanup_congestion_control(struct tcp_sock *tp);
1028extern int tcp_set_default_congestion_control(const char *name);
1029extern void tcp_get_default_congestion_control(char *name);
1030extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name);
1031
1032extern struct tcp_congestion_ops tcp_init_congestion_ops;
1033extern u32 tcp_reno_ssthresh(struct tcp_sock *tp);
1034extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack,
1035 u32 rtt, u32 in_flight, int flag);
1036extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp);
1037extern struct tcp_congestion_ops tcp_reno;
1038
1039static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
1040{
1041 if (tp->ca_ops->set_state)
1042 tp->ca_ops->set_state(tp, ca_state);
1043 tp->ca_state = ca_state;
1044}
1045
1046static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event)
1047{
1048 if (tp->ca_ops->cwnd_event)
1049 tp->ca_ops->cwnd_event(tp, event);
1050}
1051
1052/* This determines how many packets are "in the network" to the best
1053 * of our knowledge. In many cases it is conservative, but where
1054 * detailed information is available from the receiver (via SACK
1055 * blocks etc.) we can make more aggressive calculations.
1056 *
1057 * Use this for decisions involving congestion control, use just
1058 * tp->packets_out to determine if the send queue is empty or not.
1059 *
1060 * Read this equation as:
1061 *
1062 * "Packets sent once on transmission queue" MINUS
1063 * "Packets left network, but not honestly ACKed yet" PLUS
1064 * "Packets fast retransmitted"
1065 */
1066static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1067{
1068 return (tp->packets_out - tp->left_out + tp->retrans_out);
1069}
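/* Example (illustrative only): with packets_out = 10, left_out = 3
 * (sacked_out + lost_out) and retrans_out = 1, the estimate is
 * 10 - 3 + 1 = 8 segments still in the network.
 */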
1070
1071/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1072 * The exception is rate halving phase, when cwnd is decreasing towards
1073 * ssthresh.
1074 */
1075static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
1076{
1077 if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
1078 return tp->snd_ssthresh;
1079 else
1080 return max(tp->snd_ssthresh,
1081 ((tp->snd_cwnd >> 1) +
1082 (tp->snd_cwnd >> 2)));
1083}
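/* Note (illustrative only): (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of cwnd,
 * i.e. halfway between the usual ssthresh of cwnd/2 and cwnd itself. With
 * snd_cwnd = 40 and snd_ssthresh = 20 this returns max(20, 30) = 30 outside
 * of the CWR/Recovery states.
 */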
1084
1085static inline void tcp_sync_left_out(struct tcp_sock *tp)
1086{
1087 if (tp->rx_opt.sack_ok &&
1088 (tp->sacked_out >= tp->packets_out - tp->lost_out))
1089 tp->sacked_out = tp->packets_out - tp->lost_out;
1090 tp->left_out = tp->sacked_out + tp->lost_out;
1091}
1092
1093/* Set the slow start threshold and cwnd, without falling back into slow start */
1094static inline void __tcp_enter_cwr(struct tcp_sock *tp)
1095{
1096 tp->undo_marker = 0;
1097 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
1098 tp->snd_cwnd = min(tp->snd_cwnd,
1099 tcp_packets_in_flight(tp) + 1U);
1100 tp->snd_cwnd_cnt = 0;
1101 tp->high_seq = tp->snd_nxt;
1102 tp->snd_cwnd_stamp = tcp_time_stamp;
1103 TCP_ECN_queue_cwr(tp);
1104}
1105
1106static inline void tcp_enter_cwr(struct tcp_sock *tp)
1107{
1108 tp->prior_ssthresh = 0;
1109 if (tp->ca_state < TCP_CA_CWR) {
1110 __tcp_enter_cwr(tp);
1111 tcp_set_ca_state(tp, TCP_CA_CWR);
1112 }
1113}
1114
1115extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
1116
1117/* Slow start with delack produces 3 packets of burst, so that
1118 * it is safe "de facto".
1119 */
1120static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
1121{
1122 return 3;
1123}
1124
1125static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
1126 const struct sk_buff *skb)
1127{
1128 if (skb->len < mss)
1129 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1130}
1131
1132static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
1133{
1134 if (!tp->packets_out && !tp->pending)
1135 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
1136}
1137
1138static __inline__ void tcp_push_pending_frames(struct sock *sk,
1139 struct tcp_sock *tp)
1140{
1141 __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
1142}
1143
1144static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
1145{
1146 tp->snd_wl1 = seq;
1147}
1148
1149static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
1150{
1151 tp->snd_wl1 = seq;
1152}
1153
1154extern void tcp_destroy_sock(struct sock *sk);
1155
1156
1157/*
1158 * Calculate(/check) TCP checksum
1159 */
1160static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
1161 unsigned long saddr, unsigned long daddr,
1162 unsigned long base)
1163{
1164 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1165}
1166
1167static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
1168{
1169 return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
1170}
1171
1172static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
1173{
1174 return skb->ip_summed != CHECKSUM_UNNECESSARY &&
1175 __tcp_checksum_complete(skb);
1176}
1177
1178/* Prequeue for VJ style copy to user, combined with checksumming. */
1179
1180static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
1181{
1182 tp->ucopy.task = NULL;
1183 tp->ucopy.len = 0;
1184 tp->ucopy.memory = 0;
1185 skb_queue_head_init(&tp->ucopy.prequeue);
1186}
1187
1188/* Packet is added to VJ-style prequeue for processing in process
1189 * context, if a reader task is waiting. Apparently, this exciting
1190 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1191 * failed somewhere. Latency? Burstiness? Well, at least now we will
1192 * see why it failed. 8)8) --ANK
1193 *
1194 * NOTE: is this not too big to inline?
1195 */
1196static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1197{
1198 struct tcp_sock *tp = tcp_sk(sk);
1199
1200 if (!sysctl_tcp_low_latency && tp->ucopy.task) {
1201 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1202 tp->ucopy.memory += skb->truesize;
1203 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1204 struct sk_buff *skb1;
1205
1206 BUG_ON(sock_owned_by_user(sk));
1207
1208 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1209 sk->sk_backlog_rcv(sk, skb1);
1210 NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
1211 }
1212
1213 tp->ucopy.memory = 0;
1214 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1215 wake_up_interruptible(sk->sk_sleep);
1216 if (!tcp_ack_scheduled(tp))
1217 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
1218 }
1219 return 1;
1220 }
1221 return 0;
1222}
1223
1224
1225#undef STATE_TRACE
1226
1227#ifdef STATE_TRACE
1228static const char *statename[]={
1229 "Unused","Established","Syn Sent","Syn Recv",
1230 "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1231 "Close Wait","Last ACK","Listen","Closing"
1232};
1233#endif
1234
1235static __inline__ void tcp_set_state(struct sock *sk, int state)
1236{
1237 int oldstate = sk->sk_state;
1238
1239 switch (state) {
1240 case TCP_ESTABLISHED:
1241 if (oldstate != TCP_ESTABLISHED)
1242 TCP_INC_STATS(TCP_MIB_CURRESTAB);
1243 break;
1244
1245 case TCP_CLOSE:
1246 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1247 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1248
1249 sk->sk_prot->unhash(sk);
1250 if (inet_sk(sk)->bind_hash &&
1251 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1252 inet_put_port(&tcp_hashinfo, sk);
1253 /* fall through */
1254 default:
1255 if (oldstate==TCP_ESTABLISHED)
1256 TCP_DEC_STATS(TCP_MIB_CURRESTAB);
1257 }
1258
1259 /* Change state AFTER socket is unhashed to avoid closed
1260 * socket sitting in hash tables.
1261 */
1262 sk->sk_state = state;
1263
1264#ifdef STATE_TRACE
1265 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1266#endif
1267}
1268
1269static __inline__ void tcp_done(struct sock *sk)
1270{
1271 tcp_set_state(sk, TCP_CLOSE);
1272 tcp_clear_xmit_timers(sk);
1273
1274 sk->sk_shutdown = SHUTDOWN_MASK;
1275
1276 if (!sock_flag(sk, SOCK_DEAD))
1277 sk->sk_state_change(sk);
1278 else
1279 tcp_destroy_sock(sk);
1280}
1281
1282static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
1283{
1284 rx_opt->dsack = 0;
1285 rx_opt->eff_sacks = 0;
1286 rx_opt->num_sacks = 0;
1287}
1288
1289static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
1290{
1291 if (tp->rx_opt.tstamp_ok) {
1292 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1293 (TCPOPT_NOP << 16) |
1294 (TCPOPT_TIMESTAMP << 8) |
1295 TCPOLEN_TIMESTAMP);
1296 *ptr++ = htonl(tstamp);
1297 *ptr++ = htonl(tp->rx_opt.ts_recent);
1298 }
1299 if (tp->rx_opt.eff_sacks) {
1300 struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
1301 int this_sack;
1302
1303 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1304 (TCPOPT_NOP << 16) |
1305 (TCPOPT_SACK << 8) |
1306 (TCPOLEN_SACK_BASE +
1307 (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
1308 for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
1309 *ptr++ = htonl(sp[this_sack].start_seq);
1310 *ptr++ = htonl(sp[this_sack].end_seq);
1311 }
1312 if (tp->rx_opt.dsack) {
1313 tp->rx_opt.dsack = 0;
1314 tp->rx_opt.eff_sacks--;
1315 }
1316 }
1317}
1318
1319/* Construct a tcp options header for a SYN or SYN_ACK packet.
1320 * If this is ever changed make sure to change the definition of
1321 * MAX_SYN_SIZE to match the new maximum number of options that you
1322 * can generate.
1323 */
1324static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
1325 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
1326{
1327 /* We always get an MSS option.
1328 * The option bytes which will be seen in normal data
1329 * packets should timestamps be used, must be in the MSS
1330 * advertised. But we subtract them from tp->mss_cache so
1331 * that calculations in tcp_sendmsg are simpler etc.
1332 * So account for this fact here if necessary. If we
1333 * don't do this correctly, as a receiver we won't
1334 * recognize data packets as being full sized when we
1335 * should, and thus we won't abide by the delayed ACK
1336 * rules correctly.
1337 * SACKs don't matter, we never delay an ACK when we
1338 * have any of those going out.
1339 */
1340 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
1341 if (ts) {
1342 if(sack)
1343 *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
1344 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1345 else
1346 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1347 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1348 *ptr++ = htonl(tstamp); /* TSVAL */
1349 *ptr++ = htonl(ts_recent); /* TSECR */
1350 } else if(sack)
1351 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1352 (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
1353 if (offer_wscale)
1354 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
1355}
1356
1357/* Determine a window scaling and initial window to offer. */
1358extern void tcp_select_initial_window(int __space, __u32 mss,
1359 __u32 *rcv_wnd, __u32 *window_clamp,
1360 int wscale_ok, __u8 *rcv_wscale);
1361
1362static inline int tcp_win_from_space(int space)
1363{
1364 return sysctl_tcp_adv_win_scale<=0 ?
1365 (space>>(-sysctl_tcp_adv_win_scale)) :
1366 space - (space>>sysctl_tcp_adv_win_scale);
1367}
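/* Example (illustrative only): with sysctl_tcp_adv_win_scale = 2 (the usual
 * default), tcp_win_from_space(65536) = 65536 - 65536/4 = 49152, i.e. 3/4 of
 * the buffer is offered as window and 1/4 is kept back as overhead. A
 * negative scale would instead offer space >> (-scale), e.g. 1/4 of it for -2.
 */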
1368
1369/* Note: caller must be prepared to deal with negative returns */
1370static inline int tcp_space(const struct sock *sk)
1371{
1372 return tcp_win_from_space(sk->sk_rcvbuf -
1373 atomic_read(&sk->sk_rmem_alloc));
1374}
1375
1376static inline int tcp_full_space(const struct sock *sk)
1377{
1378 return tcp_win_from_space(sk->sk_rcvbuf);
1379}
1380
1381static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
1382 struct sock *child)
1383{
1384 reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
1385}
1386
1387static inline void
1388tcp_synq_removed(struct sock *sk, struct request_sock *req)
1389{
1390 if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
1391 tcp_delete_keepalive_timer(sk);
1392}
1393
1394static inline void tcp_synq_added(struct sock *sk)
1395{
1396 if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
1397 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1398}
1399
1400static inline int tcp_synq_len(struct sock *sk)
1401{
1402 return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
1403}
1404
1405static inline int tcp_synq_young(struct sock *sk)
1406{
1407 return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
1408}
1409
1410static inline int tcp_synq_is_full(struct sock *sk)
1411{
1412 return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
1413}
1414
1415static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
1416 struct request_sock **prev)
1417{
1418 reqsk_queue_unlink(&tp->accept_queue, req, prev);
1419}
1420
1421static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
1422 struct request_sock **prev)
1423{
1424 tcp_synq_unlink(tcp_sk(sk), req, prev);
1425 tcp_synq_removed(sk, req);
1426 reqsk_free(req);
1427}
1428
1429static __inline__ void tcp_openreq_init(struct request_sock *req,
1430 struct tcp_options_received *rx_opt,
1431 struct sk_buff *skb)
1432{
1433 struct inet_request_sock *ireq = inet_rsk(req);
1434
1435 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
1436 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1437 req->mss = rx_opt->mss_clamp;
1438 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1439 ireq->tstamp_ok = rx_opt->tstamp_ok;
1440 ireq->sack_ok = rx_opt->sack_ok;
1441 ireq->snd_wscale = rx_opt->snd_wscale;
1442 ireq->wscale_ok = rx_opt->wscale_ok;
1443 ireq->acked = 0;
1444 ireq->ecn_ok = 0;
1445 ireq->rmt_port = skb->h.th->source;
1446}
1447
1448extern void tcp_enter_memory_pressure(void);
1449
1450static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1451{
1452 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1453}
1454
1455static inline int keepalive_time_when(const struct tcp_sock *tp)
1456{
1457 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1458}
1459
1460static inline int tcp_fin_time(const struct tcp_sock *tp)
1461{
1462 int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
1463
1464 if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
1465 fin_timeout = (tp->rto<<2) - (tp->rto>>1);
1466
1467 return fin_timeout;
1468}
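/* Note (illustrative only): (rto << 2) - (rto >> 1) is 3.5 * rto, so the
 * FIN-WAIT-2 timeout is never allowed to drop below roughly 3.5 RTOs even
 * if tcp_fin_timeout (or linger2) is configured smaller.
 */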
1469
1470static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
1471{
1472 if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
1473 return 0;
1474 if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
1475 return 0;
1476
1477 /* RST segments are not recommended to carry timestamp,
1478 and, if they do, it is recommended to ignore PAWS because
1479 "their cleanup function should take precedence over timestamps."
1480 Certainly, that is a mistake. It is necessary to understand the reasons
1481 for this constraint before relaxing it: if the peer reboots, its clock may go
1482 out-of-sync and half-open connections will not be reset.
1483 Actually, the problem would not exist if all
1484 the implementations followed the draft about maintaining clock state
1485 across reboots. Linux-2.2 DOES NOT!
1486
1487 However, we can relax time bounds for RST segments to MSL.
1488 */
1489 if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1490 return 0;
1491 return 1;
1492}
1493
1494#define TCP_CHECK_TIMER(sk) do { } while (0)
1495
1496static inline int tcp_use_frto(const struct sock *sk)
1497{
1498 const struct tcp_sock *tp = tcp_sk(sk);
1499
1500 /* F-RTO must be activated in sysctl and there must be some
1501 * unsent new data, and the advertised window should allow
1502 * sending it.
1503 */
1504 return (sysctl_tcp_frto && sk->sk_send_head &&
1505 !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
1506 tp->snd_una + tp->snd_wnd));
1507}
1508
1509static inline void tcp_mib_init(void)
1510{
1511 /* See RFC 2012 */
1512 TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
1513 TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1514 TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1515 TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
1516}
1517
1518/* /proc */
1519enum tcp_seq_states {
1520 TCP_SEQ_STATE_LISTENING,
1521 TCP_SEQ_STATE_OPENREQ,
1522 TCP_SEQ_STATE_ESTABLISHED,
1523 TCP_SEQ_STATE_TIME_WAIT,
1524};
1525
1526struct tcp_seq_afinfo {
1527 struct module *owner;
1528 char *name;
1529 sa_family_t family;
1530 int (*seq_show) (struct seq_file *m, void *v);
1531 struct file_operations *seq_fops;
1532};
1533
1534struct tcp_iter_state {
1535 sa_family_t family;
1536 enum tcp_seq_states state;
1537 struct sock *syn_wait_sk;
1538 int bucket, sbucket, num, uid;
1539 struct seq_operations seq_ops;
1540};
1541
1542extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
1543extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
1544
1545#endif /* _TCP_H */