/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
	const struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
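
/*
 * A minimal sketch of how a congestion control module registers itself
 * through the API above; the name "example" and the reuse of the Reno
 * helpers exported later in this file are assumptions for illustration,
 * not an existing module.
 */
#if 0
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* mandatory op */
	.cong_avoid	= tcp_reno_cong_avoid,	/* mandatory op */
};

static int __init tcp_example_register(void)
{
	/* Fails with -EINVAL if ssthresh/cong_avoid were left unset. */
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");
#endif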

/*
 * Remove congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(const char *name)
{
	const struct tcp_congestion_ops *ca;
	u32 key;

	might_sleep();

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	key = ca ? ca->key : TCP_CA_UNSPEC;
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fall back to the next available one. The last really
		 * guaranteed fallback is Reno from this list.
		 */
	}
out:
	rcu_read_unlock();

	/* Clear out private data before diag gets it;
	 * the ca has not been initialized at this point.
	 */
	if (ca->get_info)
		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;

	if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
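
/*
 * A sketch of the userspace side, under the assumption stated in the
 * comment above: this function sits behind the
 * net.ipv4.tcp_congestion_control sysctl, so the system-wide default can
 * be changed by writing an algorithm name to the matching procfs file
 * (needs CAP_NET_ADMIN; "reno" below is only an example value, and the
 * helper name is hypothetical).
 */
#if 0
#include <stdio.h>

static int set_default_cc(const char *name)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", name);	/* e.g. set_default_cc("reno") */
	return fclose(f);
}
#endif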

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
out:
	rcu_read_unlock();
	return err;
}
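
/*
 * A sketch of the userspace side: this function is reached through the
 * TCP_CONGESTION socket option, so an application can pick a per-socket
 * algorithm as below. Non-restricted algorithms are available to any
 * user; others need CAP_NET_ADMIN, per the check above. The helper name
 * is hypothetical.
 */
#if 0
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_socket_cc(int fd, const char *name)
{
	/* TCP_CONGESTION takes the algorithm name as a string. */
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}
#endif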

/* Slow start is used when the congestion window is no greater than the slow
 * start threshold. It is based on RFC2581 and also handles stretch ACKs
 * properly. We do not implement RFC3465 Appropriate Byte Counting (ABC) per
 * se but something better;) a packet is only considered (s)acked in its
 * entirety to defend against the ACK attacks described in the RFC. Slow start
 * processes a stretch ACK of degree N as if N acks of degree 1 are received
 * back to back except ABC caps N to 2. Slow start exits when cwnd grows over
 * ssthresh and returns the leftover acks to adjust cwnd in congestion
 * avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
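
/*
 * Worked example (illustrative values): with snd_cwnd = 10,
 * snd_ssthresh = 12 and a stretch ACK covering acked = 5 packets,
 * cwnd = 10 + 5 = 15 exceeds ssthresh and is capped at 12 + 1 = 13.
 * acked becomes 5 - (13 - 10) = 2, snd_cwnd becomes 13, and the 2
 * leftover acks are returned for the caller to consume in congestion
 * avoidance.
 */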

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
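
/*
 * Worked example (illustrative values): tcp_reno_cong_avoid() below calls
 * this with w == snd_cwnd. With snd_cwnd = 10 and snd_cwnd_cnt = 8, an
 * ACK covering acked = 3 raises snd_cwnd_cnt to 11 >= w, so
 * delta = 11 / 10 = 1: snd_cwnd grows to 11 and snd_cwnd_cnt keeps the
 * remainder 1; roughly one cwnd increment per window of data acked.
 */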

/*
 * TCP Reno congestion control
 * This is a special case used as a fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
};