net/netfilter/ipvs/ip_vs_lblc.c
/*
 * IPVS:        Locality-Based Least-Connection scheduling module
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton         :    fixed the terrible locking bugs
 *                                  *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang           :    fixed the uninitialized tbl->lock bug
 *     Wensong Zhang           :    added the full expiration check to
 *                                  collect stale entries of 24+ hours when
 *                                  no partial expire check was run in half
 *                                  an hour
 *     Julian Anastasov        :    replaced the del_timer call with
 *                                  del_timer_sync to avoid a possible race
 *                                  between the timer handler and the
 *                                  del_timer thread in SMP
 *
 */

/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *       if cachenode[dest_ip] is null then
 *               n, cachenode[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- cachenode[dest_ip];
 *               if (n is dead) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                 n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *       return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
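
/*
 * Illustrative walk-through (not part of the original comment): the first
 * packet seen for a destination IP picks the weighted least-connection
 * server and caches the mapping; later packets for the same IP stick to
 * that server until it dies, or until it runs above its weight while some
 * other server sits below half of its weighted capacity.
 */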

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>


/*
 * Timeouts for garbage collection of stale IPVS lblc entries,
 * used when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

/*
 * It is for the full expiration check.
 * When there is no partial expiration check (garbage collection)
 * in half an hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
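
/*
 * Sizing note (derived from the constants above): partial expiration runs
 * every CHECK_EXPIRE_INTERVAL (60 s), so COUNT_FOR_FULL_EXPIRATION (30)
 * such runs is the "half hour" mentioned above; the default expiration of
 * 24*60*60*HZ is one day, stored in jiffies.
 */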


/*
 * for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)
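
/*
 * With the default 10 bits this gives 1024 buckets and a power-of-two
 * mask, so a hash value can be reduced with '&' instead of '%';
 * ip_vs_lblc_init_svc() later caps the table at an average of 16 entries
 * per bucket (max_size = IP_VS_LBLC_TAB_SIZE*16).
 */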


/*
 * IPVS lblc entry represents an association between destination
 * IP address and its destination server
 */
struct ip_vs_lblc_entry {
        struct list_head        list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
};


/*
 * IPVS lblc hash table
 */
struct ip_vs_lblc_table {
        struct list_head        bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};


/*
 * IPVS LBLC sysctl table
 */

static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
                .data           = &sysctl_ip_vs_lblc_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { .ctl_name = 0 }
};
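
/*
 * Usage note (assuming the standard IPVS sysctl path from net_vs_ctl_path):
 * the value is exposed as net/ipv4/vs/lblc_expiration, and
 * proc_dointvec_jiffies converts between the user-visible seconds and the
 * jiffies stored in sysctl_ip_vs_lblc_expiration.
 */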

static struct ctl_table_header * sysctl_header;

static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
        list_del(&en->list);
        /*
         * We don't kfree dest because it is referred to either by its
         * service or by the trash dest list.
         */
        atomic_dec(&en->dest->refcnt);
        kfree(en);
}


/*
 * Returns hash value for IPVS LBLC entry
 */
static inline unsigned
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}
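
/*
 * Note: 2654435761 (0x9E3779B1) is a prime close to 2^32/phi, so the
 * multiply-and-mask above is a Fibonacci-style multiplicative hash that
 * spreads nearby addresses across the table reasonably well.
 */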


/*
 * Hash an entry in the ip_vs_lblc_table.
 */
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
        unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}


/*
 * Get ip_vs_lblc_entry associated with supplied parameters. Called under
 * read lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
               const union nf_inet_addr *addr)
{
        unsigned hash = ip_vs_lblc_hashkey(af, addr);
        struct ip_vs_lblc_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}


/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
               struct ip_vs_dest *dest)
{
        struct ip_vs_lblc_entry *en;

        en = ip_vs_lblc_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en) {
                        IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
                        return NULL;
                }

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                atomic_inc(&dest->refcnt);
                en->dest = dest;

                ip_vs_lblc_hash(tbl, en);
        } else if (en->dest != dest) {
                atomic_dec(&en->dest->refcnt);
                atomic_inc(&dest->refcnt);
                en->dest = dest;
        }

        return en;
}


/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{
        struct ip_vs_lblc_entry *en, *nxt;
        int i;

        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
        }
}


static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_lblc_entry *en, *nxt;
        unsigned long now = jiffies;
        int i, j;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now,
                                        en->lastuse + sysctl_ip_vs_lblc_expiration))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}


/*
 * Periodic timer handler for the IPVS lblc table.
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 *        The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblc_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblc_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

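        /*
         * goal is the eviction budget: 4/3 of the overflow beyond max_size,
         * capped at half the nominal capacity. The loop below walks the
         * buckets round-robin from the rover until the budget is spent.
         */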
        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}


static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblc_table *tbl;

        /*
         * Allocate the ip_vs_lblc_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL) {
                IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         * Initialize the hash buckets
         */
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         * Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}


static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblc_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}


static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We assume that the overhead of processing active connections is,
         * on average, fifty times higher than that of inactive connections.
         * (This factor of fifty may not be accurate; it can be tuned later.)
         * We use the following formula to estimate the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * A server with weight=0 is quiesced and will not receive any
         * new connection.
         */
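        /*
         * Worked example (illustrative numbers): least has loh=100 and
         * weight=2 (load 50), dest has doh=90 and weight=3 (load 30);
         * 100*3 > 90*2 holds, so dest replaces least without any division.
         */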
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         * Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}


/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
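/*
 * This is the pseudo-code test from the header comment:
 * n.conns > n.weight AND there is a node m with m.conns < m.weight/2.
 */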
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}


/*
 * Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblc_entry *en;

        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

        IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /*
                 * If the destination is not available, i.e. it's in the
                 * trash, we must ignore it, as it may be removed from under
                 * our feet if someone drops our reference count. Our caller
                 * only makes sure that destinations that are not in the
                 * trash are not moved to the trash while we are scheduling.
                 * But anyone can free up entries from the trash at any time.
                 */

                if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
                        dest = en->dest;
        }
        read_unlock(&svc->sched_lock);

        /* If the destination has a weight and is not overloaded, use it */
        if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
                goto out;

        /* No cache entry or it is invalid, time to schedule */
        dest = __ip_vs_lblc_schedule(svc);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblc_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}


/*
 * IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
        .name =                 "lblc",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
        .init_service =         ip_vs_lblc_init_svc,
        .done_service =         ip_vs_lblc_done_svc,
        .schedule =             ip_vs_lblc_schedule,
};


static int __init ip_vs_lblc_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}
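
/*
 * Note the init ordering above: the sysctl table is registered before the
 * scheduler so the tunable exists as soon as the scheduler can run; if
 * scheduler registration fails, the sysctl registration is rolled back.
 */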


static void __exit ip_vs_lblc_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");