[XFRM]: Drop packets when replay counter would overflow
[linux-2.6-block.git] / net / core / neighbour.c
CommitLineData
1da177e4
LT
1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
1da177e4
LT
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/socket.h>
1da177e4
LT
22#include <linux/netdevice.h>
23#include <linux/proc_fs.h>
24#ifdef CONFIG_SYSCTL
25#include <linux/sysctl.h>
26#endif
27#include <linux/times.h>
457c4cbc 28#include <net/net_namespace.h>
1da177e4
LT
29#include <net/neighbour.h>
30#include <net/dst.h>
31#include <net/sock.h>
8d71740c 32#include <net/netevent.h>
a14a49d2 33#include <net/netlink.h>
1da177e4
LT
34#include <linux/rtnetlink.h>
35#include <linux/random.h>
543537bd 36#include <linux/string.h>
c3609d51 37#include <linux/log2.h>
1da177e4
LT
38
39#define NEIGH_DEBUG 1
40
41#define NEIGH_PRINTK(x...) printk(x)
42#define NEIGH_NOPRINTK(x...) do { ; } while(0)
43#define NEIGH_PRINTK0 NEIGH_PRINTK
44#define NEIGH_PRINTK1 NEIGH_NOPRINTK
45#define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47#if NEIGH_DEBUG >= 1
48#undef NEIGH_PRINTK1
49#define NEIGH_PRINTK1 NEIGH_PRINTK
50#endif
51#if NEIGH_DEBUG >= 2
52#undef NEIGH_PRINTK2
53#define NEIGH_PRINTK2 NEIGH_PRINTK
54#endif
55
56#define PNEIGH_HASHMASK 0xF
57
58static void neigh_timer_handler(unsigned long arg);
d961db35
TG
59static void __neigh_notify(struct neighbour *n, int type, int flags);
60static void neigh_update_notify(struct neighbour *neigh);
1da177e4
LT
61static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64static struct neigh_table *neigh_tables;
45fc3b11 65#ifdef CONFIG_PROC_FS
9a32144e 66static const struct file_operations neigh_stat_seq_fops;
45fc3b11 67#endif
1da177e4
LT
68
69/*
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
71
72 - All the scans/updates to hash buckets MUST be made under this lock.
73 - NOTHING clever should be made under this lock: no callbacks
74 to protocol backends, no attempts to send something to network.
75 It will result in deadlocks, if backend/driver wants to use neighbour
76 cache.
77 - If the entry requires some non-trivial actions, increase
78 its reference count and release table lock.
79
80 Neighbour entries are protected:
81 - with reference count.
82 - with rwlock neigh->lock
83
84 Reference count prevents destruction.
85
86 neigh->lock mainly serializes ll address data and its validity state.
87 However, the same lock is used to protect another entry fields:
88 - timer
89 - resolution queue
90
91 Again, nothing clever shall be made under neigh->lock,
92 the most complicated procedure, which we allow is dev->hard_header.
93 It is supposed, that dev->hard_header is simplistic and does
94 not make callbacks to neighbour tables.
95
96 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
97 list of neighbour tables. This list is used only in process context,
98 */
99
100static DEFINE_RWLOCK(neigh_tbl_lock);
101
/* Output stub installed on dead or unresolvable neighbour entries:
 * the packet is dropped and -ENETDOWN returned to the caller.
 */
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
107
4f494554
TG
108static void neigh_cleanup_and_release(struct neighbour *neigh)
109{
110 if (neigh->parms->neigh_cleanup)
111 neigh->parms->neigh_cleanup(neigh);
112
d961db35 113 __neigh_notify(neigh, RTM_DELNEIGH, 0);
4f494554
TG
114 neigh_release(neigh);
115}
116
1da177e4
LT
117/*
118 * It is random distribution in the interval (1/2)*base...(3/2)*base.
119 * It corresponds to default IPv6 settings and is not overridable,
120 * because it is really reasonable choice.
121 */
122
unsigned long neigh_rand_reach_time(unsigned long base)
{
	/* Uniform in [base/2, 3*base/2); 0 stays 0. */
	if (!base)
		return 0;
	return base / 2 + net_random() % base;
}
127
128
/* Synchronously shrink the table: walk every hash bucket and free each
 * entry that is unreferenced (refcnt == 1, i.e. only the table holds it)
 * and not NUD_PERMANENT.  Called from neigh_alloc() when the table is
 * over its gc thresholds.  Returns 1 if at least one entry was freed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				/* Drops the table's own reference. */
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Remember the flush time so neigh_alloc() can rate-limit the
	 * gc_thresh2-triggered collections to one per 5 seconds.
	 */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
167
a43d8994
PE
168static void neigh_add_timer(struct neighbour *n, unsigned long when)
169{
170 neigh_hold(n);
171 if (unlikely(mod_timer(&n->timer, when))) {
172 printk("NEIGH: BUG, double timer add, state is %x\n",
173 n->nud_state);
174 dump_stack();
175 }
176}
177
1da177e4
LT
178static int neigh_del_timer(struct neighbour *n)
179{
180 if ((n->nud_state & NUD_IN_TIMER) &&
181 del_timer(&n->timer)) {
182 neigh_release(n);
183 return 1;
184 }
185 return 0;
186}
187
188static void pneigh_queue_purge(struct sk_buff_head *list)
189{
190 struct sk_buff *skb;
191
192 while ((skb = skb_dequeue(list)) != NULL) {
193 dev_put(skb->dev);
194 kfree_skb(skb);
195 }
196}
197
/* Unlink from the hash table every entry belonging to @dev (or all
 * entries when @dev is NULL).  Caller must hold tbl->lock for writing.
 * Entries still referenced elsewhere cannot be destroyed immediately:
 * they are neutralized (timer killed, output redirected to
 * neigh_blackhole, state forced to NOARP/NONE) and actually freed when
 * the last user drops its reference.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
1da177e4 238
49636bb1
HX
/* Flush all cached entries for @dev after its hardware address changed.
 * Takes tbl->lock itself (contrast with neigh_flush_dev(), which expects
 * the caller to hold it).
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
245
/* Device is going down: flush its neighbour and proxy-neighbour state.
 * The proxy timer is stopped only after the table lock is dropped,
 * because del_timer_sync() waits for a possibly-running handler that
 * could itself contend on the lock.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
257
/* Allocate and minimally initialize a neighbour entry for @tbl.
 * Returns NULL when memory is unavailable or the table is over its GC
 * thresholds and forced collection could not make room.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	/* Account the new entry up front; every failure path below must
	 * undo this (out_entries).  Above gc_thresh3 allocation may fail
	 * outright; above gc_thresh2 a forced GC is attempted at most
	 * once per 5 seconds.
	 */
	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	/* Drop packets until the entry is resolved and connected. */
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	/* Marked dead until neigh_create() links it into the hash table. */
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
296
297static struct neighbour **neigh_hash_alloc(unsigned int entries)
298{
299 unsigned long size = entries * sizeof(struct neighbour *);
300 struct neighbour **ret;
301
302 if (size <= PAGE_SIZE) {
77d04bd9 303 ret = kzalloc(size, GFP_ATOMIC);
1da177e4
LT
304 } else {
305 ret = (struct neighbour **)
77d04bd9 306 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
1da177e4 307 }
1da177e4
LT
308 return ret;
309}
310
311static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
312{
313 unsigned long size = entries * sizeof(struct neighbour *);
314
315 if (size <= PAGE_SIZE)
316 kfree(hash);
317 else
318 free_pages((unsigned long)hash, get_order(size));
319}
320
/* Rehash the table into @new_entries buckets (must be a power of two).
 * Caller holds tbl->lock for writing.  The hash seed is re-randomized so
 * chain layout is not predictable across grows.  On allocation failure
 * the old table is simply kept.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			/* Recompute with the freshly seeded hash_rnd. */
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
356
357struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
358 struct net_device *dev)
359{
360 struct neighbour *n;
361 int key_len = tbl->key_len;
c5e29460 362 u32 hash_val = tbl->hash(pkey, dev);
4ec93edb 363
1da177e4
LT
364 NEIGH_CACHE_STAT_INC(tbl, lookups);
365
366 read_lock_bh(&tbl->lock);
c5e29460 367 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
1da177e4
LT
368 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
369 neigh_hold(n);
370 NEIGH_CACHE_STAT_INC(tbl, hits);
371 break;
372 }
373 }
374 read_unlock_bh(&tbl->lock);
375 return n;
376}
377
378struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
379{
380 struct neighbour *n;
381 int key_len = tbl->key_len;
c5e29460 382 u32 hash_val = tbl->hash(pkey, NULL);
1da177e4
LT
383
384 NEIGH_CACHE_STAT_INC(tbl, lookups);
385
386 read_lock_bh(&tbl->lock);
c5e29460 387 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
1da177e4
LT
388 if (!memcmp(n->primary_key, pkey, key_len)) {
389 neigh_hold(n);
390 NEIGH_CACHE_STAT_INC(tbl, hits);
391 break;
392 }
393 }
394 read_unlock_bh(&tbl->lock);
395 return n;
396}
397
/* Allocate, construct and insert a new entry for (@pkey, @dev).
 * Returns the new entry with an extra reference for the caller, an
 * existing entry if another CPU raced the insert, or an ERR_PTR on
 * failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Backdate 'confirmed' so the fresh entry is immediately treated
	 * as unverified.
	 */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	/* parms may have been released while we did not hold the lock. */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* A concurrent neigh_create() may have inserted the same key. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
465
/* Look up a proxy-neighbour entry for (@pkey, @dev); with @creat set,
 * allocate and insert one when no match exists.  A wildcard entry
 * (n->dev == NULL) matches any device.  May sleep (GFP_KERNEL) on the
 * creation path.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	/* Fold the last four key bytes down to a PNEIGH_HASHMASK bucket. */
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	/* RTNL serializes creators, so dropping the read lock above
	 * cannot race another insert of the same key.
	 */
	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
518
519
/* Remove and free the proxy entry matching (@pkey, @dev) exactly.
 * Returns 0 on success, -ENOENT when no such entry exists.
 */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	/* Same key folding as pneigh_lookup(). */
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			/* Destructor and frees run outside the lock. */
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
549
/* Remove all proxy entries bound to @dev (all entries when @dev is
 * NULL).  Caller must hold tbl->lock for writing (see neigh_ifdown()).
 * Always returns -ENOENT; the value is ignored by its only caller.
 */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
572
573
/*
 * neighbour must already be out of the table;
 * the last reference has been dropped, so free everything it owns.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	/* Detach cached hardware headers.  dst entries may still hold
	 * their own hh references, so redirect hh_output to the
	 * blackhole and drop only this entry's reference.
	 */
	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
615
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	/* Route the entry and every cached hardware header through the
	 * resolving slow path.
	 */
	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
632
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	/* Switch the entry and every cached hardware header to the
	 * connected (no further resolution) output functions.
	 */
	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
649
/* Periodic garbage collector: each run refreshes reachable_time (every
 * 300 s) and reaps one hash bucket, cycling through all buckets so the
 * full table is scanned about every base_reachable_time/2 ticks.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	/* One bucket per run; advance the cursor for next time. */
	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		/* Reap entries referenced only by the table that have
		 * failed resolution or gone unused past gc_staletime.
		 */
		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire>HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
720
721static __inline__ int neigh_max_probes(struct neighbour *n)
722{
723 struct neigh_parms *p = n->parms;
724 return (n->nud_state & NUD_PROBE ?
725 p->ucast_probes :
726 p->ucast_probes + p->app_probes + p->mcast_probes);
727}
728
1da177e4
LT
/* Called when a timer expires for a neighbour entry.  Drives the NUD
 * state machine: REACHABLE -> DELAY -> STALE, DELAY -> REACHABLE or
 * PROBE, and fails INCOMPLETE/PROBE entries once neigh_max_probes()
 * solicitations have gone unanswered.
 */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		/* mod_timer() returning 0 means the timer was inactive:
		 * take a fresh reference for the newly armed timer.
		 */
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
841
/* Kick address resolution for @neigh if it is needed.  Returns 0 when
 * the caller may transmit immediately, 1 when the skb was queued (or
 * dropped on immediate failure) pending resolution.  Takes neigh->lock.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			/* No probing mechanism configured at all: fail
			 * immediately and drop the packet.
			 */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* arp_queue is bounded by parms->queue_len; the
			 * oldest queued packet is dropped to make room.
			 */
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
895
e92b43a3 896static void neigh_update_hhs(struct neighbour *neigh)
1da177e4
LT
897{
898 struct hh_cache *hh;
3b04ddde
SH
899 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
900 = neigh->dev->header_ops->cache_update;
1da177e4
LT
901
902 if (update) {
903 for (hh = neigh->hh; hh; hh = hh->hh_next) {
3644f0ce 904 write_seqlock_bh(&hh->hh_lock);
1da177e4 905 update(hh, neigh->dev, neigh->ha);
3644f0ce 906 write_sequnlock_bh(&hh->hh_lock);
1da177e4
LT
907 }
908 }
909}
910
911
912
/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows to retain current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Only administrative updates may touch NOARP/PERMANENT entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: stop the timer, disable the
	 * fast path and record the state; no address handling needed.
	 */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the cached address but demote the
				 * entry so it gets re-verified.
				 */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	/* Notify only after the lock is dropped: listeners may need to
	 * take neigh->lock themselves.
	 */
	if (notify)
		neigh_update_notify(neigh);

	return err;
}
1067
1068struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1069 u8 *lladdr, void *saddr,
1070 struct net_device *dev)
1071{
1072 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1073 lladdr || !dev->addr_len);
1074 if (neigh)
4ec93edb 1075 neigh_update(neigh, lladdr, NUD_STALE,
1da177e4
LT
1076 NEIGH_UPDATE_F_OVERRIDE);
1077 return neigh;
1078}
1079
/* Attach a cached hardware header of @protocol to @dst, reusing one
 * already on the neighbour's list or building a fresh one via the
 * device's header_ops->cache().  Called with neigh write-locked.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			/* Reference held by the neighbour's own list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* Separate reference for the dst. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1114
1115/* This function can be used in contexts, where only old dev_queue_xmit
1116 worked, f.e. if you want to override normal output path (eql, shaper),
1117 but resolution is not made yet.
1118 */
1119
1120int neigh_compat_output(struct sk_buff *skb)
1121{
1122 struct net_device *dev = skb->dev;
1123
bbe735e4 1124 __skb_pull(skb, skb_network_offset(skb));
1da177e4 1125
0c4e8581
SH
1126 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1127 skb->len) < 0 &&
3b04ddde 1128 dev->header_ops->rebuild(skb))
1da177e4
LT
1129 return 0;
1130
1131 return dev_queue_xmit(skb);
1132}
1133
/* Slow and careful: resolving output path.  Queues the skb for
 * resolution when the entry is not yet valid; otherwise builds the
 * hardware header (initializing the hh cache on first use) and
 * transmits.
 */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	/* neigh_event_send() returning 0 means the entry is usable now;
	 * non-zero means the skb was queued/dropped by resolution.
	 */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			/* First packet on a header-caching device: build
			 * the hh cache under the write lock (re-checked
			 * after acquisition).
			 */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1178
1179/* As fast as possible without hh cache */
1180
/*
 * neigh_connected_output - fast output path: build the hardware header
 * straight from neigh->ha without running the resolution state machine.
 *
 * neigh->ha is read under the neighbour read lock.  Returns the
 * queue_xmit result, or -EINVAL (skb freed) if header construction
 * failed.
 */
int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
1202
/*
 * neigh_proxy_process - proxy timer handler draining tbl->proxy_queue.
 *
 * Walks the queue under its spinlock: entries whose scheduled time has
 * arrived are replayed through tbl->proxy_redo() (if set and the
 * device is still running) or dropped.  The timer is re-armed for the
 * earliest still-pending entry, if any.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		/* <= 0 means this entry's scheduled time has passed. */
		long tdif = NEIGH_CB(back)->sched_next - now;

		/* Advance before possibly unlinking 'back'. */
		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			/* Drop the ref taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1236
1237void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1238 struct sk_buff *skb)
1239{
1240 unsigned long now = jiffies;
1241 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1242
1243 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1244 kfree_skb(skb);
1245 return;
1246 }
a61bbcf2
PM
1247
1248 NEIGH_CB(skb)->sched_next = sched_next;
1249 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1da177e4
LT
1250
1251 spin_lock(&tbl->proxy_queue.lock);
1252 if (del_timer(&tbl->proxy_timer)) {
1253 if (time_before(tbl->proxy_timer.expires, sched_next))
1254 sched_next = tbl->proxy_timer.expires;
1255 }
1256 dst_release(skb->dst);
1257 skb->dst = NULL;
1258 dev_hold(skb->dev);
1259 __skb_queue_tail(&tbl->proxy_queue, skb);
1260 mod_timer(&tbl->proxy_timer, sched_next);
1261 spin_unlock(&tbl->proxy_queue.lock);
1262}
1263
1264
/*
 * neigh_parms_alloc - clone the table's default parameters, optionally
 * bound to a device.
 *
 * Copies tbl->parms, lets the device adjust the copy via
 * dev->neigh_setup() (failure frees the copy and returns NULL), takes
 * a reference on @dev, and links the new parms into the table's list
 * under tbl->lock.  Returns NULL on allocation failure.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);

	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		/* Randomize so all hosts don't probe in lock-step. */
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
1293
1294static void neigh_rcu_free_parms(struct rcu_head *head)
1295{
1296 struct neigh_parms *parms =
1297 container_of(head, struct neigh_parms, rcu_head);
1298
1299 neigh_parms_put(parms);
1300}
1301
/*
 * neigh_parms_release - unlink @parms from @tbl and schedule its free.
 *
 * The table's built-in default parms (&tbl->parms) are never released.
 * The entry is unlinked and marked dead under tbl->lock, the device
 * reference (taken in neigh_parms_alloc()) is dropped, and the final
 * put happens after an RCU grace period via neigh_rcu_free_parms().
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1323
/* Free the neigh_parms structure itself. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1328
c2ecba71
PE
1329static struct lock_class_key neigh_table_proxy_queue_class;
1330
bd89efc5 1331void neigh_table_init_no_netlink(struct neigh_table *tbl)
1da177e4
LT
1332{
1333 unsigned long now = jiffies;
1334 unsigned long phsize;
1335
1336 atomic_set(&tbl->parms.refcnt, 1);
1337 INIT_RCU_HEAD(&tbl->parms.rcu_head);
1338 tbl->parms.reachable_time =
1339 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1340
1341 if (!tbl->kmem_cachep)
e5d679f3
AD
1342 tbl->kmem_cachep =
1343 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1344 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
20c2df83 1345 NULL);
1da177e4
LT
1346 tbl->stats = alloc_percpu(struct neigh_statistics);
1347 if (!tbl->stats)
1348 panic("cannot create neighbour cache statistics");
4ec93edb 1349
1da177e4 1350#ifdef CONFIG_PROC_FS
457c4cbc 1351 tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
4ec93edb 1352 if (!tbl->pde)
1da177e4
LT
1353 panic("cannot create neighbour proc dir entry");
1354 tbl->pde->proc_fops = &neigh_stat_seq_fops;
1355 tbl->pde->data = tbl;
1356#endif
1357
1358 tbl->hash_mask = 1;
1359 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1360
1361 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
77d04bd9 1362 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1da177e4
LT
1363
1364 if (!tbl->hash_buckets || !tbl->phash_buckets)
1365 panic("cannot allocate neighbour cache hashes");
1366
1da177e4
LT
1367 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1368
1369 rwlock_init(&tbl->lock);
b24b8a24 1370 setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
1da177e4
LT
1371 tbl->gc_timer.expires = now + 1;
1372 add_timer(&tbl->gc_timer);
1373
b24b8a24 1374 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
c2ecba71
PE
1375 skb_queue_head_init_class(&tbl->proxy_queue,
1376 &neigh_table_proxy_queue_class);
1da177e4
LT
1377
1378 tbl->last_flush = now;
1379 tbl->last_rand = now + tbl->parms.reachable_time * 20;
bd89efc5
SK
1380}
1381
/*
 * neigh_table_init - initialise @tbl and register it on neigh_tables.
 *
 * Warns (with a stack dump) if a table for the same address family is
 * already registered; the new table is linked in regardless.
 */
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	/* Look for an already-registered table of the same family. */
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
1402
/*
 * neigh_table_clear - tear down @tbl and unlink it from neigh_tables.
 *
 * Stops both timers, purges queued proxy packets, drops all neighbour
 * entries, then frees the hash tables, per-CPU stats and slab cache.
 * Logs a critical message if entries are still outstanding.
 * Always returns 0.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}
1439
c8822a4e 1440static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1da177e4 1441{
881d966b 1442 struct net *net = skb->sk->sk_net;
a14a49d2
TG
1443 struct ndmsg *ndm;
1444 struct nlattr *dst_attr;
1da177e4
LT
1445 struct neigh_table *tbl;
1446 struct net_device *dev = NULL;
a14a49d2 1447 int err = -EINVAL;
1da177e4 1448
b854272b
DL
1449 if (net != &init_net)
1450 return -EINVAL;
1451
a14a49d2 1452 if (nlmsg_len(nlh) < sizeof(*ndm))
1da177e4
LT
1453 goto out;
1454
a14a49d2
TG
1455 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1456 if (dst_attr == NULL)
1457 goto out;
1458
1459 ndm = nlmsg_data(nlh);
1460 if (ndm->ndm_ifindex) {
881d966b 1461 dev = dev_get_by_index(net, ndm->ndm_ifindex);
a14a49d2
TG
1462 if (dev == NULL) {
1463 err = -ENODEV;
1464 goto out;
1465 }
1466 }
1467
1da177e4
LT
1468 read_lock(&neigh_tbl_lock);
1469 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
a14a49d2 1470 struct neighbour *neigh;
1da177e4
LT
1471
1472 if (tbl->family != ndm->ndm_family)
1473 continue;
1474 read_unlock(&neigh_tbl_lock);
1475
a14a49d2 1476 if (nla_len(dst_attr) < tbl->key_len)
1da177e4
LT
1477 goto out_dev_put;
1478
1479 if (ndm->ndm_flags & NTF_PROXY) {
a14a49d2 1480 err = pneigh_delete(tbl, nla_data(dst_attr), dev);
1da177e4
LT
1481 goto out_dev_put;
1482 }
1483
a14a49d2
TG
1484 if (dev == NULL)
1485 goto out_dev_put;
1da177e4 1486
a14a49d2
TG
1487 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1488 if (neigh == NULL) {
1489 err = -ENOENT;
1490 goto out_dev_put;
1da177e4 1491 }
a14a49d2
TG
1492
1493 err = neigh_update(neigh, NULL, NUD_FAILED,
1494 NEIGH_UPDATE_F_OVERRIDE |
1495 NEIGH_UPDATE_F_ADMIN);
1496 neigh_release(neigh);
1da177e4
LT
1497 goto out_dev_put;
1498 }
1499 read_unlock(&neigh_tbl_lock);
a14a49d2
TG
1500 err = -EAFNOSUPPORT;
1501
1da177e4
LT
1502out_dev_put:
1503 if (dev)
1504 dev_put(dev);
1505out:
1506 return err;
1507}
1508
c8822a4e 1509static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1da177e4 1510{
881d966b 1511 struct net *net = skb->sk->sk_net;
5208debd
TG
1512 struct ndmsg *ndm;
1513 struct nlattr *tb[NDA_MAX+1];
1da177e4
LT
1514 struct neigh_table *tbl;
1515 struct net_device *dev = NULL;
5208debd 1516 int err;
1da177e4 1517
b854272b
DL
1518 if (net != &init_net)
1519 return -EINVAL;
1520
5208debd
TG
1521 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1522 if (err < 0)
1da177e4
LT
1523 goto out;
1524
5208debd
TG
1525 err = -EINVAL;
1526 if (tb[NDA_DST] == NULL)
1527 goto out;
1528
1529 ndm = nlmsg_data(nlh);
1530 if (ndm->ndm_ifindex) {
881d966b 1531 dev = dev_get_by_index(net, ndm->ndm_ifindex);
5208debd
TG
1532 if (dev == NULL) {
1533 err = -ENODEV;
1534 goto out;
1535 }
1536
1537 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1538 goto out_dev_put;
1539 }
1540
1da177e4
LT
1541 read_lock(&neigh_tbl_lock);
1542 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
5208debd
TG
1543 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1544 struct neighbour *neigh;
1545 void *dst, *lladdr;
1da177e4
LT
1546
1547 if (tbl->family != ndm->ndm_family)
1548 continue;
1549 read_unlock(&neigh_tbl_lock);
1550
5208debd 1551 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1da177e4 1552 goto out_dev_put;
5208debd
TG
1553 dst = nla_data(tb[NDA_DST]);
1554 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1da177e4
LT
1555
1556 if (ndm->ndm_flags & NTF_PROXY) {
62dd9318
VN
1557 struct pneigh_entry *pn;
1558
1559 err = -ENOBUFS;
1560 pn = pneigh_lookup(tbl, dst, dev, 1);
1561 if (pn) {
1562 pn->flags = ndm->ndm_flags;
1563 err = 0;
1564 }
1da177e4
LT
1565 goto out_dev_put;
1566 }
1567
5208debd 1568 if (dev == NULL)
1da177e4 1569 goto out_dev_put;
5208debd
TG
1570
1571 neigh = neigh_lookup(tbl, dst, dev);
1572 if (neigh == NULL) {
1573 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1574 err = -ENOENT;
1575 goto out_dev_put;
1576 }
4ec93edb 1577
5208debd
TG
1578 neigh = __neigh_lookup_errno(tbl, dst, dev);
1579 if (IS_ERR(neigh)) {
1580 err = PTR_ERR(neigh);
1da177e4
LT
1581 goto out_dev_put;
1582 }
1da177e4 1583 } else {
5208debd
TG
1584 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1585 err = -EEXIST;
1586 neigh_release(neigh);
1da177e4
LT
1587 goto out_dev_put;
1588 }
1da177e4 1589
5208debd
TG
1590 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1591 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1592 }
1da177e4 1593
5208debd
TG
1594 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1595 neigh_release(neigh);
1da177e4
LT
1596 goto out_dev_put;
1597 }
1598
1599 read_unlock(&neigh_tbl_lock);
5208debd
TG
1600 err = -EAFNOSUPPORT;
1601
1da177e4
LT
1602out_dev_put:
1603 if (dev)
1604 dev_put(dev);
1605out:
1606 return err;
1607}
1608
c7fb64db
TG
/*
 * neightbl_fill_parms - dump one neigh_parms as a nested NDTA_PARMS
 * attribute.
 *
 * Returns the nla_nest_end() result on success; on message overflow
 * the NLA_PUT* macros jump to nla_put_failure and the partial nest is
 * cancelled.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	/* IFINDEX only for device-bound parms; default parms omit it. */
	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}
1641
ca860fb3
TG
/*
 * neightbl_fill_info - dump one neighbour table as an RTM_NEWNEIGHTBL
 * message: name, gc thresholds/interval, an ndt_config snapshot, the
 * per-CPU statistics summed into one ndt_stats, and the default parms.
 *
 * Holds tbl->lock for reading while the message is built.  Returns
 * nlmsg_end() on success or -EMSGSIZE when the skb runs out of room.
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len = tbl->key_len,
			.ndtc_entry_size = tbl->entry_size,
			.ndtc_entries = atomic_read(&tbl->entries),
			.ndtc_last_flush = jiffies_to_msecs(flush_delta),
			.ndtc_last_rand = jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd = tbl->hash_rnd,
			.ndtc_hash_mask = tbl->hash_mask,
			.ndtc_hash_chain_gc = tbl->hash_chain_gc,
			.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Sum the per-CPU counters for userspace. */
		for_each_possible_cpu(cpu) {
			struct neigh_statistics *st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs += st->allocs;
			ndst.ndts_destroys += st->destroys;
			ndst.ndts_hash_grows += st->hash_grows;
			ndst.ndts_res_failed += st->res_failed;
			ndst.ndts_lookups += st->lookups;
			ndst.ndts_hits += st->hits;
			ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs += st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* The default parms must not be device-bound. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1722
ca860fb3
TG
/*
 * neightbl_fill_param_info - like neightbl_fill_info() but dumps only
 * the table name plus one specific (typically per-device) neigh_parms.
 *
 * Returns nlmsg_end() on success or -EMSGSIZE on overflow.
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
4ec93edb 1754
c7fb64db
TG
1755static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1756 int ifindex)
1757{
1758 struct neigh_parms *p;
4ec93edb 1759
c7fb64db
TG
1760 for (p = &tbl->parms; p; p = p->next)
1761 if ((p->dev && p->dev->ifindex == ifindex) ||
1762 (!p->dev && !ifindex))
1763 return p;
1764
1765 return NULL;
1766}
1767
ef7c79ed 1768static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
6b3f8674
TG
1769 [NDTA_NAME] = { .type = NLA_STRING },
1770 [NDTA_THRESH1] = { .type = NLA_U32 },
1771 [NDTA_THRESH2] = { .type = NLA_U32 },
1772 [NDTA_THRESH3] = { .type = NLA_U32 },
1773 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1774 [NDTA_PARMS] = { .type = NLA_NESTED },
1775};
1776
ef7c79ed 1777static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
6b3f8674
TG
1778 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1779 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1780 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1781 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1782 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1783 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1784 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1785 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1786 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1787 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1788 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1789 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1790 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1791};
1792
c8822a4e 1793static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
c7fb64db 1794{
b854272b 1795 struct net *net = skb->sk->sk_net;
c7fb64db 1796 struct neigh_table *tbl;
6b3f8674
TG
1797 struct ndtmsg *ndtmsg;
1798 struct nlattr *tb[NDTA_MAX+1];
1799 int err;
c7fb64db 1800
b854272b
DL
1801 if (net != &init_net)
1802 return -EINVAL;
1803
6b3f8674
TG
1804 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1805 nl_neightbl_policy);
1806 if (err < 0)
1807 goto errout;
c7fb64db 1808
6b3f8674
TG
1809 if (tb[NDTA_NAME] == NULL) {
1810 err = -EINVAL;
1811 goto errout;
1812 }
1813
1814 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1815 read_lock(&neigh_tbl_lock);
1816 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1817 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1818 continue;
1819
6b3f8674 1820 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
c7fb64db
TG
1821 break;
1822 }
1823
1824 if (tbl == NULL) {
1825 err = -ENOENT;
6b3f8674 1826 goto errout_locked;
c7fb64db
TG
1827 }
1828
4ec93edb 1829 /*
c7fb64db
TG
1830 * We acquire tbl->lock to be nice to the periodic timers and
1831 * make sure they always see a consistent set of values.
1832 */
1833 write_lock_bh(&tbl->lock);
1834
6b3f8674
TG
1835 if (tb[NDTA_PARMS]) {
1836 struct nlattr *tbp[NDTPA_MAX+1];
c7fb64db 1837 struct neigh_parms *p;
6b3f8674 1838 int i, ifindex = 0;
c7fb64db 1839
6b3f8674
TG
1840 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1841 nl_ntbl_parm_policy);
1842 if (err < 0)
1843 goto errout_tbl_lock;
c7fb64db 1844
6b3f8674
TG
1845 if (tbp[NDTPA_IFINDEX])
1846 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
c7fb64db
TG
1847
1848 p = lookup_neigh_params(tbl, ifindex);
1849 if (p == NULL) {
1850 err = -ENOENT;
6b3f8674 1851 goto errout_tbl_lock;
c7fb64db 1852 }
c7fb64db 1853
6b3f8674
TG
1854 for (i = 1; i <= NDTPA_MAX; i++) {
1855 if (tbp[i] == NULL)
1856 continue;
c7fb64db 1857
6b3f8674
TG
1858 switch (i) {
1859 case NDTPA_QUEUE_LEN:
1860 p->queue_len = nla_get_u32(tbp[i]);
1861 break;
1862 case NDTPA_PROXY_QLEN:
1863 p->proxy_qlen = nla_get_u32(tbp[i]);
1864 break;
1865 case NDTPA_APP_PROBES:
1866 p->app_probes = nla_get_u32(tbp[i]);
1867 break;
1868 case NDTPA_UCAST_PROBES:
1869 p->ucast_probes = nla_get_u32(tbp[i]);
1870 break;
1871 case NDTPA_MCAST_PROBES:
1872 p->mcast_probes = nla_get_u32(tbp[i]);
1873 break;
1874 case NDTPA_BASE_REACHABLE_TIME:
1875 p->base_reachable_time = nla_get_msecs(tbp[i]);
1876 break;
1877 case NDTPA_GC_STALETIME:
1878 p->gc_staletime = nla_get_msecs(tbp[i]);
1879 break;
1880 case NDTPA_DELAY_PROBE_TIME:
1881 p->delay_probe_time = nla_get_msecs(tbp[i]);
1882 break;
1883 case NDTPA_RETRANS_TIME:
1884 p->retrans_time = nla_get_msecs(tbp[i]);
1885 break;
1886 case NDTPA_ANYCAST_DELAY:
1887 p->anycast_delay = nla_get_msecs(tbp[i]);
1888 break;
1889 case NDTPA_PROXY_DELAY:
1890 p->proxy_delay = nla_get_msecs(tbp[i]);
1891 break;
1892 case NDTPA_LOCKTIME:
1893 p->locktime = nla_get_msecs(tbp[i]);
1894 break;
1895 }
1896 }
1897 }
c7fb64db 1898
6b3f8674
TG
1899 if (tb[NDTA_THRESH1])
1900 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
c7fb64db 1901
6b3f8674
TG
1902 if (tb[NDTA_THRESH2])
1903 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
c7fb64db 1904
6b3f8674
TG
1905 if (tb[NDTA_THRESH3])
1906 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
c7fb64db 1907
6b3f8674
TG
1908 if (tb[NDTA_GC_INTERVAL])
1909 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
c7fb64db
TG
1910
1911 err = 0;
1912
6b3f8674 1913errout_tbl_lock:
c7fb64db 1914 write_unlock_bh(&tbl->lock);
6b3f8674 1915errout_locked:
c7fb64db 1916 read_unlock(&neigh_tbl_lock);
6b3f8674 1917errout:
c7fb64db
TG
1918 return err;
1919}
1920
c8822a4e 1921static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
c7fb64db 1922{
b854272b 1923 struct net *net = skb->sk->sk_net;
ca860fb3
TG
1924 int family, tidx, nidx = 0;
1925 int tbl_skip = cb->args[0];
1926 int neigh_skip = cb->args[1];
c7fb64db
TG
1927 struct neigh_table *tbl;
1928
b854272b
DL
1929 if (net != &init_net)
1930 return 0;
1931
ca860fb3 1932 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
c7fb64db
TG
1933
1934 read_lock(&neigh_tbl_lock);
ca860fb3 1935 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
c7fb64db
TG
1936 struct neigh_parms *p;
1937
ca860fb3 1938 if (tidx < tbl_skip || (family && tbl->family != family))
c7fb64db
TG
1939 continue;
1940
ca860fb3
TG
1941 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1942 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1943 NLM_F_MULTI) <= 0)
c7fb64db
TG
1944 break;
1945
ca860fb3
TG
1946 for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
1947 if (nidx < neigh_skip)
c7fb64db
TG
1948 continue;
1949
ca860fb3
TG
1950 if (neightbl_fill_param_info(skb, tbl, p,
1951 NETLINK_CB(cb->skb).pid,
1952 cb->nlh->nlmsg_seq,
1953 RTM_NEWNEIGHTBL,
1954 NLM_F_MULTI) <= 0)
c7fb64db
TG
1955 goto out;
1956 }
1957
ca860fb3 1958 neigh_skip = 0;
c7fb64db
TG
1959 }
1960out:
1961 read_unlock(&neigh_tbl_lock);
ca860fb3
TG
1962 cb->args[0] = tidx;
1963 cb->args[1] = nidx;
c7fb64db
TG
1964
1965 return skb->len;
1966}
1da177e4 1967
8b8aec50
TG
/*
 * neigh_fill_info - dump one neighbour entry as an RTM_NEWNEIGH-style
 * netlink message (key, lladdr when valid, cacheinfo, probe count).
 *
 * Volatile fields (nud_state, timestamps, refcnt, ha) are sampled
 * under the neighbour read lock.  Returns nlmsg_end() on success or
 * -EMSGSIZE when the skb is full.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = neigh->ops->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh->flags;
	ndm->ndm_type = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	read_lock_bh(&neigh->lock);
	ndm->ndm_state = neigh->nud_state;
	/* Only a VALID entry has a meaningful link-layer address. */
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	/* Ages relative to now, in jiffies. */
	ci.ndm_used = now - neigh->used;
	ci.ndm_confirmed = now - neigh->confirmed;
	ci.ndm_updated = now - neigh->updated;
	ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2013
d961db35
TG
/* Fire the netevent notifier chain and a netlink RTM_NEWNEIGH
 * notification after a neighbour entry changes. */
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
1da177e4
LT
2019
2020static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2021 struct netlink_callback *cb)
2022{
2023 struct neighbour *n;
2024 int rc, h, s_h = cb->args[1];
2025 int idx, s_idx = idx = cb->args[2];
2026
c5e29460 2027 read_lock_bh(&tbl->lock);
1da177e4
LT
2028 for (h = 0; h <= tbl->hash_mask; h++) {
2029 if (h < s_h)
2030 continue;
2031 if (h > s_h)
2032 s_idx = 0;
1da177e4
LT
2033 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2034 if (idx < s_idx)
2035 continue;
2036 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2037 cb->nlh->nlmsg_seq,
b6544c0b
JHS
2038 RTM_NEWNEIGH,
2039 NLM_F_MULTI) <= 0) {
1da177e4
LT
2040 read_unlock_bh(&tbl->lock);
2041 rc = -1;
2042 goto out;
2043 }
2044 }
1da177e4 2045 }
c5e29460 2046 read_unlock_bh(&tbl->lock);
1da177e4
LT
2047 rc = skb->len;
2048out:
2049 cb->args[1] = h;
2050 cb->args[2] = idx;
2051 return rc;
2052}
2053
c8822a4e 2054static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1da177e4 2055{
b854272b 2056 struct net *net = skb->sk->sk_net;
1da177e4
LT
2057 struct neigh_table *tbl;
2058 int t, family, s_t;
2059
b854272b
DL
2060 if (net != &init_net)
2061 return 0;
2062
1da177e4 2063 read_lock(&neigh_tbl_lock);
8b8aec50 2064 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1da177e4
LT
2065 s_t = cb->args[0];
2066
2067 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2068 if (t < s_t || (family && tbl->family != family))
2069 continue;
2070 if (t > s_t)
2071 memset(&cb->args[1], 0, sizeof(cb->args) -
2072 sizeof(cb->args[0]));
2073 if (neigh_dump_table(tbl, skb, cb) < 0)
2074 break;
2075 }
2076 read_unlock(&neigh_tbl_lock);
2077
2078 cb->args[0] = t;
2079 return skb->len;
2080}
2081
2082void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2083{
2084 int chain;
2085
2086 read_lock_bh(&tbl->lock);
2087 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2088 struct neighbour *n;
2089
2090 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2091 cb(n, cookie);
2092 }
2093 read_unlock_bh(&tbl->lock);
2094}
2095EXPORT_SYMBOL(neigh_for_each);
2096
/* The tbl->lock must be held as a writer and BH disabled. */
/*
 * __neigh_for_each_release - unlink and release every neighbour for
 * which @cb returns non-zero.
 *
 * Each entry's decision is made under its own lock; released entries
 * are unlinked from the chain, marked dead, and handed to
 * neigh_cleanup_and_release() after the entry lock is dropped.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* Unlink and mark dead while still
				 * holding the entry lock. */
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2124
2125#ifdef CONFIG_PROC_FS
2126
/*
 * Return the first neighbour that passes the sub-iterator and the
 * optional NOARP filter, scanning from bucket 0; the bucket reached is
 * recorded in state->bucket.  tbl->lock is held by the seq machinery
 * (taken in neigh_seq_start()).
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			/* Protocol-specific sub-iterator may veto an
			 * entry entirely. */
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* Skip pure-NOARP entries when asked to. */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2162
/*
 * Advance the neighbour iterator past @n: first let the protocol
 * sub-iterator step within the current entry, then walk the chain and
 * subsequent buckets, applying the NOARP filter.  *pos, when non-NULL,
 * is decremented for each entry returned.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2207
2208static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2209{
2210 struct neighbour *n = neigh_get_first(seq);
2211
2212 if (n) {
2213 while (*pos) {
2214 n = neigh_get_next(seq, n, pos);
2215 if (!n)
2216 break;
2217 }
2218 }
2219 return *pos ? NULL : n;
2220}
2221
2222static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2223{
2224 struct neigh_seq_state *state = seq->private;
2225 struct neigh_table *tbl = state->tbl;
2226 struct pneigh_entry *pn = NULL;
2227 int bucket = state->bucket;
2228
2229 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2230 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2231 pn = tbl->phash_buckets[bucket];
2232 if (pn)
2233 break;
2234 }
2235 state->bucket = bucket;
2236
2237 return pn;
2238}
2239
2240static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2241 struct pneigh_entry *pn,
2242 loff_t *pos)
2243{
2244 struct neigh_seq_state *state = seq->private;
2245 struct neigh_table *tbl = state->tbl;
2246
2247 pn = pn->next;
2248 while (!pn) {
2249 if (++state->bucket > PNEIGH_HASHMASK)
2250 break;
2251 pn = tbl->phash_buckets[state->bucket];
2252 if (pn)
2253 break;
2254 }
2255
2256 if (pn && pos)
2257 --(*pos);
2258
2259 return pn;
2260}
2261
2262static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2263{
2264 struct pneigh_entry *pn = pneigh_get_first(seq);
2265
2266 if (pn) {
2267 while (*pos) {
2268 pn = pneigh_get_next(seq, pn, pos);
2269 if (!pn)
2270 break;
2271 }
2272 }
2273 return *pos ? NULL : pn;
2274}
2275
2276static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2277{
2278 struct neigh_seq_state *state = seq->private;
2279 void *rc;
2280
2281 rc = neigh_get_idx(seq, pos);
2282 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2283 rc = pneigh_get_idx(seq, pos);
2284
2285 return rc;
2286}
2287
2288void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2289{
2290 struct neigh_seq_state *state = seq->private;
2291 loff_t pos_minus_one;
2292
2293 state->tbl = tbl;
2294 state->bucket = 0;
2295 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2296
2297 read_lock_bh(&tbl->lock);
2298
2299 pos_minus_one = *pos - 1;
2300 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2301}
2302EXPORT_SYMBOL(neigh_seq_start);
2303
2304void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2305{
2306 struct neigh_seq_state *state;
2307 void *rc;
2308
2309 if (v == SEQ_START_TOKEN) {
2310 rc = neigh_get_idx(seq, pos);
2311 goto out;
2312 }
2313
2314 state = seq->private;
2315 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2316 rc = neigh_get_next(seq, v, NULL);
2317 if (rc)
2318 goto out;
2319 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2320 rc = pneigh_get_first(seq);
2321 } else {
2322 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2323 rc = pneigh_get_next(seq, v, NULL);
2324 }
2325out:
2326 ++(*pos);
2327 return rc;
2328}
2329EXPORT_SYMBOL(neigh_seq_next);
2330
2331void neigh_seq_stop(struct seq_file *seq, void *v)
2332{
2333 struct neigh_seq_state *state = seq->private;
2334 struct neigh_table *tbl = state->tbl;
2335
2336 read_unlock_bh(&tbl->lock);
2337}
2338EXPORT_SYMBOL(neigh_seq_stop);
2339
2340/* statistics via seq_file */
2341
2342static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2343{
2344 struct proc_dir_entry *pde = seq->private;
2345 struct neigh_table *tbl = pde->data;
2346 int cpu;
2347
2348 if (*pos == 0)
2349 return SEQ_START_TOKEN;
4ec93edb 2350
1da177e4
LT
2351 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2352 if (!cpu_possible(cpu))
2353 continue;
2354 *pos = cpu+1;
2355 return per_cpu_ptr(tbl->stats, cpu);
2356 }
2357 return NULL;
2358}
2359
2360static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2361{
2362 struct proc_dir_entry *pde = seq->private;
2363 struct neigh_table *tbl = pde->data;
2364 int cpu;
2365
2366 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2367 if (!cpu_possible(cpu))
2368 continue;
2369 *pos = cpu+1;
2370 return per_cpu_ptr(tbl->stats, cpu);
2371 }
2372 return NULL;
2373}
2374
/* ->stop() for the statistics file: nothing to release. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
2379
2380static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2381{
2382 struct proc_dir_entry *pde = seq->private;
2383 struct neigh_table *tbl = pde->data;
2384 struct neigh_statistics *st = v;
2385
2386 if (v == SEQ_START_TOKEN) {
5bec0039 2387 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
1da177e4
LT
2388 return 0;
2389 }
2390
2391 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2392 "%08lx %08lx %08lx %08lx\n",
2393 atomic_read(&tbl->entries),
2394
2395 st->allocs,
2396 st->destroys,
2397 st->hash_grows,
2398
2399 st->lookups,
2400 st->hits,
2401
2402 st->res_failed,
2403
2404 st->rcv_probes_mcast,
2405 st->rcv_probes_ucast,
2406
2407 st->periodic_gc_runs,
2408 st->forced_gc_runs
2409 );
2410
2411 return 0;
2412}
2413
f690808e 2414static const struct seq_operations neigh_stat_seq_ops = {
1da177e4
LT
2415 .start = neigh_stat_seq_start,
2416 .next = neigh_stat_seq_next,
2417 .stop = neigh_stat_seq_stop,
2418 .show = neigh_stat_seq_show,
2419};
2420
2421static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2422{
2423 int ret = seq_open(file, &neigh_stat_seq_ops);
2424
2425 if (!ret) {
2426 struct seq_file *sf = file->private_data;
2427 sf->private = PDE(inode);
2428 }
2429 return ret;
2430};
2431
9a32144e 2432static const struct file_operations neigh_stat_seq_fops = {
1da177e4
LT
2433 .owner = THIS_MODULE,
2434 .open = neigh_stat_seq_open,
2435 .read = seq_read,
2436 .llseek = seq_lseek,
2437 .release = seq_release,
2438};
2439
2440#endif /* CONFIG_PROC_FS */
2441
339bf98f
TG
2442static inline size_t neigh_nlmsg_size(void)
2443{
2444 return NLMSG_ALIGN(sizeof(struct ndmsg))
2445 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2446 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2447 + nla_total_size(sizeof(struct nda_cacheinfo))
2448 + nla_total_size(4); /* NDA_PROBES */
2449}
2450
b8673311 2451static void __neigh_notify(struct neighbour *n, int type, int flags)
1da177e4 2452{
8b8aec50 2453 struct sk_buff *skb;
b8673311 2454 int err = -ENOBUFS;
1da177e4 2455
339bf98f 2456 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
8b8aec50 2457 if (skb == NULL)
b8673311 2458 goto errout;
1da177e4 2459
b8673311 2460 err = neigh_fill_info(skb, n, 0, 0, type, flags);
26932566
PM
2461 if (err < 0) {
2462 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2463 WARN_ON(err == -EMSGSIZE);
2464 kfree_skb(skb);
2465 goto errout;
2466 }
97c53cac 2467 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
b8673311
TG
2468errout:
2469 if (err < 0)
97c53cac 2470 rtnl_set_sk_err(&init_net, RTNLGRP_NEIGH, err);
1da177e4
LT
2471}
2472
d961db35 2473#ifdef CONFIG_ARPD
b8673311 2474void neigh_app_ns(struct neighbour *n)
1da177e4 2475{
b8673311
TG
2476 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2477}
1da177e4
LT
2478#endif /* CONFIG_ARPD */
2479
2480#ifdef CONFIG_SYSCTL
2481
2482static struct neigh_sysctl_table {
2483 struct ctl_table_header *sysctl_header;
c3bac5a7
PE
2484 struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2485 char *dev_name;
ab32ea5d 2486} neigh_sysctl_template __read_mostly = {
1da177e4
LT
2487 .neigh_vars = {
2488 {
2489 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2490 .procname = "mcast_solicit",
2491 .maxlen = sizeof(int),
2492 .mode = 0644,
2493 .proc_handler = &proc_dointvec,
2494 },
2495 {
2496 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2497 .procname = "ucast_solicit",
2498 .maxlen = sizeof(int),
2499 .mode = 0644,
2500 .proc_handler = &proc_dointvec,
2501 },
2502 {
2503 .ctl_name = NET_NEIGH_APP_SOLICIT,
2504 .procname = "app_solicit",
2505 .maxlen = sizeof(int),
2506 .mode = 0644,
2507 .proc_handler = &proc_dointvec,
2508 },
2509 {
1da177e4
LT
2510 .procname = "retrans_time",
2511 .maxlen = sizeof(int),
2512 .mode = 0644,
2513 .proc_handler = &proc_dointvec_userhz_jiffies,
2514 },
2515 {
2516 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2517 .procname = "base_reachable_time",
2518 .maxlen = sizeof(int),
2519 .mode = 0644,
2520 .proc_handler = &proc_dointvec_jiffies,
2521 .strategy = &sysctl_jiffies,
2522 },
2523 {
2524 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2525 .procname = "delay_first_probe_time",
2526 .maxlen = sizeof(int),
2527 .mode = 0644,
2528 .proc_handler = &proc_dointvec_jiffies,
2529 .strategy = &sysctl_jiffies,
2530 },
2531 {
2532 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2533 .procname = "gc_stale_time",
2534 .maxlen = sizeof(int),
2535 .mode = 0644,
2536 .proc_handler = &proc_dointvec_jiffies,
2537 .strategy = &sysctl_jiffies,
2538 },
2539 {
2540 .ctl_name = NET_NEIGH_UNRES_QLEN,
2541 .procname = "unres_qlen",
2542 .maxlen = sizeof(int),
2543 .mode = 0644,
2544 .proc_handler = &proc_dointvec,
2545 },
2546 {
2547 .ctl_name = NET_NEIGH_PROXY_QLEN,
2548 .procname = "proxy_qlen",
2549 .maxlen = sizeof(int),
2550 .mode = 0644,
2551 .proc_handler = &proc_dointvec,
2552 },
2553 {
1da177e4
LT
2554 .procname = "anycast_delay",
2555 .maxlen = sizeof(int),
2556 .mode = 0644,
2557 .proc_handler = &proc_dointvec_userhz_jiffies,
2558 },
2559 {
1da177e4
LT
2560 .procname = "proxy_delay",
2561 .maxlen = sizeof(int),
2562 .mode = 0644,
2563 .proc_handler = &proc_dointvec_userhz_jiffies,
2564 },
2565 {
1da177e4
LT
2566 .procname = "locktime",
2567 .maxlen = sizeof(int),
2568 .mode = 0644,
2569 .proc_handler = &proc_dointvec_userhz_jiffies,
2570 },
d12af679
EB
2571 {
2572 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2573 .procname = "retrans_time_ms",
2574 .maxlen = sizeof(int),
2575 .mode = 0644,
2576 .proc_handler = &proc_dointvec_ms_jiffies,
2577 .strategy = &sysctl_ms_jiffies,
2578 },
2579 {
2580 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2581 .procname = "base_reachable_time_ms",
2582 .maxlen = sizeof(int),
2583 .mode = 0644,
2584 .proc_handler = &proc_dointvec_ms_jiffies,
2585 .strategy = &sysctl_ms_jiffies,
2586 },
1da177e4
LT
2587 {
2588 .ctl_name = NET_NEIGH_GC_INTERVAL,
2589 .procname = "gc_interval",
2590 .maxlen = sizeof(int),
2591 .mode = 0644,
2592 .proc_handler = &proc_dointvec_jiffies,
2593 .strategy = &sysctl_jiffies,
2594 },
2595 {
2596 .ctl_name = NET_NEIGH_GC_THRESH1,
2597 .procname = "gc_thresh1",
2598 .maxlen = sizeof(int),
2599 .mode = 0644,
2600 .proc_handler = &proc_dointvec,
2601 },
2602 {
2603 .ctl_name = NET_NEIGH_GC_THRESH2,
2604 .procname = "gc_thresh2",
2605 .maxlen = sizeof(int),
2606 .mode = 0644,
2607 .proc_handler = &proc_dointvec,
2608 },
2609 {
2610 .ctl_name = NET_NEIGH_GC_THRESH3,
2611 .procname = "gc_thresh3",
2612 .maxlen = sizeof(int),
2613 .mode = 0644,
2614 .proc_handler = &proc_dointvec,
2615 },
c3bac5a7 2616 {},
1da177e4
LT
2617 },
2618};
2619
2620int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
4ec93edb 2621 int p_id, int pdev_id, char *p_name,
1da177e4
LT
2622 proc_handler *handler, ctl_handler *strategy)
2623{
3c607bbb 2624 struct neigh_sysctl_table *t;
1da177e4 2625 const char *dev_name_source = NULL;
c3bac5a7
PE
2626
2627#define NEIGH_CTL_PATH_ROOT 0
2628#define NEIGH_CTL_PATH_PROTO 1
2629#define NEIGH_CTL_PATH_NEIGH 2
2630#define NEIGH_CTL_PATH_DEV 3
2631
2632 struct ctl_path neigh_path[] = {
2633 { .procname = "net", .ctl_name = CTL_NET, },
2634 { .procname = "proto", .ctl_name = 0, },
2635 { .procname = "neigh", .ctl_name = 0, },
2636 { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2637 { },
2638 };
1da177e4 2639
3c607bbb 2640 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
1da177e4 2641 if (!t)
3c607bbb
PE
2642 goto err;
2643
1da177e4
LT
2644 t->neigh_vars[0].data = &p->mcast_probes;
2645 t->neigh_vars[1].data = &p->ucast_probes;
2646 t->neigh_vars[2].data = &p->app_probes;
2647 t->neigh_vars[3].data = &p->retrans_time;
2648 t->neigh_vars[4].data = &p->base_reachable_time;
2649 t->neigh_vars[5].data = &p->delay_probe_time;
2650 t->neigh_vars[6].data = &p->gc_staletime;
2651 t->neigh_vars[7].data = &p->queue_len;
2652 t->neigh_vars[8].data = &p->proxy_qlen;
2653 t->neigh_vars[9].data = &p->anycast_delay;
2654 t->neigh_vars[10].data = &p->proxy_delay;
2655 t->neigh_vars[11].data = &p->locktime;
d12af679
EB
2656 t->neigh_vars[12].data = &p->retrans_time;
2657 t->neigh_vars[13].data = &p->base_reachable_time;
1da177e4
LT
2658
2659 if (dev) {
2660 dev_name_source = dev->name;
c3bac5a7 2661 neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
d12af679
EB
2662 /* Terminate the table early */
2663 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
1da177e4 2664 } else {
c3bac5a7 2665 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
d12af679
EB
2666 t->neigh_vars[14].data = (int *)(p + 1);
2667 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2668 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2669 t->neigh_vars[17].data = (int *)(p + 1) + 3;
1da177e4
LT
2670 }
2671
1da177e4
LT
2672
2673 if (handler || strategy) {
2674 /* RetransTime */
2675 t->neigh_vars[3].proc_handler = handler;
2676 t->neigh_vars[3].strategy = strategy;
2677 t->neigh_vars[3].extra1 = dev;
d12af679
EB
2678 if (!strategy)
2679 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
1da177e4
LT
2680 /* ReachableTime */
2681 t->neigh_vars[4].proc_handler = handler;
2682 t->neigh_vars[4].strategy = strategy;
2683 t->neigh_vars[4].extra1 = dev;
d12af679
EB
2684 if (!strategy)
2685 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
1da177e4 2686 /* RetransTime (in milliseconds)*/
d12af679
EB
2687 t->neigh_vars[12].proc_handler = handler;
2688 t->neigh_vars[12].strategy = strategy;
2689 t->neigh_vars[12].extra1 = dev;
2690 if (!strategy)
2691 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
1da177e4 2692 /* ReachableTime (in milliseconds) */
d12af679
EB
2693 t->neigh_vars[13].proc_handler = handler;
2694 t->neigh_vars[13].strategy = strategy;
2695 t->neigh_vars[13].extra1 = dev;
2696 if (!strategy)
2697 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
1da177e4
LT
2698 }
2699
c3bac5a7
PE
2700 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2701 if (!t->dev_name)
1da177e4 2702 goto free;
1da177e4 2703
c3bac5a7
PE
2704 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2705 neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2706 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2707 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
1da177e4 2708
c3bac5a7 2709 t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars);
3c607bbb 2710 if (!t->sysctl_header)
1da177e4 2711 goto free_procname;
3c607bbb 2712
1da177e4
LT
2713 p->sysctl_table = t;
2714 return 0;
2715
3c607bbb 2716free_procname:
c3bac5a7 2717 kfree(t->dev_name);
3c607bbb 2718free:
1da177e4 2719 kfree(t);
3c607bbb
PE
2720err:
2721 return -ENOBUFS;
1da177e4
LT
2722}
2723
2724void neigh_sysctl_unregister(struct neigh_parms *p)
2725{
2726 if (p->sysctl_table) {
2727 struct neigh_sysctl_table *t = p->sysctl_table;
2728 p->sysctl_table = NULL;
2729 unregister_sysctl_table(t->sysctl_header);
c3bac5a7 2730 kfree(t->dev_name);
1da177e4
LT
2731 kfree(t);
2732 }
2733}
2734
2735#endif /* CONFIG_SYSCTL */
2736
c8822a4e
TG
2737static int __init neigh_init(void)
2738{
2739 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2740 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2741 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2742
2743 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2744 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2745
2746 return 0;
2747}
2748
2749subsys_initcall(neigh_init);
2750
1da177e4 2751EXPORT_SYMBOL(__neigh_event_send);
1da177e4
LT
2752EXPORT_SYMBOL(neigh_changeaddr);
2753EXPORT_SYMBOL(neigh_compat_output);
2754EXPORT_SYMBOL(neigh_connected_output);
2755EXPORT_SYMBOL(neigh_create);
1da177e4 2756EXPORT_SYMBOL(neigh_destroy);
1da177e4
LT
2757EXPORT_SYMBOL(neigh_event_ns);
2758EXPORT_SYMBOL(neigh_ifdown);
2759EXPORT_SYMBOL(neigh_lookup);
2760EXPORT_SYMBOL(neigh_lookup_nodev);
2761EXPORT_SYMBOL(neigh_parms_alloc);
2762EXPORT_SYMBOL(neigh_parms_release);
2763EXPORT_SYMBOL(neigh_rand_reach_time);
2764EXPORT_SYMBOL(neigh_resolve_output);
2765EXPORT_SYMBOL(neigh_table_clear);
2766EXPORT_SYMBOL(neigh_table_init);
bd89efc5 2767EXPORT_SYMBOL(neigh_table_init_no_netlink);
1da177e4 2768EXPORT_SYMBOL(neigh_update);
1da177e4
LT
2769EXPORT_SYMBOL(pneigh_enqueue);
2770EXPORT_SYMBOL(pneigh_lookup);
2771
2772#ifdef CONFIG_ARPD
2773EXPORT_SYMBOL(neigh_app_ns);
2774#endif
2775#ifdef CONFIG_SYSCTL
2776EXPORT_SYMBOL(neigh_sysctl_register);
2777EXPORT_SYMBOL(neigh_sysctl_unregister);
2778#endif