[IPV6] NDISC: Avoid updating neighbor cache for proxied address in receiving NA.
[linux-2.6-block.git] / net / core / neighbour.c
CommitLineData
1da177e4
LT
1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
1da177e4
LT
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/socket.h>
22#include <linux/sched.h>
23#include <linux/netdevice.h>
24#include <linux/proc_fs.h>
25#ifdef CONFIG_SYSCTL
26#include <linux/sysctl.h>
27#endif
28#include <linux/times.h>
29#include <net/neighbour.h>
30#include <net/dst.h>
31#include <net/sock.h>
8d71740c 32#include <net/netevent.h>
a14a49d2 33#include <net/netlink.h>
1da177e4
LT
34#include <linux/rtnetlink.h>
35#include <linux/random.h>
543537bd 36#include <linux/string.h>
1da177e4
LT
37
/* Compile-time debug verbosity for this file. */
#define NEIGH_DEBUG 1

/* NEIGH_PRINTK<n> emits only when NEIGH_DEBUG >= n; level 0 is always on. */
#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

/* Proxy-neighbour hash table: 16 buckets (mask 0xF). */
#define PNEIGH_HASHMASK 0xF
57static void neigh_timer_handler(unsigned long arg);
58#ifdef CONFIG_ARPD
59static void neigh_app_notify(struct neighbour *n);
60#endif
61static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64static struct neigh_table *neigh_tables;
45fc3b11 65#ifdef CONFIG_PROC_FS
1da177e4 66static struct file_operations neigh_stat_seq_fops;
45fc3b11 67#endif
1da177e4
LT
68
69/*
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
71
72 - All the scans/updates to hash buckets MUST be made under this lock.
73 - NOTHING clever should be made under this lock: no callbacks
74 to protocol backends, no attempts to send something to network.
75 It will result in deadlocks, if backend/driver wants to use neighbour
76 cache.
77 - If the entry requires some non-trivial actions, increase
78 its reference count and release table lock.
79
80 Neighbour entries are protected:
81 - with reference count.
82 - with rwlock neigh->lock
83
84 Reference count prevents destruction.
85
86 neigh->lock mainly serializes ll address data and its validity state.
87 However, the same lock is used to protect another entry fields:
88 - timer
89 - resolution queue
90
91 Again, nothing clever shall be made under neigh->lock,
92 the most complicated procedure, which we allow is dev->hard_header.
93 It is supposed, that dev->hard_header is simplistic and does
94 not make callbacks to neighbour tables.
95
96 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
97 list of neighbour tables. This list is used only in process context,
98 */
99
100static DEFINE_RWLOCK(neigh_tbl_lock);
101
102static int neigh_blackhole(struct sk_buff *skb)
103{
104 kfree_skb(skb);
105 return -ENETDOWN;
106}
107
108/*
109 * It is random distribution in the interval (1/2)*base...(3/2)*base.
110 * It corresponds to default IPv6 settings and is not overridable,
111 * because it is really reasonable choice.
112 */
113
/* Pick a random reachable time uniformly in [base/2, 3*base/2), per the
 * default IPv6 NUD behaviour.  Returns 0 when base is 0.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;
	return (base >> 1) + (net_random() % base);
}
118
119
/* Forced garbage collection: walk every hash bucket and unlink each entry
 * that is both unreferenced (refcnt == 1, i.e. only the table's own
 * reference remains) and not NUD_PERMANENT.  Called from neigh_alloc()
 * when the table exceeds its gc thresholds.
 * Returns 1 if at least one entry was reclaimed, 0 otherwise.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np = n->next;	/* unlink from the chain */
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_release(n);	/* drop the table's reference */
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Record the flush time so neigh_alloc() can rate-limit forced GC. */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
158
159static int neigh_del_timer(struct neighbour *n)
160{
161 if ((n->nud_state & NUD_IN_TIMER) &&
162 del_timer(&n->timer)) {
163 neigh_release(n);
164 return 1;
165 }
166 return 0;
167}
168
169static void pneigh_queue_purge(struct sk_buff_head *list)
170{
171 struct sk_buff *skb;
172
173 while ((skb = skb_dequeue(list)) != NULL) {
174 dev_put(skb->dev);
175 kfree_skb(skb);
176 }
177}
178
/* Unlink every neighbour entry attached to @dev (or all entries when
 * @dev is NULL) from the table.  Entries still referenced elsewhere are
 * parked in a harmless state (output -> neigh_blackhole) and destroyed
 * later when the last reference drops.  Caller holds tbl->lock for
 * writing.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;	/* unlink from the hash chain */
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);	/* drop the table's reference */
		}
	}
}
1da177e4 219
49636bb1
HX
/* Device hardware address changed: flush all cached entries for @dev so
 * they are re-resolved against the new address.
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
226
/* Device is going down: flush its neighbour and proxy entries, stop the
 * proxy timer and discard any queued proxy packets.  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
238
/* Allocate and minimally initialise a new neighbour entry for @tbl.
 * May trigger forced GC when the table is above its thresholds; returns
 * NULL when still over gc_thresh3 after GC, or on allocation failure.
 * The new entry starts with refcnt 1 and dead = 1 (not yet hashed in).
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	/* Above gc_thresh3, or above gc_thresh2 with no flush in the last
	 * 5 seconds: try to reclaim before allocating.
	 */
	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;	/* until resolution begins */
	n->parms = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;	/* not yet inserted into the hash table */
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);	/* undo the speculative increment */
	goto out;
}
281
282static struct neighbour **neigh_hash_alloc(unsigned int entries)
283{
284 unsigned long size = entries * sizeof(struct neighbour *);
285 struct neighbour **ret;
286
287 if (size <= PAGE_SIZE) {
77d04bd9 288 ret = kzalloc(size, GFP_ATOMIC);
1da177e4
LT
289 } else {
290 ret = (struct neighbour **)
77d04bd9 291 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
1da177e4 292 }
1da177e4
LT
293 return ret;
294}
295
296static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
297{
298 unsigned long size = entries * sizeof(struct neighbour *);
299
300 if (size <= PAGE_SIZE)
301 kfree(hash);
302 else
303 free_pages((unsigned long)hash, get_order(size));
304}
305
/* Grow the neighbour hash table to @new_entries buckets (must be a power
 * of two) and rehash every entry.  A fresh hash_rnd is drawn so chain
 * placement stays unpredictable.  Caller holds tbl->lock for writing;
 * on allocation failure the old table is simply kept.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));	/* power of two only */
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			/* push onto the head of the new chain */
			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
341
/* Look up the neighbour entry for (@pkey, @dev) in @tbl.  On success a
 * reference is taken and the entry returned; NULL if not found.
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);	/* reference for the caller */
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
362
/* Like neigh_lookup() but matches on the key alone, ignoring the device.
 * Returns the first matching entry with a reference held, or NULL.
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);	/* reference for the caller */
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
382
/* Create the neighbour entry for (@pkey, @dev): run protocol- and
 * device-specific setup, grow the hash table when it is getting full,
 * and insert the entry.  If a concurrent creator won the race, the
 * existing entry is returned instead (with a reference).  Returns an
 * ERR_PTR() on failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Backdate 'confirmed' so the fresh entry is immediately suspect. */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		/* parms are being torn down; refuse to insert */
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Did somebody else insert the same entry while we set up? */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;	/* now live in the table */
	neigh_hold(n);	/* reference for the caller */
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
450
/* Look up a proxy-neighbour entry for (@pkey, @dev); if @creat is set and
 * none exists, allocate and insert one (GFP_KERNEL — may sleep).  An
 * entry with a NULL dev acts as a wildcard matching any device.
 * NOTE(review): the hash folds the last 4 key bytes via an unaligned
 * u32 read — assumes key_len >= 4; confirm for each protocol table.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	/* key is stored inline after the struct */
	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
501
502
/* Remove the proxy entry exactly matching (@pkey, @dev).  Returns 0 on
 * success, -ENOENT if no such entry exists.
 */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	/* same key-folding hash as pneigh_lookup() */
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
532
/* Drop every proxy entry bound to @dev (all entries when @dev is NULL).
 * Caller holds tbl->lock for writing.  Always returns -ENOENT.
 */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;	/* unlink and free */
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
555
556
/*
 *	neighbour must already be out of the table;
 *
 */
/* Final teardown of a neighbour entry: requires dead != 0 and no pending
 * timer.  Detaches cached hardware headers, runs the parms destructor,
 * purges the arp queue and drops the device and parms references.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	/* Detach all cached hardware headers; late users of a detached hh
	 * fall through to neigh_blackhole and drop the packet.
	 */
	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
600
601/* Neighbour state is suspicious;
602 disable fast path.
603
604 Called with write_locked neigh.
605 */
606static void neigh_suspect(struct neighbour *neigh)
607{
608 struct hh_cache *hh;
609
610 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
611
612 neigh->output = neigh->ops->output;
613
614 for (hh = neigh->hh; hh; hh = hh->hh_next)
615 hh->hh_output = neigh->ops->output;
616}
617
618/* Neighbour state is OK;
619 enable fast path.
620
621 Called with write_locked neigh.
622 */
623static void neigh_connect(struct neighbour *neigh)
624{
625 struct hh_cache *hh;
626
627 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
628
629 neigh->output = neigh->ops->connected_output;
630
631 for (hh = neigh->hh; hh; hh = hh->hh_next)
632 hh->hh_output = neigh->ops->hh_output;
633}
634
/* Periodic garbage collector.  Each run scans one hash chain (rotating
 * through the table via hash_chain_gc) and releases unreferenced entries
 * that are FAILED or idle longer than gc_staletime.  Every 300 seconds
 * it also re-randomises reachable_time for all parms.  Re-arms itself so
 * a full pass over the table takes base_reachable_time/2.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 * periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		/* entries with a live timer or PERMANENT are left alone */
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;	/* unlink stale entry */
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
702
703static __inline__ int neigh_max_probes(struct neighbour *n)
704{
705 struct neigh_parms *p = n->parms;
706 return (n->nud_state & NUD_PROBE ?
707 p->ucast_probes :
708 p->ucast_probes + p->app_probes + p->mcast_probes);
709}
710
667347f1
DM
/* Arm the neighbour's timer for @when.  The caller must already hold the
 * reference the timer will consume; a timer already pending here is a
 * bug (it would leak that reference), so complain loudly.
 */
static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
1da177e4
LT
719
/* Called when a timer expires for a neighbour entry. */

/* Drives the NUD state machine: REACHABLE entries age into DELAY/STALE,
 * DELAY entries either return to REACHABLE (if confirmed in time) or
 * move to PROBE, and INCOMPLETE/PROBE entries are re-solicited until
 * neigh_max_probes() attempts have failed, at which point the entry
 * becomes FAILED and queued packets get error reports.
 */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			/* recently used but not confirmed: delay-probe it */
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			/* confirmation arrived while we were delaying */
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* re-arm, but never sooner than HZ/2 from now */
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);	/* reference for the new timer */
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}
	if (notify)
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);	/* drop the expired timer's reference */
}
835
/* Slow path of neigh_event_send(): the entry is not in a usable state.
 * Start resolution (NUD_INCOMPLETE) or reconfirmation (NUD_DELAY) as
 * appropriate.  Returns 1 if @skb was queued (or dropped because
 * resolution failed immediately) so the caller must not transmit it
 * now; 0 if the entry is usable and the caller may send.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);	/* reference for the timer */
			neigh_add_timer(neigh, now + 1);
		} else {
			/* no way to probe at all: fail immediately */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);	/* reference for the timer */
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* bounded queue: drop the oldest packet on overflow */
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
891
e92b43a3 892static void neigh_update_hhs(struct neighbour *neigh)
1da177e4
LT
893{
894 struct hh_cache *hh;
895 void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
896 neigh->dev->header_cache_update;
897
898 if (update) {
899 for (hh = neigh->hh; hh; hh = hh->hh_next) {
900 write_lock_bh(&hh->hh_lock);
901 update(hh, neigh->dev, neigh->ha);
902 write_unlock_bh(&hh->hh_lock);
903 }
904 }
905}
906
907
908
909/* Generic update routine.
910 -- lladdr is new lladdr or NULL, if it is not supplied.
911 -- new is new state.
912 -- flags
913 NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
914 if it is different.
915 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
916 lladdr instead of overriding it
917 if it is different.
918 It also allows to retain current state
919 if lladdr is unchanged.
920 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
921
922 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
923 NTF_ROUTER flag.
924 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
925 a router.
926
927 Caller MUST hold reference count on the entry.
928 */
929
/* Apply an update (new lladdr and/or new NUD state) to @neigh according
 * to the NEIGH_UPDATE_F_* flags; see the comment block above for the
 * flag semantics.  Caller must hold a reference on the entry.
 * Returns 0 on success or a negative errno.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	/* only administrative updates may touch NOARP/PERMANENT entries */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		/* entry is being invalidated: stop the timer and suspect it */
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* keep the old address but mark it stale */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);	/* reference for the timer */
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			/* backdate confirmation: unconfirmed new address */
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
1068
/* Handle an incoming neighbour solicitation from @saddr: look up the
 * entry (creating it when a link-layer address was supplied, or when the
 * device needs none) and mark it NUD_STALE with the new lladdr.
 * Returns the referenced entry or NULL.
 */
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
1080
/* Attach (creating on first use) the hardware-header cache entry for
 * @protocol to neighbour @n and hang it off @dst->hh, taking a reference
 * for the dst.  Called with the neighbour write-locked.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);	/* n->hh's reference */
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);	/* dst->hh's reference */
		dst->hh = hh;
	}
}
1114
/* This function can be used in contexts, where only old dev_queue_xmit
   worked, f.e. if you want to override normal output path (eql, shaper),
   but resolution is not made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	/* Try to build the link-layer header with an empty destination;
	 * on failure fall back to rebuild_header() (which may defer the
	 * packet for resolution).
	 */
	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
1134
/* Slow and careful. */

/* Output path for unresolved entries: kick/await resolution (queueing the
 * skb if it is in progress), then build the hardware header — populating
 * the dst's header cache on first use — and hand off to queue_xmit.
 */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			/* re-check under the write lock */
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1179
/* As fast as possible without hh cache */

/* Output path for connected entries without a cached hardware header:
 * copy the neighbour's address under the read lock and transmit.
 */
int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
1203
1204static void neigh_proxy_process(unsigned long arg)
1205{
1206 struct neigh_table *tbl = (struct neigh_table *)arg;
1207 long sched_next = 0;
1208 unsigned long now = jiffies;
1209 struct sk_buff *skb;
1210
1211 spin_lock(&tbl->proxy_queue.lock);
1212
1213 skb = tbl->proxy_queue.next;
1214
1215 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1216 struct sk_buff *back = skb;
a61bbcf2 1217 long tdif = NEIGH_CB(back)->sched_next - now;
1da177e4
LT
1218
1219 skb = skb->next;
1220 if (tdif <= 0) {
1221 struct net_device *dev = back->dev;
1222 __skb_unlink(back, &tbl->proxy_queue);
1223 if (tbl->proxy_redo && netif_running(dev))
1224 tbl->proxy_redo(back);
1225 else
1226 kfree_skb(back);
1227
1228 dev_put(dev);
1229 } else if (!sched_next || tdif < sched_next)
1230 sched_next = tdif;
1231 }
1232 del_timer(&tbl->proxy_timer);
1233 if (sched_next)
1234 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1235 spin_unlock(&tbl->proxy_queue.lock);
1236}
1237
1238void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1239 struct sk_buff *skb)
1240{
1241 unsigned long now = jiffies;
1242 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1243
1244 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1245 kfree_skb(skb);
1246 return;
1247 }
a61bbcf2
PM
1248
1249 NEIGH_CB(skb)->sched_next = sched_next;
1250 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1da177e4
LT
1251
1252 spin_lock(&tbl->proxy_queue.lock);
1253 if (del_timer(&tbl->proxy_timer)) {
1254 if (time_before(tbl->proxy_timer.expires, sched_next))
1255 sched_next = tbl->proxy_timer.expires;
1256 }
1257 dst_release(skb->dst);
1258 skb->dst = NULL;
1259 dev_hold(skb->dev);
1260 __skb_queue_tail(&tbl->proxy_queue, skb);
1261 mod_timer(&tbl->proxy_timer, sched_next);
1262 spin_unlock(&tbl->proxy_queue.lock);
1263}
1264
1265
1266struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1267 struct neigh_table *tbl)
1268{
1269 struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1270
1271 if (p) {
1272 memcpy(p, &tbl->parms, sizeof(*p));
1273 p->tbl = tbl;
1274 atomic_set(&p->refcnt, 1);
1275 INIT_RCU_HEAD(&p->rcu_head);
1276 p->reachable_time =
1277 neigh_rand_reach_time(p->base_reachable_time);
c7fb64db
TG
1278 if (dev) {
1279 if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1280 kfree(p);
1281 return NULL;
1282 }
1283
1284 dev_hold(dev);
1285 p->dev = dev;
1da177e4
LT
1286 }
1287 p->sysctl_table = NULL;
1288 write_lock_bh(&tbl->lock);
1289 p->next = tbl->parms.next;
1290 tbl->parms.next = p;
1291 write_unlock_bh(&tbl->lock);
1292 }
1293 return p;
1294}
1295
1296static void neigh_rcu_free_parms(struct rcu_head *head)
1297{
1298 struct neigh_parms *parms =
1299 container_of(head, struct neigh_parms, rcu_head);
1300
1301 neigh_parms_put(parms);
1302}
1303
1304void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1305{
1306 struct neigh_parms **p;
1307
1308 if (!parms || parms == &tbl->parms)
1309 return;
1310 write_lock_bh(&tbl->lock);
1311 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1312 if (*p == parms) {
1313 *p = parms->next;
1314 parms->dead = 1;
1315 write_unlock_bh(&tbl->lock);
c7fb64db
TG
1316 if (parms->dev)
1317 dev_put(parms->dev);
1da177e4
LT
1318 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1319 return;
1320 }
1321 }
1322 write_unlock_bh(&tbl->lock);
1323 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1324}
1325
/* Final destructor, invoked once the parms refcount drops to zero. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1330
bd89efc5 1331void neigh_table_init_no_netlink(struct neigh_table *tbl)
1da177e4
LT
1332{
1333 unsigned long now = jiffies;
1334 unsigned long phsize;
1335
1336 atomic_set(&tbl->parms.refcnt, 1);
1337 INIT_RCU_HEAD(&tbl->parms.rcu_head);
1338 tbl->parms.reachable_time =
1339 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1340
1341 if (!tbl->kmem_cachep)
e5d679f3
AD
1342 tbl->kmem_cachep =
1343 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1344 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1345 NULL, NULL);
1da177e4
LT
1346 tbl->stats = alloc_percpu(struct neigh_statistics);
1347 if (!tbl->stats)
1348 panic("cannot create neighbour cache statistics");
1349
1350#ifdef CONFIG_PROC_FS
1351 tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1352 if (!tbl->pde)
1353 panic("cannot create neighbour proc dir entry");
1354 tbl->pde->proc_fops = &neigh_stat_seq_fops;
1355 tbl->pde->data = tbl;
1356#endif
1357
1358 tbl->hash_mask = 1;
1359 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1360
1361 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
77d04bd9 1362 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1da177e4
LT
1363
1364 if (!tbl->hash_buckets || !tbl->phash_buckets)
1365 panic("cannot allocate neighbour cache hashes");
1366
1da177e4
LT
1367 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1368
1369 rwlock_init(&tbl->lock);
1370 init_timer(&tbl->gc_timer);
1371 tbl->gc_timer.data = (unsigned long)tbl;
1372 tbl->gc_timer.function = neigh_periodic_timer;
1373 tbl->gc_timer.expires = now + 1;
1374 add_timer(&tbl->gc_timer);
1375
1376 init_timer(&tbl->proxy_timer);
1377 tbl->proxy_timer.data = (unsigned long)tbl;
1378 tbl->proxy_timer.function = neigh_proxy_process;
1379 skb_queue_head_init(&tbl->proxy_queue);
1380
1381 tbl->last_flush = now;
1382 tbl->last_rand = now + tbl->parms.reachable_time * 20;
bd89efc5
SK
1383}
1384
1385void neigh_table_init(struct neigh_table *tbl)
1386{
1387 struct neigh_table *tmp;
1388
1389 neigh_table_init_no_netlink(tbl);
1da177e4 1390 write_lock(&neigh_tbl_lock);
bd89efc5
SK
1391 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1392 if (tmp->family == tbl->family)
1393 break;
1394 }
1da177e4
LT
1395 tbl->next = neigh_tables;
1396 neigh_tables = tbl;
1397 write_unlock(&neigh_tbl_lock);
bd89efc5
SK
1398
1399 if (unlikely(tmp)) {
1400 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1401 "family %d\n", tbl->family);
1402 dump_stack();
1403 }
1da177e4
LT
1404}
1405
1406int neigh_table_clear(struct neigh_table *tbl)
1407{
1408 struct neigh_table **tp;
1409
1410 /* It is not clean... Fix it to unload IPv6 module safely */
1411 del_timer_sync(&tbl->gc_timer);
1412 del_timer_sync(&tbl->proxy_timer);
1413 pneigh_queue_purge(&tbl->proxy_queue);
1414 neigh_ifdown(tbl, NULL);
1415 if (atomic_read(&tbl->entries))
1416 printk(KERN_CRIT "neighbour leakage\n");
1417 write_lock(&neigh_tbl_lock);
1418 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1419 if (*tp == tbl) {
1420 *tp = tbl->next;
1421 break;
1422 }
1423 }
1424 write_unlock(&neigh_tbl_lock);
1425
1426 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1427 tbl->hash_buckets = NULL;
1428
1429 kfree(tbl->phash_buckets);
1430 tbl->phash_buckets = NULL;
1431
3fcde74b
KK
1432 free_percpu(tbl->stats);
1433 tbl->stats = NULL;
1434
1da177e4
LT
1435 return 0;
1436}
1437
1438int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1439{
a14a49d2
TG
1440 struct ndmsg *ndm;
1441 struct nlattr *dst_attr;
1da177e4
LT
1442 struct neigh_table *tbl;
1443 struct net_device *dev = NULL;
a14a49d2 1444 int err = -EINVAL;
1da177e4 1445
a14a49d2 1446 if (nlmsg_len(nlh) < sizeof(*ndm))
1da177e4
LT
1447 goto out;
1448
a14a49d2
TG
1449 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1450 if (dst_attr == NULL)
1451 goto out;
1452
1453 ndm = nlmsg_data(nlh);
1454 if (ndm->ndm_ifindex) {
1455 dev = dev_get_by_index(ndm->ndm_ifindex);
1456 if (dev == NULL) {
1457 err = -ENODEV;
1458 goto out;
1459 }
1460 }
1461
1da177e4
LT
1462 read_lock(&neigh_tbl_lock);
1463 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
a14a49d2 1464 struct neighbour *neigh;
1da177e4
LT
1465
1466 if (tbl->family != ndm->ndm_family)
1467 continue;
1468 read_unlock(&neigh_tbl_lock);
1469
a14a49d2 1470 if (nla_len(dst_attr) < tbl->key_len)
1da177e4
LT
1471 goto out_dev_put;
1472
1473 if (ndm->ndm_flags & NTF_PROXY) {
a14a49d2 1474 err = pneigh_delete(tbl, nla_data(dst_attr), dev);
1da177e4
LT
1475 goto out_dev_put;
1476 }
1477
a14a49d2
TG
1478 if (dev == NULL)
1479 goto out_dev_put;
1da177e4 1480
a14a49d2
TG
1481 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1482 if (neigh == NULL) {
1483 err = -ENOENT;
1484 goto out_dev_put;
1da177e4 1485 }
a14a49d2
TG
1486
1487 err = neigh_update(neigh, NULL, NUD_FAILED,
1488 NEIGH_UPDATE_F_OVERRIDE |
1489 NEIGH_UPDATE_F_ADMIN);
1490 neigh_release(neigh);
1da177e4
LT
1491 goto out_dev_put;
1492 }
1493 read_unlock(&neigh_tbl_lock);
a14a49d2
TG
1494 err = -EAFNOSUPPORT;
1495
1da177e4
LT
1496out_dev_put:
1497 if (dev)
1498 dev_put(dev);
1499out:
1500 return err;
1501}
1502
1503int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1504{
5208debd
TG
1505 struct ndmsg *ndm;
1506 struct nlattr *tb[NDA_MAX+1];
1da177e4
LT
1507 struct neigh_table *tbl;
1508 struct net_device *dev = NULL;
5208debd 1509 int err;
1da177e4 1510
5208debd
TG
1511 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1512 if (err < 0)
1da177e4
LT
1513 goto out;
1514
5208debd
TG
1515 err = -EINVAL;
1516 if (tb[NDA_DST] == NULL)
1517 goto out;
1518
1519 ndm = nlmsg_data(nlh);
1520 if (ndm->ndm_ifindex) {
1521 dev = dev_get_by_index(ndm->ndm_ifindex);
1522 if (dev == NULL) {
1523 err = -ENODEV;
1524 goto out;
1525 }
1526
1527 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1528 goto out_dev_put;
1529 }
1530
1da177e4
LT
1531 read_lock(&neigh_tbl_lock);
1532 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
5208debd
TG
1533 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1534 struct neighbour *neigh;
1535 void *dst, *lladdr;
1da177e4
LT
1536
1537 if (tbl->family != ndm->ndm_family)
1538 continue;
1539 read_unlock(&neigh_tbl_lock);
1540
5208debd 1541 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1da177e4 1542 goto out_dev_put;
5208debd
TG
1543 dst = nla_data(tb[NDA_DST]);
1544 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1da177e4
LT
1545
1546 if (ndm->ndm_flags & NTF_PROXY) {
5208debd
TG
1547 err = 0;
1548 if (pneigh_lookup(tbl, dst, dev, 1) == NULL)
1549 err = -ENOBUFS;
1da177e4
LT
1550 goto out_dev_put;
1551 }
1552
5208debd 1553 if (dev == NULL)
1da177e4 1554 goto out_dev_put;
5208debd
TG
1555
1556 neigh = neigh_lookup(tbl, dst, dev);
1557 if (neigh == NULL) {
1558 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1559 err = -ENOENT;
1560 goto out_dev_put;
1561 }
1da177e4 1562
5208debd
TG
1563 neigh = __neigh_lookup_errno(tbl, dst, dev);
1564 if (IS_ERR(neigh)) {
1565 err = PTR_ERR(neigh);
1da177e4
LT
1566 goto out_dev_put;
1567 }
1da177e4 1568 } else {
5208debd
TG
1569 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1570 err = -EEXIST;
1571 neigh_release(neigh);
1da177e4
LT
1572 goto out_dev_put;
1573 }
1da177e4 1574
5208debd
TG
1575 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1576 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1577 }
1da177e4 1578
5208debd
TG
1579 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1580 neigh_release(neigh);
1da177e4
LT
1581 goto out_dev_put;
1582 }
1583
1584 read_unlock(&neigh_tbl_lock);
5208debd
TG
1585 err = -EAFNOSUPPORT;
1586
1da177e4
LT
1587out_dev_put:
1588 if (dev)
1589 dev_put(dev);
1590out:
1591 return err;
1592}
1593
c7fb64db
TG
1594static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1595{
ca860fb3
TG
1596 struct nlattr *nest;
1597
1598 nest = nla_nest_start(skb, NDTA_PARMS);
1599 if (nest == NULL)
1600 return -ENOBUFS;
c7fb64db
TG
1601
1602 if (parms->dev)
ca860fb3
TG
1603 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1604
1605 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1606 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1607 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1608 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1609 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1610 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1611 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1612 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
c7fb64db 1613 parms->base_reachable_time);
ca860fb3
TG
1614 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1615 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1616 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1617 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1618 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1619 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
c7fb64db 1620
ca860fb3 1621 return nla_nest_end(skb, nest);
c7fb64db 1622
ca860fb3
TG
1623nla_put_failure:
1624 return nla_nest_cancel(skb, nest);
c7fb64db
TG
1625}
1626
ca860fb3
TG
1627static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1628 u32 pid, u32 seq, int type, int flags)
c7fb64db
TG
1629{
1630 struct nlmsghdr *nlh;
1631 struct ndtmsg *ndtmsg;
1632
ca860fb3
TG
1633 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1634 if (nlh == NULL)
1635 return -ENOBUFS;
c7fb64db 1636
ca860fb3 1637 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1638
1639 read_lock_bh(&tbl->lock);
1640 ndtmsg->ndtm_family = tbl->family;
9ef1d4c7
PM
1641 ndtmsg->ndtm_pad1 = 0;
1642 ndtmsg->ndtm_pad2 = 0;
c7fb64db 1643
ca860fb3
TG
1644 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1645 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1646 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1647 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1648 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
c7fb64db
TG
1649
1650 {
1651 unsigned long now = jiffies;
1652 unsigned int flush_delta = now - tbl->last_flush;
1653 unsigned int rand_delta = now - tbl->last_rand;
1654
1655 struct ndt_config ndc = {
1656 .ndtc_key_len = tbl->key_len,
1657 .ndtc_entry_size = tbl->entry_size,
1658 .ndtc_entries = atomic_read(&tbl->entries),
1659 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1660 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1661 .ndtc_hash_rnd = tbl->hash_rnd,
1662 .ndtc_hash_mask = tbl->hash_mask,
1663 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1664 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1665 };
1666
ca860fb3 1667 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
c7fb64db
TG
1668 }
1669
1670 {
1671 int cpu;
1672 struct ndt_stats ndst;
1673
1674 memset(&ndst, 0, sizeof(ndst));
1675
6f912042 1676 for_each_possible_cpu(cpu) {
c7fb64db
TG
1677 struct neigh_statistics *st;
1678
c7fb64db
TG
1679 st = per_cpu_ptr(tbl->stats, cpu);
1680 ndst.ndts_allocs += st->allocs;
1681 ndst.ndts_destroys += st->destroys;
1682 ndst.ndts_hash_grows += st->hash_grows;
1683 ndst.ndts_res_failed += st->res_failed;
1684 ndst.ndts_lookups += st->lookups;
1685 ndst.ndts_hits += st->hits;
1686 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1687 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1688 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1689 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1690 }
1691
ca860fb3 1692 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
c7fb64db
TG
1693 }
1694
1695 BUG_ON(tbl->parms.dev);
1696 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
ca860fb3 1697 goto nla_put_failure;
c7fb64db
TG
1698
1699 read_unlock_bh(&tbl->lock);
ca860fb3 1700 return nlmsg_end(skb, nlh);
c7fb64db 1701
ca860fb3 1702nla_put_failure:
c7fb64db 1703 read_unlock_bh(&tbl->lock);
ca860fb3 1704 return nlmsg_cancel(skb, nlh);
c7fb64db
TG
1705}
1706
ca860fb3
TG
1707static int neightbl_fill_param_info(struct sk_buff *skb,
1708 struct neigh_table *tbl,
c7fb64db 1709 struct neigh_parms *parms,
ca860fb3
TG
1710 u32 pid, u32 seq, int type,
1711 unsigned int flags)
c7fb64db
TG
1712{
1713 struct ndtmsg *ndtmsg;
1714 struct nlmsghdr *nlh;
1715
ca860fb3
TG
1716 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1717 if (nlh == NULL)
1718 return -ENOBUFS;
c7fb64db 1719
ca860fb3 1720 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1721
1722 read_lock_bh(&tbl->lock);
1723 ndtmsg->ndtm_family = tbl->family;
9ef1d4c7
PM
1724 ndtmsg->ndtm_pad1 = 0;
1725 ndtmsg->ndtm_pad2 = 0;
c7fb64db 1726
ca860fb3
TG
1727 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1728 neightbl_fill_parms(skb, parms) < 0)
1729 goto errout;
c7fb64db
TG
1730
1731 read_unlock_bh(&tbl->lock);
ca860fb3
TG
1732 return nlmsg_end(skb, nlh);
1733errout:
c7fb64db 1734 read_unlock_bh(&tbl->lock);
ca860fb3 1735 return nlmsg_cancel(skb, nlh);
c7fb64db
TG
1736}
1737
1738static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1739 int ifindex)
1740{
1741 struct neigh_parms *p;
1742
1743 for (p = &tbl->parms; p; p = p->next)
1744 if ((p->dev && p->dev->ifindex == ifindex) ||
1745 (!p->dev && !ifindex))
1746 return p;
1747
1748 return NULL;
1749}
1750
6b3f8674
TG
1751static struct nla_policy nl_neightbl_policy[NDTA_MAX+1] __read_mostly = {
1752 [NDTA_NAME] = { .type = NLA_STRING },
1753 [NDTA_THRESH1] = { .type = NLA_U32 },
1754 [NDTA_THRESH2] = { .type = NLA_U32 },
1755 [NDTA_THRESH3] = { .type = NLA_U32 },
1756 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1757 [NDTA_PARMS] = { .type = NLA_NESTED },
1758};
1759
1760static struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] __read_mostly = {
1761 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1762 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1763 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1764 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1765 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1766 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1767 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1768 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1769 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1770 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1771 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1772 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1773 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1774};
1775
c7fb64db
TG
1776int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1777{
1778 struct neigh_table *tbl;
6b3f8674
TG
1779 struct ndtmsg *ndtmsg;
1780 struct nlattr *tb[NDTA_MAX+1];
1781 int err;
c7fb64db 1782
6b3f8674
TG
1783 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1784 nl_neightbl_policy);
1785 if (err < 0)
1786 goto errout;
c7fb64db 1787
6b3f8674
TG
1788 if (tb[NDTA_NAME] == NULL) {
1789 err = -EINVAL;
1790 goto errout;
1791 }
1792
1793 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1794 read_lock(&neigh_tbl_lock);
1795 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1796 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1797 continue;
1798
6b3f8674 1799 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
c7fb64db
TG
1800 break;
1801 }
1802
1803 if (tbl == NULL) {
1804 err = -ENOENT;
6b3f8674 1805 goto errout_locked;
c7fb64db
TG
1806 }
1807
1808 /*
1809 * We acquire tbl->lock to be nice to the periodic timers and
1810 * make sure they always see a consistent set of values.
1811 */
1812 write_lock_bh(&tbl->lock);
1813
6b3f8674
TG
1814 if (tb[NDTA_PARMS]) {
1815 struct nlattr *tbp[NDTPA_MAX+1];
c7fb64db 1816 struct neigh_parms *p;
6b3f8674 1817 int i, ifindex = 0;
c7fb64db 1818
6b3f8674
TG
1819 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1820 nl_ntbl_parm_policy);
1821 if (err < 0)
1822 goto errout_tbl_lock;
c7fb64db 1823
6b3f8674
TG
1824 if (tbp[NDTPA_IFINDEX])
1825 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
c7fb64db
TG
1826
1827 p = lookup_neigh_params(tbl, ifindex);
1828 if (p == NULL) {
1829 err = -ENOENT;
6b3f8674 1830 goto errout_tbl_lock;
c7fb64db 1831 }
c7fb64db 1832
6b3f8674
TG
1833 for (i = 1; i <= NDTPA_MAX; i++) {
1834 if (tbp[i] == NULL)
1835 continue;
c7fb64db 1836
6b3f8674
TG
1837 switch (i) {
1838 case NDTPA_QUEUE_LEN:
1839 p->queue_len = nla_get_u32(tbp[i]);
1840 break;
1841 case NDTPA_PROXY_QLEN:
1842 p->proxy_qlen = nla_get_u32(tbp[i]);
1843 break;
1844 case NDTPA_APP_PROBES:
1845 p->app_probes = nla_get_u32(tbp[i]);
1846 break;
1847 case NDTPA_UCAST_PROBES:
1848 p->ucast_probes = nla_get_u32(tbp[i]);
1849 break;
1850 case NDTPA_MCAST_PROBES:
1851 p->mcast_probes = nla_get_u32(tbp[i]);
1852 break;
1853 case NDTPA_BASE_REACHABLE_TIME:
1854 p->base_reachable_time = nla_get_msecs(tbp[i]);
1855 break;
1856 case NDTPA_GC_STALETIME:
1857 p->gc_staletime = nla_get_msecs(tbp[i]);
1858 break;
1859 case NDTPA_DELAY_PROBE_TIME:
1860 p->delay_probe_time = nla_get_msecs(tbp[i]);
1861 break;
1862 case NDTPA_RETRANS_TIME:
1863 p->retrans_time = nla_get_msecs(tbp[i]);
1864 break;
1865 case NDTPA_ANYCAST_DELAY:
1866 p->anycast_delay = nla_get_msecs(tbp[i]);
1867 break;
1868 case NDTPA_PROXY_DELAY:
1869 p->proxy_delay = nla_get_msecs(tbp[i]);
1870 break;
1871 case NDTPA_LOCKTIME:
1872 p->locktime = nla_get_msecs(tbp[i]);
1873 break;
1874 }
1875 }
1876 }
c7fb64db 1877
6b3f8674
TG
1878 if (tb[NDTA_THRESH1])
1879 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
c7fb64db 1880
6b3f8674
TG
1881 if (tb[NDTA_THRESH2])
1882 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
c7fb64db 1883
6b3f8674
TG
1884 if (tb[NDTA_THRESH3])
1885 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
c7fb64db 1886
6b3f8674
TG
1887 if (tb[NDTA_GC_INTERVAL])
1888 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
c7fb64db
TG
1889
1890 err = 0;
1891
6b3f8674 1892errout_tbl_lock:
c7fb64db 1893 write_unlock_bh(&tbl->lock);
6b3f8674 1894errout_locked:
c7fb64db 1895 read_unlock(&neigh_tbl_lock);
6b3f8674 1896errout:
c7fb64db
TG
1897 return err;
1898}
1899
1900int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1901{
ca860fb3
TG
1902 int family, tidx, nidx = 0;
1903 int tbl_skip = cb->args[0];
1904 int neigh_skip = cb->args[1];
c7fb64db
TG
1905 struct neigh_table *tbl;
1906
ca860fb3 1907 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
c7fb64db
TG
1908
1909 read_lock(&neigh_tbl_lock);
ca860fb3 1910 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
c7fb64db
TG
1911 struct neigh_parms *p;
1912
ca860fb3 1913 if (tidx < tbl_skip || (family && tbl->family != family))
c7fb64db
TG
1914 continue;
1915
ca860fb3
TG
1916 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1917 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1918 NLM_F_MULTI) <= 0)
c7fb64db
TG
1919 break;
1920
ca860fb3
TG
1921 for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
1922 if (nidx < neigh_skip)
c7fb64db
TG
1923 continue;
1924
ca860fb3
TG
1925 if (neightbl_fill_param_info(skb, tbl, p,
1926 NETLINK_CB(cb->skb).pid,
1927 cb->nlh->nlmsg_seq,
1928 RTM_NEWNEIGHTBL,
1929 NLM_F_MULTI) <= 0)
c7fb64db
TG
1930 goto out;
1931 }
1932
ca860fb3 1933 neigh_skip = 0;
c7fb64db
TG
1934 }
1935out:
1936 read_unlock(&neigh_tbl_lock);
ca860fb3
TG
1937 cb->args[0] = tidx;
1938 cb->args[1] = nidx;
c7fb64db
TG
1939
1940 return skb->len;
1941}
1da177e4 1942
8b8aec50
TG
1943static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1944 u32 pid, u32 seq, int type, unsigned int flags)
1da177e4
LT
1945{
1946 unsigned long now = jiffies;
1da177e4 1947 struct nda_cacheinfo ci;
8b8aec50
TG
1948 struct nlmsghdr *nlh;
1949 struct ndmsg *ndm;
1950
1951 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1952 if (nlh == NULL)
1953 return -ENOBUFS;
1da177e4 1954
8b8aec50
TG
1955 ndm = nlmsg_data(nlh);
1956 ndm->ndm_family = neigh->ops->family;
9ef1d4c7
PM
1957 ndm->ndm_pad1 = 0;
1958 ndm->ndm_pad2 = 0;
8b8aec50
TG
1959 ndm->ndm_flags = neigh->flags;
1960 ndm->ndm_type = neigh->type;
1961 ndm->ndm_ifindex = neigh->dev->ifindex;
1da177e4 1962
8b8aec50
TG
1963 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
1964
1965 read_lock_bh(&neigh->lock);
1966 ndm->ndm_state = neigh->nud_state;
1967 if ((neigh->nud_state & NUD_VALID) &&
1968 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
1969 read_unlock_bh(&neigh->lock);
1970 goto nla_put_failure;
1971 }
1972
1973 ci.ndm_used = now - neigh->used;
1974 ci.ndm_confirmed = now - neigh->confirmed;
1975 ci.ndm_updated = now - neigh->updated;
1976 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
1977 read_unlock_bh(&neigh->lock);
1978
1979 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
1980 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1981
1982 return nlmsg_end(skb, nlh);
1983
1984nla_put_failure:
1985 return nlmsg_cancel(skb, nlh);
1da177e4
LT
1986}
1987
1988
1989static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1990 struct netlink_callback *cb)
1991{
1992 struct neighbour *n;
1993 int rc, h, s_h = cb->args[1];
1994 int idx, s_idx = idx = cb->args[2];
1995
1996 for (h = 0; h <= tbl->hash_mask; h++) {
1997 if (h < s_h)
1998 continue;
1999 if (h > s_h)
2000 s_idx = 0;
2001 read_lock_bh(&tbl->lock);
2002 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2003 if (idx < s_idx)
2004 continue;
2005 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2006 cb->nlh->nlmsg_seq,
b6544c0b
JHS
2007 RTM_NEWNEIGH,
2008 NLM_F_MULTI) <= 0) {
1da177e4
LT
2009 read_unlock_bh(&tbl->lock);
2010 rc = -1;
2011 goto out;
2012 }
2013 }
2014 read_unlock_bh(&tbl->lock);
2015 }
2016 rc = skb->len;
2017out:
2018 cb->args[1] = h;
2019 cb->args[2] = idx;
2020 return rc;
2021}
2022
2023int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2024{
2025 struct neigh_table *tbl;
2026 int t, family, s_t;
2027
2028 read_lock(&neigh_tbl_lock);
8b8aec50 2029 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1da177e4
LT
2030 s_t = cb->args[0];
2031
2032 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2033 if (t < s_t || (family && tbl->family != family))
2034 continue;
2035 if (t > s_t)
2036 memset(&cb->args[1], 0, sizeof(cb->args) -
2037 sizeof(cb->args[0]));
2038 if (neigh_dump_table(tbl, skb, cb) < 0)
2039 break;
2040 }
2041 read_unlock(&neigh_tbl_lock);
2042
2043 cb->args[0] = t;
2044 return skb->len;
2045}
2046
2047void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2048{
2049 int chain;
2050
2051 read_lock_bh(&tbl->lock);
2052 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2053 struct neighbour *n;
2054
2055 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2056 cb(n, cookie);
2057 }
2058 read_unlock_bh(&tbl->lock);
2059}
2060EXPORT_SYMBOL(neigh_for_each);
2061
2062/* The tbl->lock must be held as a writer and BH disabled. */
2063void __neigh_for_each_release(struct neigh_table *tbl,
2064 int (*cb)(struct neighbour *))
2065{
2066 int chain;
2067
2068 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2069 struct neighbour *n, **np;
2070
2071 np = &tbl->hash_buckets[chain];
2072 while ((n = *np) != NULL) {
2073 int release;
2074
2075 write_lock(&n->lock);
2076 release = cb(n);
2077 if (release) {
2078 *np = n->next;
2079 n->dead = 1;
2080 } else
2081 np = &n->next;
2082 write_unlock(&n->lock);
2083 if (release)
2084 neigh_release(n);
2085 }
2086 }
2087}
2088EXPORT_SYMBOL(__neigh_for_each_release);
2089
2090#ifdef CONFIG_PROC_FS
2091
2092static struct neighbour *neigh_get_first(struct seq_file *seq)
2093{
2094 struct neigh_seq_state *state = seq->private;
2095 struct neigh_table *tbl = state->tbl;
2096 struct neighbour *n = NULL;
2097 int bucket = state->bucket;
2098
2099 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2100 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2101 n = tbl->hash_buckets[bucket];
2102
2103 while (n) {
2104 if (state->neigh_sub_iter) {
2105 loff_t fakep = 0;
2106 void *v;
2107
2108 v = state->neigh_sub_iter(state, n, &fakep);
2109 if (!v)
2110 goto next;
2111 }
2112 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2113 break;
2114 if (n->nud_state & ~NUD_NOARP)
2115 break;
2116 next:
2117 n = n->next;
2118 }
2119
2120 if (n)
2121 break;
2122 }
2123 state->bucket = bucket;
2124
2125 return n;
2126}
2127
2128static struct neighbour *neigh_get_next(struct seq_file *seq,
2129 struct neighbour *n,
2130 loff_t *pos)
2131{
2132 struct neigh_seq_state *state = seq->private;
2133 struct neigh_table *tbl = state->tbl;
2134
2135 if (state->neigh_sub_iter) {
2136 void *v = state->neigh_sub_iter(state, n, pos);
2137 if (v)
2138 return n;
2139 }
2140 n = n->next;
2141
2142 while (1) {
2143 while (n) {
2144 if (state->neigh_sub_iter) {
2145 void *v = state->neigh_sub_iter(state, n, pos);
2146 if (v)
2147 return n;
2148 goto next;
2149 }
2150 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2151 break;
2152
2153 if (n->nud_state & ~NUD_NOARP)
2154 break;
2155 next:
2156 n = n->next;
2157 }
2158
2159 if (n)
2160 break;
2161
2162 if (++state->bucket > tbl->hash_mask)
2163 break;
2164
2165 n = tbl->hash_buckets[state->bucket];
2166 }
2167
2168 if (n && pos)
2169 --(*pos);
2170 return n;
2171}
2172
2173static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2174{
2175 struct neighbour *n = neigh_get_first(seq);
2176
2177 if (n) {
2178 while (*pos) {
2179 n = neigh_get_next(seq, n, pos);
2180 if (!n)
2181 break;
2182 }
2183 }
2184 return *pos ? NULL : n;
2185}
2186
2187static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2188{
2189 struct neigh_seq_state *state = seq->private;
2190 struct neigh_table *tbl = state->tbl;
2191 struct pneigh_entry *pn = NULL;
2192 int bucket = state->bucket;
2193
2194 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2195 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2196 pn = tbl->phash_buckets[bucket];
2197 if (pn)
2198 break;
2199 }
2200 state->bucket = bucket;
2201
2202 return pn;
2203}
2204
2205static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2206 struct pneigh_entry *pn,
2207 loff_t *pos)
2208{
2209 struct neigh_seq_state *state = seq->private;
2210 struct neigh_table *tbl = state->tbl;
2211
2212 pn = pn->next;
2213 while (!pn) {
2214 if (++state->bucket > PNEIGH_HASHMASK)
2215 break;
2216 pn = tbl->phash_buckets[state->bucket];
2217 if (pn)
2218 break;
2219 }
2220
2221 if (pn && pos)
2222 --(*pos);
2223
2224 return pn;
2225}
2226
2227static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2228{
2229 struct pneigh_entry *pn = pneigh_get_first(seq);
2230
2231 if (pn) {
2232 while (*pos) {
2233 pn = pneigh_get_next(seq, pn, pos);
2234 if (!pn)
2235 break;
2236 }
2237 }
2238 return *pos ? NULL : pn;
2239}
2240
2241static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2242{
2243 struct neigh_seq_state *state = seq->private;
2244 void *rc;
2245
2246 rc = neigh_get_idx(seq, pos);
2247 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2248 rc = pneigh_get_idx(seq, pos);
2249
2250 return rc;
2251}
2252
2253void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2254{
2255 struct neigh_seq_state *state = seq->private;
2256 loff_t pos_minus_one;
2257
2258 state->tbl = tbl;
2259 state->bucket = 0;
2260 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2261
2262 read_lock_bh(&tbl->lock);
2263
2264 pos_minus_one = *pos - 1;
2265 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2266}
2267EXPORT_SYMBOL(neigh_seq_start);
2268
2269void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2270{
2271 struct neigh_seq_state *state;
2272 void *rc;
2273
2274 if (v == SEQ_START_TOKEN) {
2275 rc = neigh_get_idx(seq, pos);
2276 goto out;
2277 }
2278
2279 state = seq->private;
2280 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2281 rc = neigh_get_next(seq, v, NULL);
2282 if (rc)
2283 goto out;
2284 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2285 rc = pneigh_get_first(seq);
2286 } else {
2287 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2288 rc = pneigh_get_next(seq, v, NULL);
2289 }
2290out:
2291 ++(*pos);
2292 return rc;
2293}
2294EXPORT_SYMBOL(neigh_seq_next);
2295
2296void neigh_seq_stop(struct seq_file *seq, void *v)
2297{
2298 struct neigh_seq_state *state = seq->private;
2299 struct neigh_table *tbl = state->tbl;
2300
2301 read_unlock_bh(&tbl->lock);
2302}
2303EXPORT_SYMBOL(neigh_seq_stop);
2304
2305/* statistics via seq_file */
2306
2307static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2308{
2309 struct proc_dir_entry *pde = seq->private;
2310 struct neigh_table *tbl = pde->data;
2311 int cpu;
2312
2313 if (*pos == 0)
2314 return SEQ_START_TOKEN;
2315
2316 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2317 if (!cpu_possible(cpu))
2318 continue;
2319 *pos = cpu+1;
2320 return per_cpu_ptr(tbl->stats, cpu);
2321 }
2322 return NULL;
2323}
2324
2325static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2326{
2327 struct proc_dir_entry *pde = seq->private;
2328 struct neigh_table *tbl = pde->data;
2329 int cpu;
2330
2331 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2332 if (!cpu_possible(cpu))
2333 continue;
2334 *pos = cpu+1;
2335 return per_cpu_ptr(tbl->stats, cpu);
2336 }
2337 return NULL;
2338}
2339
/* ->stop() for the statistics file. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
	/* Intentionally empty: ->start() takes no locks, so there is
	 * nothing to release here.
	 */
}
2344
2345static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2346{
2347 struct proc_dir_entry *pde = seq->private;
2348 struct neigh_table *tbl = pde->data;
2349 struct neigh_statistics *st = v;
2350
2351 if (v == SEQ_START_TOKEN) {
5bec0039 2352 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
1da177e4
LT
2353 return 0;
2354 }
2355
2356 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2357 "%08lx %08lx %08lx %08lx\n",
2358 atomic_read(&tbl->entries),
2359
2360 st->allocs,
2361 st->destroys,
2362 st->hash_grows,
2363
2364 st->lookups,
2365 st->hits,
2366
2367 st->res_failed,
2368
2369 st->rcv_probes_mcast,
2370 st->rcv_probes_ucast,
2371
2372 st->periodic_gc_runs,
2373 st->forced_gc_runs
2374 );
2375
2376 return 0;
2377}
2378
/* seq_file iterator for the per-table /proc statistics file. */
static struct seq_operations neigh_stat_seq_ops = {
	.start = neigh_stat_seq_start,
	.next = neigh_stat_seq_next,
	.stop = neigh_stat_seq_stop,
	.show = neigh_stat_seq_show,
};
2385
2386static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2387{
2388 int ret = seq_open(file, &neigh_stat_seq_ops);
2389
2390 if (!ret) {
2391 struct seq_file *sf = file->private_data;
2392 sf->private = PDE(inode);
2393 }
2394 return ret;
2395};
2396
/* file_operations for the per-table statistics file under /proc. */
static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open 	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2404
2405#endif /* CONFIG_PROC_FS */
2406
2407#ifdef CONFIG_ARPD
b8673311 2408static void __neigh_notify(struct neighbour *n, int type, int flags)
1da177e4 2409{
8b8aec50 2410 struct sk_buff *skb;
b8673311 2411 int err = -ENOBUFS;
1da177e4 2412
8b8aec50
TG
2413 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
2414 if (skb == NULL)
b8673311 2415 goto errout;
1da177e4 2416
b8673311
TG
2417 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2418 if (err < 0) {
1da177e4 2419 kfree_skb(skb);
b8673311 2420 goto errout;
1da177e4 2421 }
b8673311
TG
2422
2423 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2424errout:
2425 if (err < 0)
2426 rtnl_set_sk_err(RTNLGRP_NEIGH, err);
1da177e4
LT
2427}
2428
b8673311 2429void neigh_app_ns(struct neighbour *n)
1da177e4 2430{
b8673311
TG
2431 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2432}
1da177e4 2433
b8673311
TG
2434static void neigh_app_notify(struct neighbour *n)
2435{
2436 __neigh_notify(n, RTM_NEWNEIGH, 0);
1da177e4
LT
2437}
2438
2439#endif /* CONFIG_ARPD */
2440
2441#ifdef CONFIG_SYSCTL
2442
/* Template for the sysctl tree registered per neigh_parms instance
 * (net/<proto>/neigh/<dev>/...).  neigh_sysctl_register() copies this
 * template and patches the .data pointers and directory names.
 * NOTE: that function addresses neigh_vars[] by numeric index, so the
 * entry order below is part of the contract — keep the [N] comments in
 * sync if entries are added or moved.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table neigh_vars[__NET_NEIGH_MAX];
	ctl_table neigh_dev[2];
	ctl_table neigh_neigh_dir[2];
	ctl_table neigh_proto_dir[2];
	ctl_table neigh_root_dir[2];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		/* [0] */
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [1] */
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [2] */
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [3] — handler may be overridden by the caller */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [4] — handler may be overridden by the caller */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [5] */
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [6] */
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [7] */
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [8] */
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [9] */
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [10] */
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [11] */
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [12] — table-wide; removed for per-device registrations */
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [13] — table-wide; removed for per-device registrations */
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [14] — table-wide; removed for per-device registrations */
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [15] — table-wide; removed for per-device registrations */
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [16] — millisecond view of retrans_time */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		/* [17] — millisecond view of base_reachable_time */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
2611
/* Register a sysctl subtree net/<p_name>/neigh/<dev|default>/ for the
 * given neigh_parms.  A copy of neigh_sysctl_template is patched so that
 * each neigh_vars[] entry points at the matching field of p; indices used
 * below must stay in sync with the template's entry order.
 *
 * @dev:     device the parms belong to, or NULL for the table default
 * @p_id:    sysctl ctl_name for the protocol directory (e.g. NET_IPV4)
 * @pdev_id: ctl_name for the "neigh" directory level
 * @p_name:  protocol directory name (e.g. "ipv4")
 * @handler/@strategy: optional overrides for the retrans/reachable-time
 *                     entries (protocols convert units differently)
 *
 * Returns 0 on success or -ENOBUFS on allocation/registration failure.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	/* Per-parms knobs: template index -> neigh_parms field. */
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		/* gc_interval/gc_thresh* are table-wide: hide them in the
		 * per-device directory by clearing their procnames.
		 */
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		/* Default parms: the four gc ints live directly after the
		 * neigh_parms struct (see neigh_table_init layout).
		 */
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	/* Millisecond views alias the same fields as entries 3 and 4. */
	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	/* Own copy of the directory name; freed in neigh_sysctl_unregister. */
	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	/* Wire up net/<proto>/neigh/<dev>/<vars> and register the tree. */
	t->neigh_dev[0].child	       = t->neigh_vars;
	t->neigh_neigh_dir[0].child    = t->neigh_dev;
	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child     = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
 free_procname:
	kfree(dev_name);
 free:
	kfree(t);

	return err;
}
2708
2709void neigh_sysctl_unregister(struct neigh_parms *p)
2710{
2711 if (p->sysctl_table) {
2712 struct neigh_sysctl_table *t = p->sysctl_table;
2713 p->sysctl_table = NULL;
2714 unregister_sysctl_table(t->sysctl_header);
2715 kfree(t->neigh_dev[0].procname);
2716 kfree(t);
2717 }
2718}
2719
#endif /* CONFIG_SYSCTL */

/* Public entry points of the generic neighbour cache, used by the
 * protocol front-ends (ARP, NDISC, DECnet, ...).
 */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif