net/core/neighbour.c
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send anything to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with the reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock, protecting
   the list of neighbour tables.  This list is used only in process context.
 */

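/*
 * A minimal usage sketch of the rules above (illustrative only; the
 * address and device are placeholders): take a reference while the
 * table is protected -- neigh_lookup() below does this with
 * rcu_read_lock_bh() + atomic_inc_not_zero() -- then do any
 * non-trivial work on the entry under its own lock, and finally drop
 * the reference:
 *
 *	struct neighbour *n = neigh_lookup(tbl, &addr, dev);
 *	if (n) {
 *		write_lock_bh(&n->lock);
 *		... examine or refresh n->ha / n->nud_state ...
 *		write_unlock_bh(&n->lock);
 *		neigh_release(n);
 *	}
 */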
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
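/*
 * Worked example of the formula above (illustrative only): with
 * base = 30 * HZ, net_random() % base is uniform over [0, 30s) and
 * base >> 1 adds a fixed 15s, so the result is uniform over
 * [15s, 45s) -- exactly the (1/2)*base ... (3/2)*base interval
 * described in the comment.
 */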

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i <= nht->hash_mask; i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
175
a43d8994
PE
176static void neigh_add_timer(struct neighbour *n, unsigned long when)
177{
178 neigh_hold(n);
179 if (unlikely(mod_timer(&n->timer, when))) {
180 printk("NEIGH: BUG, double timer add, state is %x\n",
181 n->nud_state);
182 dump_stack();
183 }
184}
185
1da177e4
LT
186static int neigh_del_timer(struct neighbour *n)
187{
188 if ((n->nud_state & NUD_IN_TIMER) &&
189 del_timer(&n->timer)) {
190 neigh_release(n);
191 return 1;
192 }
193 return 0;
194}
195
196static void pneigh_queue_purge(struct sk_buff_head *list)
197{
198 struct sk_buff *skb;
199
200 while ((skb = skb_dequeue(list)) != NULL) {
201 dev_put(skb->dev);
202 kfree_skb(skb);
203 }
204}
205
49636bb1 206static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
1da177e4
LT
207{
208 int i;
d6bf7817 209 struct neigh_hash_table *nht;
1da177e4 210
d6bf7817
ED
211 nht = rcu_dereference_protected(tbl->nht,
212 lockdep_is_held(&tbl->lock));
213
214 for (i = 0; i <= nht->hash_mask; i++) {
767e97e1
ED
215 struct neighbour *n;
216 struct neighbour __rcu **np = &nht->hash_buckets[i];
1da177e4 217
767e97e1
ED
218 while ((n = rcu_dereference_protected(*np,
219 lockdep_is_held(&tbl->lock))) != NULL) {
1da177e4
LT
220 if (dev && n->dev != dev) {
221 np = &n->next;
222 continue;
223 }
767e97e1
ED
224 rcu_assign_pointer(*np,
225 rcu_dereference_protected(n->next,
226 lockdep_is_held(&tbl->lock)));
1da177e4
LT
227 write_lock(&n->lock);
228 neigh_del_timer(n);
229 n->dead = 1;
230
231 if (atomic_read(&n->refcnt) != 1) {
			/* The most unpleasant situation.
			   We must destroy the neighbour entry,
			   but someone still uses it.

			   The destroy will be delayed until
			   the last user releases us, but
			   we must kill timers etc. and move
			   it to a safe state.
			 */
241 skb_queue_purge(&n->arp_queue);
242 n->output = neigh_blackhole;
243 if (n->nud_state & NUD_VALID)
244 n->nud_state = NUD_NOARP;
245 else
246 n->nud_state = NUD_NONE;
247 NEIGH_PRINTK2("neigh %p is stray.\n", n);
248 }
249 write_unlock(&n->lock);
4f494554 250 neigh_cleanup_and_release(n);
1da177e4
LT
251 }
252 }
49636bb1 253}
1da177e4 254
49636bb1
HX
255void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
256{
257 write_lock_bh(&tbl->lock);
258 neigh_flush_dev(tbl, dev);
259 write_unlock_bh(&tbl->lock);
260}
0a204500 261EXPORT_SYMBOL(neigh_changeaddr);
49636bb1
HX
262
263int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
264{
265 write_lock_bh(&tbl->lock);
266 neigh_flush_dev(tbl, dev);
1da177e4
LT
267 pneigh_ifdown(tbl, dev);
268 write_unlock_bh(&tbl->lock);
269
270 del_timer_sync(&tbl->proxy_timer);
271 pneigh_queue_purge(&tbl->proxy_queue);
272 return 0;
273}
0a204500 274EXPORT_SYMBOL(neigh_ifdown);
1da177e4
LT
275
276static struct neighbour *neigh_alloc(struct neigh_table *tbl)
277{
278 struct neighbour *n = NULL;
279 unsigned long now = jiffies;
280 int entries;
281
282 entries = atomic_inc_return(&tbl->entries) - 1;
283 if (entries >= tbl->gc_thresh3 ||
284 (entries >= tbl->gc_thresh2 &&
285 time_after(now, tbl->last_flush + 5 * HZ))) {
286 if (!neigh_forced_gc(tbl) &&
287 entries >= tbl->gc_thresh3)
288 goto out_entries;
289 }
290
c3762229 291 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
1da177e4
LT
292 if (!n)
293 goto out_entries;
294
1da177e4
LT
295 skb_queue_head_init(&n->arp_queue);
296 rwlock_init(&n->lock);
297 n->updated = n->used = now;
298 n->nud_state = NUD_NONE;
299 n->output = neigh_blackhole;
300 n->parms = neigh_parms_clone(&tbl->parms);
b24b8a24 301 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
1da177e4
LT
302
303 NEIGH_CACHE_STAT_INC(tbl, allocs);
304 n->tbl = tbl;
305 atomic_set(&n->refcnt, 1);
306 n->dead = 1;
307out:
308 return n;
309
310out_entries:
311 atomic_dec(&tbl->entries);
312 goto out;
313}
314
d6bf7817 315static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
1da177e4 316{
d6bf7817
ED
317 size_t size = entries * sizeof(struct neighbour *);
318 struct neigh_hash_table *ret;
319 struct neighbour **buckets;
1da177e4 320
d6bf7817
ED
321 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
322 if (!ret)
323 return NULL;
324 if (size <= PAGE_SIZE)
325 buckets = kzalloc(size, GFP_ATOMIC);
326 else
327 buckets = (struct neighbour **)
328 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
329 get_order(size));
330 if (!buckets) {
331 kfree(ret);
332 return NULL;
1da177e4 333 }
767e97e1 334 rcu_assign_pointer(ret->hash_buckets, buckets);
d6bf7817
ED
335 ret->hash_mask = entries - 1;
336 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
1da177e4
LT
337 return ret;
338}
339
d6bf7817 340static void neigh_hash_free_rcu(struct rcu_head *head)
1da177e4 341{
d6bf7817
ED
342 struct neigh_hash_table *nht = container_of(head,
343 struct neigh_hash_table,
344 rcu);
345 size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
346 struct neighbour **buckets = nht->hash_buckets;
1da177e4
LT
347
348 if (size <= PAGE_SIZE)
d6bf7817 349 kfree(buckets);
1da177e4 350 else
d6bf7817
ED
351 free_pages((unsigned long)buckets, get_order(size));
352 kfree(nht);
1da177e4
LT
353}
354
d6bf7817
ED
355static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
356 unsigned long new_entries)
1da177e4 357{
d6bf7817
ED
358 unsigned int i, hash;
359 struct neigh_hash_table *new_nht, *old_nht;
1da177e4
LT
360
361 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
362
c3609d51 363 BUG_ON(!is_power_of_2(new_entries));
d6bf7817
ED
364 old_nht = rcu_dereference_protected(tbl->nht,
365 lockdep_is_held(&tbl->lock));
366 new_nht = neigh_hash_alloc(new_entries);
367 if (!new_nht)
368 return old_nht;
1da177e4 369
d6bf7817 370 for (i = 0; i <= old_nht->hash_mask; i++) {
1da177e4
LT
371 struct neighbour *n, *next;
372
767e97e1
ED
373 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
374 lockdep_is_held(&tbl->lock));
d6bf7817
ED
375 n != NULL;
376 n = next) {
377 hash = tbl->hash(n->primary_key, n->dev,
378 new_nht->hash_rnd);
1da177e4 379
d6bf7817 380 hash &= new_nht->hash_mask;
767e97e1
ED
381 next = rcu_dereference_protected(n->next,
382 lockdep_is_held(&tbl->lock));
383
384 rcu_assign_pointer(n->next,
385 rcu_dereference_protected(
386 new_nht->hash_buckets[hash],
387 lockdep_is_held(&tbl->lock)));
388 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
1da177e4
LT
389 }
390 }
1da177e4 391
d6bf7817
ED
392 rcu_assign_pointer(tbl->nht, new_nht);
393 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
394 return new_nht;
1da177e4
LT
395}
396
397struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
398 struct net_device *dev)
399{
400 struct neighbour *n;
401 int key_len = tbl->key_len;
bc4bf5f3 402 u32 hash_val;
d6bf7817 403 struct neigh_hash_table *nht;
4ec93edb 404
1da177e4
LT
405 NEIGH_CACHE_STAT_INC(tbl, lookups);
406
d6bf7817
ED
407 rcu_read_lock_bh();
408 nht = rcu_dereference_bh(tbl->nht);
409 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
767e97e1
ED
410
411 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
412 n != NULL;
413 n = rcu_dereference_bh(n->next)) {
1da177e4 414 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
767e97e1
ED
415 if (!atomic_inc_not_zero(&n->refcnt))
416 n = NULL;
1da177e4
LT
417 NEIGH_CACHE_STAT_INC(tbl, hits);
418 break;
419 }
420 }
767e97e1 421
d6bf7817 422 rcu_read_unlock_bh();
1da177e4
LT
423 return n;
424}
0a204500 425EXPORT_SYMBOL(neigh_lookup);
1da177e4 426
426b5303
EB
427struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
428 const void *pkey)
1da177e4
LT
429{
430 struct neighbour *n;
431 int key_len = tbl->key_len;
bc4bf5f3 432 u32 hash_val;
d6bf7817 433 struct neigh_hash_table *nht;
1da177e4
LT
434
435 NEIGH_CACHE_STAT_INC(tbl, lookups);
436
d6bf7817
ED
437 rcu_read_lock_bh();
438 nht = rcu_dereference_bh(tbl->nht);
439 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
767e97e1
ED
440
441 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
442 n != NULL;
443 n = rcu_dereference_bh(n->next)) {
426b5303 444 if (!memcmp(n->primary_key, pkey, key_len) &&
878628fb 445 net_eq(dev_net(n->dev), net)) {
767e97e1
ED
446 if (!atomic_inc_not_zero(&n->refcnt))
447 n = NULL;
1da177e4
LT
448 NEIGH_CACHE_STAT_INC(tbl, hits);
449 break;
450 }
451 }
767e97e1 452
d6bf7817 453 rcu_read_unlock_bh();
1da177e4
LT
454 return n;
455}
0a204500 456EXPORT_SYMBOL(neigh_lookup_nodev);
1da177e4
LT
457
458struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
459 struct net_device *dev)
460{
461 u32 hash_val;
462 int key_len = tbl->key_len;
463 int error;
464 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
d6bf7817 465 struct neigh_hash_table *nht;
1da177e4
LT
466
467 if (!n) {
468 rc = ERR_PTR(-ENOBUFS);
469 goto out;
470 }
471
472 memcpy(n->primary_key, pkey, key_len);
473 n->dev = dev;
474 dev_hold(dev);
475
476 /* Protocol specific setup. */
477 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
478 rc = ERR_PTR(error);
479 goto out_neigh_release;
480 }
481
482 /* Device specific setup. */
483 if (n->parms->neigh_setup &&
484 (error = n->parms->neigh_setup(n)) < 0) {
485 rc = ERR_PTR(error);
486 goto out_neigh_release;
487 }
488
489 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
490
491 write_lock_bh(&tbl->lock);
d6bf7817
ED
492 nht = rcu_dereference_protected(tbl->nht,
493 lockdep_is_held(&tbl->lock));
1da177e4 494
d6bf7817
ED
495 if (atomic_read(&tbl->entries) > (nht->hash_mask + 1))
496 nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1);
1da177e4 497
d6bf7817 498 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
1da177e4
LT
499
500 if (n->parms->dead) {
501 rc = ERR_PTR(-EINVAL);
502 goto out_tbl_unlock;
503 }
504
767e97e1
ED
505 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
506 lockdep_is_held(&tbl->lock));
507 n1 != NULL;
508 n1 = rcu_dereference_protected(n1->next,
509 lockdep_is_held(&tbl->lock))) {
1da177e4
LT
510 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
511 neigh_hold(n1);
512 rc = n1;
513 goto out_tbl_unlock;
514 }
515 }
516
1da177e4
LT
517 n->dead = 0;
518 neigh_hold(n);
767e97e1
ED
519 rcu_assign_pointer(n->next,
520 rcu_dereference_protected(nht->hash_buckets[hash_val],
521 lockdep_is_held(&tbl->lock)));
522 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
1da177e4
LT
523 write_unlock_bh(&tbl->lock);
524 NEIGH_PRINTK2("neigh %p is created.\n", n);
525 rc = n;
526out:
527 return rc;
528out_tbl_unlock:
529 write_unlock_bh(&tbl->lock);
530out_neigh_release:
531 neigh_release(n);
532 goto out;
533}
0a204500 534EXPORT_SYMBOL(neigh_create);
1da177e4 535
be01d655 536static u32 pneigh_hash(const void *pkey, int key_len)
fa86d322 537{
fa86d322 538 u32 hash_val = *(u32 *)(pkey + key_len - 4);
fa86d322
PE
539 hash_val ^= (hash_val >> 16);
540 hash_val ^= hash_val >> 8;
541 hash_val ^= hash_val >> 4;
542 hash_val &= PNEIGH_HASHMASK;
be01d655
YH
543 return hash_val;
544}
fa86d322 545
be01d655
YH
546static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
547 struct net *net,
548 const void *pkey,
549 int key_len,
550 struct net_device *dev)
551{
552 while (n) {
fa86d322 553 if (!memcmp(n->key, pkey, key_len) &&
be01d655 554 net_eq(pneigh_net(n), net) &&
fa86d322 555 (n->dev == dev || !n->dev))
be01d655
YH
556 return n;
557 n = n->next;
fa86d322 558 }
be01d655
YH
559 return NULL;
560}
fa86d322 561
be01d655
YH
562struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
563 struct net *net, const void *pkey, struct net_device *dev)
564{
565 int key_len = tbl->key_len;
566 u32 hash_val = pneigh_hash(pkey, key_len);
567
568 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
569 net, pkey, key_len, dev);
fa86d322 570}
0a204500 571EXPORT_SYMBOL_GPL(__pneigh_lookup);
fa86d322 572
426b5303
EB
573struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
574 struct net *net, const void *pkey,
1da177e4
LT
575 struct net_device *dev, int creat)
576{
577 struct pneigh_entry *n;
578 int key_len = tbl->key_len;
be01d655 579 u32 hash_val = pneigh_hash(pkey, key_len);
1da177e4
LT
580
581 read_lock_bh(&tbl->lock);
be01d655
YH
582 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
583 net, pkey, key_len, dev);
1da177e4 584 read_unlock_bh(&tbl->lock);
be01d655
YH
585
586 if (n || !creat)
1da177e4
LT
587 goto out;
588
4ae28944
PE
589 ASSERT_RTNL();
590
1da177e4
LT
591 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
592 if (!n)
593 goto out;
594
e42ea986 595 write_pnet(&n->net, hold_net(net));
1da177e4
LT
596 memcpy(n->key, pkey, key_len);
597 n->dev = dev;
598 if (dev)
599 dev_hold(dev);
600
601 if (tbl->pconstructor && tbl->pconstructor(n)) {
602 if (dev)
603 dev_put(dev);
da12f735 604 release_net(net);
1da177e4
LT
605 kfree(n);
606 n = NULL;
607 goto out;
608 }
609
610 write_lock_bh(&tbl->lock);
611 n->next = tbl->phash_buckets[hash_val];
612 tbl->phash_buckets[hash_val] = n;
613 write_unlock_bh(&tbl->lock);
614out:
615 return n;
616}
0a204500 617EXPORT_SYMBOL(pneigh_lookup);
1da177e4
LT
618
619
426b5303 620int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
1da177e4
LT
621 struct net_device *dev)
622{
623 struct pneigh_entry *n, **np;
624 int key_len = tbl->key_len;
be01d655 625 u32 hash_val = pneigh_hash(pkey, key_len);
1da177e4
LT
626
627 write_lock_bh(&tbl->lock);
628 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
629 np = &n->next) {
426b5303 630 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
878628fb 631 net_eq(pneigh_net(n), net)) {
1da177e4
LT
632 *np = n->next;
633 write_unlock_bh(&tbl->lock);
634 if (tbl->pdestructor)
635 tbl->pdestructor(n);
636 if (n->dev)
637 dev_put(n->dev);
57da52c1 638 release_net(pneigh_net(n));
1da177e4
LT
639 kfree(n);
640 return 0;
641 }
642 }
643 write_unlock_bh(&tbl->lock);
644 return -ENOENT;
645}
646
647static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
648{
649 struct pneigh_entry *n, **np;
650 u32 h;
651
652 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
653 np = &tbl->phash_buckets[h];
654 while ((n = *np) != NULL) {
655 if (!dev || n->dev == dev) {
656 *np = n->next;
657 if (tbl->pdestructor)
658 tbl->pdestructor(n);
659 if (n->dev)
660 dev_put(n->dev);
57da52c1 661 release_net(pneigh_net(n));
1da177e4
LT
662 kfree(n);
663 continue;
664 }
665 np = &n->next;
666 }
667 }
668 return -ENOENT;
669}
670
06f0511d
DL
671static void neigh_parms_destroy(struct neigh_parms *parms);
672
673static inline void neigh_parms_put(struct neigh_parms *parms)
674{
675 if (atomic_dec_and_test(&parms->refcnt))
676 neigh_parms_destroy(parms);
677}
1da177e4 678
767e97e1
ED
679static void neigh_destroy_rcu(struct rcu_head *head)
680{
681 struct neighbour *neigh = container_of(head, struct neighbour, rcu);
682
683 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
684}
1da177e4
LT
685/*
686 * neighbour must already be out of the table;
687 *
688 */
689void neigh_destroy(struct neighbour *neigh)
690{
691 struct hh_cache *hh;
692
693 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
694
695 if (!neigh->dead) {
696 printk(KERN_WARNING
697 "Destroying alive neighbour %p\n", neigh);
698 dump_stack();
699 return;
700 }
701
702 if (neigh_del_timer(neigh))
703 printk(KERN_WARNING "Impossible event.\n");
704
705 while ((hh = neigh->hh) != NULL) {
706 neigh->hh = hh->hh_next;
707 hh->hh_next = NULL;
3644f0ce
SH
708
709 write_seqlock_bh(&hh->hh_lock);
1da177e4 710 hh->hh_output = neigh_blackhole;
3644f0ce 711 write_sequnlock_bh(&hh->hh_lock);
1da177e4
LT
712 if (atomic_dec_and_test(&hh->hh_refcnt))
713 kfree(hh);
714 }
715
1da177e4
LT
716 skb_queue_purge(&neigh->arp_queue);
717
718 dev_put(neigh->dev);
719 neigh_parms_put(neigh->parms);
720
721 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
722
723 atomic_dec(&neigh->tbl->entries);
767e97e1 724 call_rcu(&neigh->rcu, neigh_destroy_rcu);
1da177e4 725}
0a204500 726EXPORT_SYMBOL(neigh_destroy);
1da177e4
LT
727
728/* Neighbour state is suspicious;
729 disable fast path.
730
731 Called with write_locked neigh.
732 */
733static void neigh_suspect(struct neighbour *neigh)
734{
735 struct hh_cache *hh;
736
737 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
738
739 neigh->output = neigh->ops->output;
740
741 for (hh = neigh->hh; hh; hh = hh->hh_next)
742 hh->hh_output = neigh->ops->output;
743}
744
745/* Neighbour state is OK;
746 enable fast path.
747
748 Called with write_locked neigh.
749 */
750static void neigh_connect(struct neighbour *neigh)
751{
752 struct hh_cache *hh;
753
754 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
755
756 neigh->output = neigh->ops->connected_output;
757
758 for (hh = neigh->hh; hh; hh = hh->hh_next)
759 hh->hh_output = neigh->ops->hh_output;
760}
761
e4c4e448 762static void neigh_periodic_work(struct work_struct *work)
1da177e4 763{
e4c4e448 764 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
767e97e1
ED
765 struct neighbour *n;
766 struct neighbour __rcu **np;
e4c4e448 767 unsigned int i;
d6bf7817 768 struct neigh_hash_table *nht;
1da177e4
LT
769
770 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
771
e4c4e448 772 write_lock_bh(&tbl->lock);
d6bf7817
ED
773 nht = rcu_dereference_protected(tbl->nht,
774 lockdep_is_held(&tbl->lock));
1da177e4
LT
775
776 /*
777 * periodically recompute ReachableTime from random function
778 */
779
e4c4e448 780 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
1da177e4 781 struct neigh_parms *p;
e4c4e448 782 tbl->last_rand = jiffies;
1da177e4
LT
783 for (p = &tbl->parms; p; p = p->next)
784 p->reachable_time =
785 neigh_rand_reach_time(p->base_reachable_time);
786 }
787
d6bf7817
ED
788 for (i = 0 ; i <= nht->hash_mask; i++) {
789 np = &nht->hash_buckets[i];
1da177e4 790
767e97e1
ED
791 while ((n = rcu_dereference_protected(*np,
792 lockdep_is_held(&tbl->lock))) != NULL) {
e4c4e448 793 unsigned int state;
1da177e4 794
e4c4e448 795 write_lock(&n->lock);
1da177e4 796
e4c4e448
ED
797 state = n->nud_state;
798 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
799 write_unlock(&n->lock);
800 goto next_elt;
801 }
1da177e4 802
e4c4e448
ED
803 if (time_before(n->used, n->confirmed))
804 n->used = n->confirmed;
1da177e4 805
e4c4e448
ED
806 if (atomic_read(&n->refcnt) == 1 &&
807 (state == NUD_FAILED ||
808 time_after(jiffies, n->used + n->parms->gc_staletime))) {
809 *np = n->next;
810 n->dead = 1;
811 write_unlock(&n->lock);
812 neigh_cleanup_and_release(n);
813 continue;
814 }
1da177e4 815 write_unlock(&n->lock);
1da177e4
LT
816
817next_elt:
e4c4e448
ED
818 np = &n->next;
819 }
820 /*
821 * It's fine to release lock here, even if hash table
822 * grows while we are preempted.
823 */
824 write_unlock_bh(&tbl->lock);
825 cond_resched();
826 write_lock_bh(&tbl->lock);
1da177e4 827 }
	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
e4c4e448
ED
832 schedule_delayed_work(&tbl->gc_work,
833 tbl->parms.base_reachable_time >> 1);
834 write_unlock_bh(&tbl->lock);
1da177e4
LT
835}
836
837static __inline__ int neigh_max_probes(struct neighbour *n)
838{
839 struct neigh_parms *p = n->parms;
a02cec21 840 return (n->nud_state & NUD_PROBE) ?
1da177e4 841 p->ucast_probes :
a02cec21 842 p->ucast_probes + p->app_probes + p->mcast_probes;
1da177e4
LT
843}
844
5ef12d98 845static void neigh_invalidate(struct neighbour *neigh)
0a141509
ED
846 __releases(neigh->lock)
847 __acquires(neigh->lock)
5ef12d98
TT
848{
849 struct sk_buff *skb;
850
851 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
852 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
853 neigh->updated = jiffies;
854
	/* It is a very thin place.  report_unreachable is a very complicated
	   routine.  Particularly, it can hit the same neighbour entry!

	   So we try to be accurate and avoid a dead loop. --ANK
	 */
860 while (neigh->nud_state == NUD_FAILED &&
861 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
862 write_unlock(&neigh->lock);
863 neigh->ops->error_report(neigh, skb);
864 write_lock(&neigh->lock);
865 }
866 skb_queue_purge(&neigh->arp_queue);
867}
868
1da177e4
LT
869/* Called when a timer expires for a neighbour entry. */
870
871static void neigh_timer_handler(unsigned long arg)
872{
873 unsigned long now, next;
874 struct neighbour *neigh = (struct neighbour *)arg;
875 unsigned state;
876 int notify = 0;
877
878 write_lock(&neigh->lock);
879
880 state = neigh->nud_state;
881 now = jiffies;
882 next = now + HZ;
883
884 if (!(state & NUD_IN_TIMER)) {
885#ifndef CONFIG_SMP
886 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
887#endif
888 goto out;
889 }
890
891 if (state & NUD_REACHABLE) {
4ec93edb 892 if (time_before_eq(now,
1da177e4
LT
893 neigh->confirmed + neigh->parms->reachable_time)) {
894 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
895 next = neigh->confirmed + neigh->parms->reachable_time;
896 } else if (time_before_eq(now,
897 neigh->used + neigh->parms->delay_probe_time)) {
898 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
899 neigh->nud_state = NUD_DELAY;
955aaa2f 900 neigh->updated = jiffies;
1da177e4
LT
901 neigh_suspect(neigh);
902 next = now + neigh->parms->delay_probe_time;
903 } else {
904 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
905 neigh->nud_state = NUD_STALE;
955aaa2f 906 neigh->updated = jiffies;
1da177e4 907 neigh_suspect(neigh);
8d71740c 908 notify = 1;
1da177e4
LT
909 }
910 } else if (state & NUD_DELAY) {
4ec93edb 911 if (time_before_eq(now,
1da177e4
LT
912 neigh->confirmed + neigh->parms->delay_probe_time)) {
913 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
914 neigh->nud_state = NUD_REACHABLE;
955aaa2f 915 neigh->updated = jiffies;
1da177e4 916 neigh_connect(neigh);
8d71740c 917 notify = 1;
1da177e4
LT
918 next = neigh->confirmed + neigh->parms->reachable_time;
919 } else {
920 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
921 neigh->nud_state = NUD_PROBE;
955aaa2f 922 neigh->updated = jiffies;
1da177e4
LT
923 atomic_set(&neigh->probes, 0);
924 next = now + neigh->parms->retrans_time;
925 }
926 } else {
927 /* NUD_PROBE|NUD_INCOMPLETE */
928 next = now + neigh->parms->retrans_time;
929 }
930
931 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
932 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1da177e4
LT
933 neigh->nud_state = NUD_FAILED;
934 notify = 1;
5ef12d98 935 neigh_invalidate(neigh);
1da177e4
LT
936 }
937
938 if (neigh->nud_state & NUD_IN_TIMER) {
1da177e4
LT
939 if (time_before(next, jiffies + HZ/2))
940 next = jiffies + HZ/2;
6fb9974f
HX
941 if (!mod_timer(&neigh->timer, next))
942 neigh_hold(neigh);
1da177e4
LT
943 }
944 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
945 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
9ff56607
DM
946 /* keep skb alive even if arp_queue overflows */
947 if (skb)
7e36763b 948 skb = skb_copy(skb, GFP_ATOMIC);
9ff56607 949 write_unlock(&neigh->lock);
1da177e4
LT
950 neigh->ops->solicit(neigh, skb);
951 atomic_inc(&neigh->probes);
f3fbbe0f 952 kfree_skb(skb);
9ff56607 953 } else {
69cc64d8 954out:
9ff56607
DM
955 write_unlock(&neigh->lock);
956 }
d961db35 957
8d71740c 958 if (notify)
d961db35 959 neigh_update_notify(neigh);
1da177e4 960
1da177e4
LT
961 neigh_release(neigh);
962}
963
964int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
965{
966 int rc;
967 unsigned long now;
968
969 write_lock_bh(&neigh->lock);
970
971 rc = 0;
972 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
973 goto out_unlock_bh;
974
975 now = jiffies;
4ec93edb 976
1da177e4
LT
977 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
978 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
979 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
980 neigh->nud_state = NUD_INCOMPLETE;
955aaa2f 981 neigh->updated = jiffies;
667347f1 982 neigh_add_timer(neigh, now + 1);
1da177e4
LT
983 } else {
984 neigh->nud_state = NUD_FAILED;
955aaa2f 985 neigh->updated = jiffies;
1da177e4
LT
986 write_unlock_bh(&neigh->lock);
987
f3fbbe0f 988 kfree_skb(skb);
1da177e4
LT
989 return 1;
990 }
991 } else if (neigh->nud_state & NUD_STALE) {
992 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
1da177e4 993 neigh->nud_state = NUD_DELAY;
955aaa2f 994 neigh->updated = jiffies;
667347f1
DM
995 neigh_add_timer(neigh,
996 jiffies + neigh->parms->delay_probe_time);
1da177e4
LT
997 }
998
999 if (neigh->nud_state == NUD_INCOMPLETE) {
1000 if (skb) {
1001 if (skb_queue_len(&neigh->arp_queue) >=
1002 neigh->parms->queue_len) {
1003 struct sk_buff *buff;
f72051b0 1004 buff = __skb_dequeue(&neigh->arp_queue);
1da177e4 1005 kfree_skb(buff);
9a6d276e 1006 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1da177e4 1007 }
a4731138 1008 skb_dst_force(skb);
1da177e4
LT
1009 __skb_queue_tail(&neigh->arp_queue, skb);
1010 }
1011 rc = 1;
1012 }
1013out_unlock_bh:
1014 write_unlock_bh(&neigh->lock);
1015 return rc;
1016}
0a204500 1017EXPORT_SYMBOL(__neigh_event_send);
1da177e4 1018
e92b43a3 1019static void neigh_update_hhs(struct neighbour *neigh)
1da177e4
LT
1020{
1021 struct hh_cache *hh;
3b04ddde 1022 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
91a72a70
DK
1023 = NULL;
1024
1025 if (neigh->dev->header_ops)
1026 update = neigh->dev->header_ops->cache_update;
1da177e4
LT
1027
1028 if (update) {
1029 for (hh = neigh->hh; hh; hh = hh->hh_next) {
3644f0ce 1030 write_seqlock_bh(&hh->hh_lock);
1da177e4 1031 update(hh, neigh->dev, neigh->ha);
3644f0ce 1032 write_sequnlock_bh(&hh->hh_lock);
1da177e4
LT
1033 }
1034 }
1035}
1036
1037
1038
/* Generic update routine.
   -- lladdr is the new lladdr or NULL, if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
   (A short illustration of typical flag combinations follows the
   function below.)
 */
1059
1060int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1061 u32 flags)
1062{
1063 u8 old;
1064 int err;
1da177e4 1065 int notify = 0;
1da177e4
LT
1066 struct net_device *dev;
1067 int update_isrouter = 0;
1068
1069 write_lock_bh(&neigh->lock);
1070
1071 dev = neigh->dev;
1072 old = neigh->nud_state;
1073 err = -EPERM;
1074
4ec93edb 1075 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1da177e4
LT
1076 (old & (NUD_NOARP | NUD_PERMANENT)))
1077 goto out;
1078
1079 if (!(new & NUD_VALID)) {
1080 neigh_del_timer(neigh);
1081 if (old & NUD_CONNECTED)
1082 neigh_suspect(neigh);
1083 neigh->nud_state = new;
1084 err = 0;
1da177e4 1085 notify = old & NUD_VALID;
5ef12d98
TT
1086 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1087 (new & NUD_FAILED)) {
1088 neigh_invalidate(neigh);
1089 notify = 1;
1090 }
1da177e4
LT
1091 goto out;
1092 }
1093
1094 /* Compare new lladdr with cached one */
1095 if (!dev->addr_len) {
1096 /* First case: device needs no address. */
1097 lladdr = neigh->ha;
1098 } else if (lladdr) {
1099 /* The second case: if something is already cached
1100 and a new address is proposed:
1101 - compare new & old
1102 - if they are different, check override flag
1103 */
4ec93edb 1104 if ((old & NUD_VALID) &&
1da177e4
LT
1105 !memcmp(lladdr, neigh->ha, dev->addr_len))
1106 lladdr = neigh->ha;
1107 } else {
1108 /* No address is supplied; if we know something,
1109 use it, otherwise discard the request.
1110 */
1111 err = -EINVAL;
1112 if (!(old & NUD_VALID))
1113 goto out;
1114 lladdr = neigh->ha;
1115 }
1116
1117 if (new & NUD_CONNECTED)
1118 neigh->confirmed = jiffies;
1119 neigh->updated = jiffies;
1120
1121 /* If entry was valid and address is not changed,
1122 do not change entry state, if new one is STALE.
1123 */
1124 err = 0;
1125 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1126 if (old & NUD_VALID) {
1127 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1128 update_isrouter = 0;
1129 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1130 (old & NUD_CONNECTED)) {
1131 lladdr = neigh->ha;
1132 new = NUD_STALE;
1133 } else
1134 goto out;
1135 } else {
1136 if (lladdr == neigh->ha && new == NUD_STALE &&
1137 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1138 (old & NUD_CONNECTED))
1139 )
1140 new = old;
1141 }
1142 }
1143
1144 if (new != old) {
1145 neigh_del_timer(neigh);
a43d8994 1146 if (new & NUD_IN_TIMER)
4ec93edb
YH
1147 neigh_add_timer(neigh, (jiffies +
1148 ((new & NUD_REACHABLE) ?
667347f1
DM
1149 neigh->parms->reachable_time :
1150 0)));
1da177e4
LT
1151 neigh->nud_state = new;
1152 }
1153
1154 if (lladdr != neigh->ha) {
1155 memcpy(&neigh->ha, lladdr, dev->addr_len);
1156 neigh_update_hhs(neigh);
1157 if (!(new & NUD_CONNECTED))
1158 neigh->confirmed = jiffies -
1159 (neigh->parms->base_reachable_time << 1);
1da177e4 1160 notify = 1;
1da177e4
LT
1161 }
1162 if (new == old)
1163 goto out;
1164 if (new & NUD_CONNECTED)
1165 neigh_connect(neigh);
1166 else
1167 neigh_suspect(neigh);
1168 if (!(old & NUD_VALID)) {
1169 struct sk_buff *skb;
1170
1171 /* Again: avoid dead loop if something went wrong */
1172
1173 while (neigh->nud_state & NUD_VALID &&
1174 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1175 struct neighbour *n1 = neigh;
1176 write_unlock_bh(&neigh->lock);
1177 /* On shaper/eql skb->dst->neighbour != neigh :( */
adf30907
ED
1178 if (skb_dst(skb) && skb_dst(skb)->neighbour)
1179 n1 = skb_dst(skb)->neighbour;
1da177e4
LT
1180 n1->output(skb);
1181 write_lock_bh(&neigh->lock);
1182 }
1183 skb_queue_purge(&neigh->arp_queue);
1184 }
1185out:
1186 if (update_isrouter) {
1187 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1188 (neigh->flags | NTF_ROUTER) :
1189 (neigh->flags & ~NTF_ROUTER);
1190 }
1191 write_unlock_bh(&neigh->lock);
8d71740c
TT
1192
1193 if (notify)
d961db35
TG
1194 neigh_update_notify(neigh);
1195
1da177e4
LT
1196 return err;
1197}
0a204500 1198EXPORT_SYMBOL(neigh_update);
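/*
 * Illustration of the flag semantics documented above (sketch only, not
 * part of the original file): an administrative replace from netlink
 * (see neigh_add() further down) passes both OVERRIDE and ADMIN, so a
 * new lladdr is accepted even for NUD_NOARP/NUD_PERMANENT entries:
 *
 *	neigh_update(neigh, lladdr, ndm->ndm_state,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 *
 * whereas a passively learned address (neigh_event_ns() just below)
 * uses NUD_STALE with only NEIGH_UPDATE_F_OVERRIDE, so entries in
 * NUD_NOARP or NUD_PERMANENT state are left untouched.
 */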
1da177e4
LT
1199
1200struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1201 u8 *lladdr, void *saddr,
1202 struct net_device *dev)
1203{
1204 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1205 lladdr || !dev->addr_len);
1206 if (neigh)
4ec93edb 1207 neigh_update(neigh, lladdr, NUD_STALE,
1da177e4
LT
1208 NEIGH_UPDATE_F_OVERRIDE);
1209 return neigh;
1210}
0a204500 1211EXPORT_SYMBOL(neigh_event_ns);
1da177e4
LT
1212
1213static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
d77072ec 1214 __be16 protocol)
1da177e4
LT
1215{
1216 struct hh_cache *hh;
1217 struct net_device *dev = dst->dev;
1218
1219 for (hh = n->hh; hh; hh = hh->hh_next)
1220 if (hh->hh_type == protocol)
1221 break;
1222
77d04bd9 1223 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
3644f0ce 1224 seqlock_init(&hh->hh_lock);
1da177e4
LT
1225 hh->hh_type = protocol;
1226 atomic_set(&hh->hh_refcnt, 0);
1227 hh->hh_next = NULL;
3b04ddde
SH
1228
1229 if (dev->header_ops->cache(n, hh)) {
1da177e4
LT
1230 kfree(hh);
1231 hh = NULL;
1232 } else {
1233 atomic_inc(&hh->hh_refcnt);
1234 hh->hh_next = n->hh;
1235 n->hh = hh;
1236 if (n->nud_state & NUD_CONNECTED)
1237 hh->hh_output = n->ops->hh_output;
1238 else
1239 hh->hh_output = n->ops->output;
1240 }
1241 }
1242 if (hh) {
1243 atomic_inc(&hh->hh_refcnt);
1244 dst->hh = hh;
1245 }
1246}
1247
/* This function can be used in contexts where only the old dev_queue_xmit
 * worked, e.g. if you want to override the normal output path (eql, shaper),
 * but resolution is not made yet.
 */
1252
1253int neigh_compat_output(struct sk_buff *skb)
1254{
1255 struct net_device *dev = skb->dev;
1256
bbe735e4 1257 __skb_pull(skb, skb_network_offset(skb));
1da177e4 1258
0c4e8581
SH
1259 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1260 skb->len) < 0 &&
3b04ddde 1261 dev->header_ops->rebuild(skb))
1da177e4
LT
1262 return 0;
1263
1264 return dev_queue_xmit(skb);
1265}
0a204500 1266EXPORT_SYMBOL(neigh_compat_output);
1da177e4
LT
1267
1268/* Slow and careful. */
1269
1270int neigh_resolve_output(struct sk_buff *skb)
1271{
adf30907 1272 struct dst_entry *dst = skb_dst(skb);
1da177e4
LT
1273 struct neighbour *neigh;
1274 int rc = 0;
1275
1276 if (!dst || !(neigh = dst->neighbour))
1277 goto discard;
1278
bbe735e4 1279 __skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
1280
1281 if (!neigh_event_send(neigh, skb)) {
1282 int err;
1283 struct net_device *dev = neigh->dev;
c7d4426a
ED
1284 if (dev->header_ops->cache &&
1285 !dst->hh &&
1286 !(dst->flags & DST_NOCACHE)) {
1da177e4
LT
1287 write_lock_bh(&neigh->lock);
1288 if (!dst->hh)
1289 neigh_hh_init(neigh, dst, dst->ops->protocol);
0c4e8581
SH
1290 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1291 neigh->ha, NULL, skb->len);
1da177e4
LT
1292 write_unlock_bh(&neigh->lock);
1293 } else {
1294 read_lock_bh(&neigh->lock);
0c4e8581
SH
1295 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1296 neigh->ha, NULL, skb->len);
1da177e4
LT
1297 read_unlock_bh(&neigh->lock);
1298 }
1299 if (err >= 0)
1300 rc = neigh->ops->queue_xmit(skb);
1301 else
1302 goto out_kfree_skb;
1303 }
1304out:
1305 return rc;
1306discard:
1307 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1308 dst, dst ? dst->neighbour : NULL);
1309out_kfree_skb:
1310 rc = -EINVAL;
1311 kfree_skb(skb);
1312 goto out;
1313}
0a204500 1314EXPORT_SYMBOL(neigh_resolve_output);
1da177e4
LT
1315
1316/* As fast as possible without hh cache */
1317
1318int neigh_connected_output(struct sk_buff *skb)
1319{
1320 int err;
adf30907 1321 struct dst_entry *dst = skb_dst(skb);
1da177e4
LT
1322 struct neighbour *neigh = dst->neighbour;
1323 struct net_device *dev = neigh->dev;
1324
bbe735e4 1325 __skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
1326
1327 read_lock_bh(&neigh->lock);
0c4e8581
SH
1328 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1329 neigh->ha, NULL, skb->len);
1da177e4
LT
1330 read_unlock_bh(&neigh->lock);
1331 if (err >= 0)
1332 err = neigh->ops->queue_xmit(skb);
1333 else {
1334 err = -EINVAL;
1335 kfree_skb(skb);
1336 }
1337 return err;
1338}
0a204500 1339EXPORT_SYMBOL(neigh_connected_output);
1da177e4
LT
1340
1341static void neigh_proxy_process(unsigned long arg)
1342{
1343 struct neigh_table *tbl = (struct neigh_table *)arg;
1344 long sched_next = 0;
1345 unsigned long now = jiffies;
f72051b0 1346 struct sk_buff *skb, *n;
1da177e4
LT
1347
1348 spin_lock(&tbl->proxy_queue.lock);
1349
f72051b0
DM
1350 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1351 long tdif = NEIGH_CB(skb)->sched_next - now;
1da177e4 1352
1da177e4 1353 if (tdif <= 0) {
f72051b0
DM
1354 struct net_device *dev = skb->dev;
1355 __skb_unlink(skb, &tbl->proxy_queue);
1da177e4 1356 if (tbl->proxy_redo && netif_running(dev))
f72051b0 1357 tbl->proxy_redo(skb);
1da177e4 1358 else
f72051b0 1359 kfree_skb(skb);
1da177e4
LT
1360
1361 dev_put(dev);
1362 } else if (!sched_next || tdif < sched_next)
1363 sched_next = tdif;
1364 }
1365 del_timer(&tbl->proxy_timer);
1366 if (sched_next)
1367 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1368 spin_unlock(&tbl->proxy_queue.lock);
1369}
1370
1371void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1372 struct sk_buff *skb)
1373{
1374 unsigned long now = jiffies;
1375 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1376
1377 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1378 kfree_skb(skb);
1379 return;
1380 }
a61bbcf2
PM
1381
1382 NEIGH_CB(skb)->sched_next = sched_next;
1383 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1da177e4
LT
1384
1385 spin_lock(&tbl->proxy_queue.lock);
1386 if (del_timer(&tbl->proxy_timer)) {
1387 if (time_before(tbl->proxy_timer.expires, sched_next))
1388 sched_next = tbl->proxy_timer.expires;
1389 }
adf30907 1390 skb_dst_drop(skb);
1da177e4
LT
1391 dev_hold(skb->dev);
1392 __skb_queue_tail(&tbl->proxy_queue, skb);
1393 mod_timer(&tbl->proxy_timer, sched_next);
1394 spin_unlock(&tbl->proxy_queue.lock);
1395}
0a204500 1396EXPORT_SYMBOL(pneigh_enqueue);
1da177e4 1397
97fd5bc7 1398static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
426b5303
EB
1399 struct net *net, int ifindex)
1400{
1401 struct neigh_parms *p;
1402
1403 for (p = &tbl->parms; p; p = p->next) {
878628fb 1404 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
426b5303
EB
1405 (!p->dev && !ifindex))
1406 return p;
1407 }
1408
1409 return NULL;
1410}
1da177e4
LT
1411
1412struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1413 struct neigh_table *tbl)
1414{
426b5303 1415 struct neigh_parms *p, *ref;
00829823
SH
1416 struct net *net = dev_net(dev);
1417 const struct net_device_ops *ops = dev->netdev_ops;
426b5303 1418
97fd5bc7 1419 ref = lookup_neigh_parms(tbl, net, 0);
426b5303
EB
1420 if (!ref)
1421 return NULL;
1da177e4 1422
426b5303 1423 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1da177e4 1424 if (p) {
1da177e4
LT
1425 p->tbl = tbl;
1426 atomic_set(&p->refcnt, 1);
1da177e4
LT
1427 p->reachable_time =
1428 neigh_rand_reach_time(p->base_reachable_time);
c7fb64db 1429
00829823 1430 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
486b51d3
DL
1431 kfree(p);
1432 return NULL;
1da177e4 1433 }
486b51d3
DL
1434
1435 dev_hold(dev);
1436 p->dev = dev;
e42ea986 1437 write_pnet(&p->net, hold_net(net));
1da177e4
LT
1438 p->sysctl_table = NULL;
1439 write_lock_bh(&tbl->lock);
1440 p->next = tbl->parms.next;
1441 tbl->parms.next = p;
1442 write_unlock_bh(&tbl->lock);
1443 }
1444 return p;
1445}
0a204500 1446EXPORT_SYMBOL(neigh_parms_alloc);
1da177e4
LT
1447
1448static void neigh_rcu_free_parms(struct rcu_head *head)
1449{
1450 struct neigh_parms *parms =
1451 container_of(head, struct neigh_parms, rcu_head);
1452
1453 neigh_parms_put(parms);
1454}
1455
1456void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1457{
1458 struct neigh_parms **p;
1459
1460 if (!parms || parms == &tbl->parms)
1461 return;
1462 write_lock_bh(&tbl->lock);
1463 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1464 if (*p == parms) {
1465 *p = parms->next;
1466 parms->dead = 1;
1467 write_unlock_bh(&tbl->lock);
cecbb639
DM
1468 if (parms->dev)
1469 dev_put(parms->dev);
1da177e4
LT
1470 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1471 return;
1472 }
1473 }
1474 write_unlock_bh(&tbl->lock);
1475 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1476}
0a204500 1477EXPORT_SYMBOL(neigh_parms_release);
1da177e4 1478
06f0511d 1479static void neigh_parms_destroy(struct neigh_parms *parms)
1da177e4 1480{
57da52c1 1481 release_net(neigh_parms_net(parms));
1da177e4
LT
1482 kfree(parms);
1483}
1484
c2ecba71
PE
1485static struct lock_class_key neigh_table_proxy_queue_class;
1486
bd89efc5 1487void neigh_table_init_no_netlink(struct neigh_table *tbl)
1da177e4
LT
1488{
1489 unsigned long now = jiffies;
1490 unsigned long phsize;
1491
e42ea986 1492 write_pnet(&tbl->parms.net, &init_net);
1da177e4 1493 atomic_set(&tbl->parms.refcnt, 1);
1da177e4
LT
1494 tbl->parms.reachable_time =
1495 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1496
1497 if (!tbl->kmem_cachep)
e5d679f3
AD
1498 tbl->kmem_cachep =
1499 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1500 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
20c2df83 1501 NULL);
1da177e4
LT
1502 tbl->stats = alloc_percpu(struct neigh_statistics);
1503 if (!tbl->stats)
1504 panic("cannot create neighbour cache statistics");
4ec93edb 1505
1da177e4 1506#ifdef CONFIG_PROC_FS
9b739ba5
AD
1507 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1508 &neigh_stat_seq_fops, tbl))
1da177e4 1509 panic("cannot create neighbour proc dir entry");
1da177e4
LT
1510#endif
1511
d6bf7817 1512 tbl->nht = neigh_hash_alloc(8);
1da177e4
LT
1513
1514 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
77d04bd9 1515 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1da177e4 1516
d6bf7817 1517 if (!tbl->nht || !tbl->phash_buckets)
1da177e4
LT
1518 panic("cannot allocate neighbour cache hashes");
1519
1da177e4 1520 rwlock_init(&tbl->lock);
e4c4e448
ED
1521 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1522 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
b24b8a24 1523 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
c2ecba71
PE
1524 skb_queue_head_init_class(&tbl->proxy_queue,
1525 &neigh_table_proxy_queue_class);
1da177e4
LT
1526
1527 tbl->last_flush = now;
1528 tbl->last_rand = now + tbl->parms.reachable_time * 20;
bd89efc5 1529}
0a204500 1530EXPORT_SYMBOL(neigh_table_init_no_netlink);
bd89efc5
SK
1531
1532void neigh_table_init(struct neigh_table *tbl)
1533{
1534 struct neigh_table *tmp;
1535
1536 neigh_table_init_no_netlink(tbl);
1da177e4 1537 write_lock(&neigh_tbl_lock);
bd89efc5
SK
1538 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1539 if (tmp->family == tbl->family)
1540 break;
1541 }
1da177e4
LT
1542 tbl->next = neigh_tables;
1543 neigh_tables = tbl;
1544 write_unlock(&neigh_tbl_lock);
bd89efc5
SK
1545
1546 if (unlikely(tmp)) {
1547 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1548 "family %d\n", tbl->family);
1549 dump_stack();
1550 }
1da177e4 1551}
0a204500 1552EXPORT_SYMBOL(neigh_table_init);
1da177e4
LT
1553
1554int neigh_table_clear(struct neigh_table *tbl)
1555{
1556 struct neigh_table **tp;
1557
1558 /* It is not clean... Fix it to unload IPv6 module safely */
e4c4e448
ED
1559 cancel_delayed_work(&tbl->gc_work);
1560 flush_scheduled_work();
1da177e4
LT
1561 del_timer_sync(&tbl->proxy_timer);
1562 pneigh_queue_purge(&tbl->proxy_queue);
1563 neigh_ifdown(tbl, NULL);
1564 if (atomic_read(&tbl->entries))
1565 printk(KERN_CRIT "neighbour leakage\n");
1566 write_lock(&neigh_tbl_lock);
1567 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1568 if (*tp == tbl) {
1569 *tp = tbl->next;
1570 break;
1571 }
1572 }
1573 write_unlock(&neigh_tbl_lock);
1574
d6bf7817
ED
1575 call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
1576 tbl->nht = NULL;
1da177e4
LT
1577
1578 kfree(tbl->phash_buckets);
1579 tbl->phash_buckets = NULL;
1580
3f192b5c
AD
1581 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1582
3fcde74b
KK
1583 free_percpu(tbl->stats);
1584 tbl->stats = NULL;
1585
bfb85c9f
RD
1586 kmem_cache_destroy(tbl->kmem_cachep);
1587 tbl->kmem_cachep = NULL;
1588
1da177e4
LT
1589 return 0;
1590}
0a204500 1591EXPORT_SYMBOL(neigh_table_clear);
1da177e4 1592
c8822a4e 1593static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1da177e4 1594{
3b1e0a65 1595 struct net *net = sock_net(skb->sk);
a14a49d2
TG
1596 struct ndmsg *ndm;
1597 struct nlattr *dst_attr;
1da177e4
LT
1598 struct neigh_table *tbl;
1599 struct net_device *dev = NULL;
a14a49d2 1600 int err = -EINVAL;
1da177e4 1601
110b2499 1602 ASSERT_RTNL();
a14a49d2 1603 if (nlmsg_len(nlh) < sizeof(*ndm))
1da177e4
LT
1604 goto out;
1605
a14a49d2
TG
1606 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1607 if (dst_attr == NULL)
1608 goto out;
1609
1610 ndm = nlmsg_data(nlh);
1611 if (ndm->ndm_ifindex) {
110b2499 1612 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
a14a49d2
TG
1613 if (dev == NULL) {
1614 err = -ENODEV;
1615 goto out;
1616 }
1617 }
1618
1da177e4
LT
1619 read_lock(&neigh_tbl_lock);
1620 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
a14a49d2 1621 struct neighbour *neigh;
1da177e4
LT
1622
1623 if (tbl->family != ndm->ndm_family)
1624 continue;
1625 read_unlock(&neigh_tbl_lock);
1626
a14a49d2 1627 if (nla_len(dst_attr) < tbl->key_len)
110b2499 1628 goto out;
1da177e4
LT
1629
1630 if (ndm->ndm_flags & NTF_PROXY) {
426b5303 1631 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
110b2499 1632 goto out;
1da177e4
LT
1633 }
1634
a14a49d2 1635 if (dev == NULL)
110b2499 1636 goto out;
1da177e4 1637
a14a49d2
TG
1638 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1639 if (neigh == NULL) {
1640 err = -ENOENT;
110b2499 1641 goto out;
1da177e4 1642 }
a14a49d2
TG
1643
1644 err = neigh_update(neigh, NULL, NUD_FAILED,
1645 NEIGH_UPDATE_F_OVERRIDE |
1646 NEIGH_UPDATE_F_ADMIN);
1647 neigh_release(neigh);
110b2499 1648 goto out;
1da177e4
LT
1649 }
1650 read_unlock(&neigh_tbl_lock);
a14a49d2
TG
1651 err = -EAFNOSUPPORT;
1652
1da177e4
LT
1653out:
1654 return err;
1655}
1656
c8822a4e 1657static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1da177e4 1658{
3b1e0a65 1659 struct net *net = sock_net(skb->sk);
5208debd
TG
1660 struct ndmsg *ndm;
1661 struct nlattr *tb[NDA_MAX+1];
1da177e4
LT
1662 struct neigh_table *tbl;
1663 struct net_device *dev = NULL;
5208debd 1664 int err;
1da177e4 1665
110b2499 1666 ASSERT_RTNL();
5208debd
TG
1667 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1668 if (err < 0)
1da177e4
LT
1669 goto out;
1670
5208debd
TG
1671 err = -EINVAL;
1672 if (tb[NDA_DST] == NULL)
1673 goto out;
1674
1675 ndm = nlmsg_data(nlh);
1676 if (ndm->ndm_ifindex) {
110b2499 1677 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
5208debd
TG
1678 if (dev == NULL) {
1679 err = -ENODEV;
1680 goto out;
1681 }
1682
1683 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
110b2499 1684 goto out;
5208debd
TG
1685 }
1686
1da177e4
LT
1687 read_lock(&neigh_tbl_lock);
1688 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
5208debd
TG
1689 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1690 struct neighbour *neigh;
1691 void *dst, *lladdr;
1da177e4
LT
1692
1693 if (tbl->family != ndm->ndm_family)
1694 continue;
1695 read_unlock(&neigh_tbl_lock);
1696
5208debd 1697 if (nla_len(tb[NDA_DST]) < tbl->key_len)
110b2499 1698 goto out;
5208debd
TG
1699 dst = nla_data(tb[NDA_DST]);
1700 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1da177e4
LT
1701
1702 if (ndm->ndm_flags & NTF_PROXY) {
62dd9318
VN
1703 struct pneigh_entry *pn;
1704
1705 err = -ENOBUFS;
426b5303 1706 pn = pneigh_lookup(tbl, net, dst, dev, 1);
62dd9318
VN
1707 if (pn) {
1708 pn->flags = ndm->ndm_flags;
1709 err = 0;
1710 }
110b2499 1711 goto out;
1da177e4
LT
1712 }
1713
5208debd 1714 if (dev == NULL)
110b2499 1715 goto out;
5208debd
TG
1716
1717 neigh = neigh_lookup(tbl, dst, dev);
1718 if (neigh == NULL) {
1719 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1720 err = -ENOENT;
110b2499 1721 goto out;
5208debd 1722 }
4ec93edb 1723
5208debd
TG
1724 neigh = __neigh_lookup_errno(tbl, dst, dev);
1725 if (IS_ERR(neigh)) {
1726 err = PTR_ERR(neigh);
110b2499 1727 goto out;
1da177e4 1728 }
1da177e4 1729 } else {
5208debd
TG
1730 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1731 err = -EEXIST;
1732 neigh_release(neigh);
110b2499 1733 goto out;
1da177e4 1734 }
1da177e4 1735
5208debd
TG
1736 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1737 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1738 }
1da177e4 1739
0c5c2d30
EB
1740 if (ndm->ndm_flags & NTF_USE) {
1741 neigh_event_send(neigh, NULL);
1742 err = 0;
1743 } else
1744 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
5208debd 1745 neigh_release(neigh);
110b2499 1746 goto out;
1da177e4
LT
1747 }
1748
1749 read_unlock(&neigh_tbl_lock);
5208debd 1750 err = -EAFNOSUPPORT;
1da177e4
LT
1751out:
1752 return err;
1753}
1754
c7fb64db
TG
1755static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1756{
ca860fb3
TG
1757 struct nlattr *nest;
1758
1759 nest = nla_nest_start(skb, NDTA_PARMS);
1760 if (nest == NULL)
1761 return -ENOBUFS;
c7fb64db
TG
1762
1763 if (parms->dev)
ca860fb3
TG
1764 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1765
1766 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1767 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1768 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1769 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1770 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1771 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1772 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1773 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
c7fb64db 1774 parms->base_reachable_time);
ca860fb3
TG
1775 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1776 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1777 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1778 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1779 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1780 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
c7fb64db 1781
ca860fb3 1782 return nla_nest_end(skb, nest);
c7fb64db 1783
ca860fb3 1784nla_put_failure:
bc3ed28c
TG
1785 nla_nest_cancel(skb, nest);
1786 return -EMSGSIZE;
c7fb64db
TG
1787}
1788
ca860fb3
TG
1789static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1790 u32 pid, u32 seq, int type, int flags)
c7fb64db
TG
1791{
1792 struct nlmsghdr *nlh;
1793 struct ndtmsg *ndtmsg;
1794
ca860fb3
TG
1795 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1796 if (nlh == NULL)
26932566 1797 return -EMSGSIZE;
c7fb64db 1798
ca860fb3 1799 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1800
1801 read_lock_bh(&tbl->lock);
1802 ndtmsg->ndtm_family = tbl->family;
9ef1d4c7
PM
1803 ndtmsg->ndtm_pad1 = 0;
1804 ndtmsg->ndtm_pad2 = 0;
c7fb64db 1805
ca860fb3
TG
1806 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1807 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1808 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1809 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1810 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
c7fb64db
TG
1811
1812 {
1813 unsigned long now = jiffies;
1814 unsigned int flush_delta = now - tbl->last_flush;
1815 unsigned int rand_delta = now - tbl->last_rand;
d6bf7817 1816 struct neigh_hash_table *nht;
c7fb64db
TG
1817 struct ndt_config ndc = {
1818 .ndtc_key_len = tbl->key_len,
1819 .ndtc_entry_size = tbl->entry_size,
1820 .ndtc_entries = atomic_read(&tbl->entries),
1821 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1822 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
c7fb64db
TG
1823 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1824 };
1825
d6bf7817
ED
1826 rcu_read_lock_bh();
1827 nht = rcu_dereference_bh(tbl->nht);
1828 ndc.ndtc_hash_rnd = nht->hash_rnd;
1829 ndc.ndtc_hash_mask = nht->hash_mask;
1830 rcu_read_unlock_bh();
1831
ca860fb3 1832 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
c7fb64db
TG
1833 }
1834
1835 {
1836 int cpu;
1837 struct ndt_stats ndst;
1838
1839 memset(&ndst, 0, sizeof(ndst));
1840
6f912042 1841 for_each_possible_cpu(cpu) {
c7fb64db
TG
1842 struct neigh_statistics *st;
1843
c7fb64db
TG
1844 st = per_cpu_ptr(tbl->stats, cpu);
1845 ndst.ndts_allocs += st->allocs;
1846 ndst.ndts_destroys += st->destroys;
1847 ndst.ndts_hash_grows += st->hash_grows;
1848 ndst.ndts_res_failed += st->res_failed;
1849 ndst.ndts_lookups += st->lookups;
1850 ndst.ndts_hits += st->hits;
1851 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1852 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1853 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1854 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1855 }
1856
ca860fb3 1857 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
c7fb64db
TG
1858 }
1859
1860 BUG_ON(tbl->parms.dev);
1861 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
ca860fb3 1862 goto nla_put_failure;
c7fb64db
TG
1863
1864 read_unlock_bh(&tbl->lock);
ca860fb3 1865 return nlmsg_end(skb, nlh);
c7fb64db 1866
ca860fb3 1867nla_put_failure:
c7fb64db 1868 read_unlock_bh(&tbl->lock);
26932566
PM
1869 nlmsg_cancel(skb, nlh);
1870 return -EMSGSIZE;
c7fb64db
TG
1871}
1872
ca860fb3
TG
1873static int neightbl_fill_param_info(struct sk_buff *skb,
1874 struct neigh_table *tbl,
c7fb64db 1875 struct neigh_parms *parms,
1876 u32 pid, u32 seq, int type,
1877 unsigned int flags)
c7fb64db
TG
1878{
1879 struct ndtmsg *ndtmsg;
1880 struct nlmsghdr *nlh;
1881
ca860fb3
TG
1882 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1883 if (nlh == NULL)
26932566 1884 return -EMSGSIZE;
c7fb64db 1885
ca860fb3 1886 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1887
1888 read_lock_bh(&tbl->lock);
1889 ndtmsg->ndtm_family = tbl->family;
9ef1d4c7
PM
1890 ndtmsg->ndtm_pad1 = 0;
1891 ndtmsg->ndtm_pad2 = 0;
c7fb64db 1892
ca860fb3
TG
1893 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1894 neightbl_fill_parms(skb, parms) < 0)
1895 goto errout;
c7fb64db
TG
1896
1897 read_unlock_bh(&tbl->lock);
ca860fb3
TG
1898 return nlmsg_end(skb, nlh);
1899errout:
c7fb64db 1900 read_unlock_bh(&tbl->lock);
26932566
PM
1901 nlmsg_cancel(skb, nlh);
1902 return -EMSGSIZE;
c7fb64db 1903}
4ec93edb 1904
ef7c79ed 1905static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1906 [NDTA_NAME] = { .type = NLA_STRING },
1907 [NDTA_THRESH1] = { .type = NLA_U32 },
1908 [NDTA_THRESH2] = { .type = NLA_U32 },
1909 [NDTA_THRESH3] = { .type = NLA_U32 },
1910 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1911 [NDTA_PARMS] = { .type = NLA_NESTED },
1912};
1913
ef7c79ed 1914static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1915 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1916 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1917 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1918 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1919 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1920 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1921 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1922 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1923 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1924 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1925 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1926 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1927 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1928};
1929
c8822a4e 1930static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
c7fb64db 1931{
3b1e0a65 1932 struct net *net = sock_net(skb->sk);
c7fb64db 1933 struct neigh_table *tbl;
6b3f8674
TG
1934 struct ndtmsg *ndtmsg;
1935 struct nlattr *tb[NDTA_MAX+1];
1936 int err;
c7fb64db 1937
6b3f8674
TG
1938 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1939 nl_neightbl_policy);
1940 if (err < 0)
1941 goto errout;
c7fb64db 1942
6b3f8674
TG
1943 if (tb[NDTA_NAME] == NULL) {
1944 err = -EINVAL;
1945 goto errout;
1946 }
1947
1948 ndtmsg = nlmsg_data(nlh);
c7fb64db
TG
1949 read_lock(&neigh_tbl_lock);
1950 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1951 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1952 continue;
1953
6b3f8674 1954 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
c7fb64db
TG
1955 break;
1956 }
1957
1958 if (tbl == NULL) {
1959 err = -ENOENT;
6b3f8674 1960 goto errout_locked;
c7fb64db
TG
1961 }
1962
4ec93edb 1963 /*
1964 * We acquire tbl->lock to be nice to the periodic timers and
1965 * make sure they always see a consistent set of values.
1966 */
1967 write_lock_bh(&tbl->lock);
1968
6b3f8674
TG
1969 if (tb[NDTA_PARMS]) {
1970 struct nlattr *tbp[NDTPA_MAX+1];
c7fb64db 1971 struct neigh_parms *p;
6b3f8674 1972 int i, ifindex = 0;
c7fb64db 1973
6b3f8674
TG
1974 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1975 nl_ntbl_parm_policy);
1976 if (err < 0)
1977 goto errout_tbl_lock;
c7fb64db 1978
6b3f8674
TG
1979 if (tbp[NDTPA_IFINDEX])
1980 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
c7fb64db 1981
97fd5bc7 1982 p = lookup_neigh_parms(tbl, net, ifindex);
c7fb64db
TG
1983 if (p == NULL) {
1984 err = -ENOENT;
6b3f8674 1985 goto errout_tbl_lock;
c7fb64db 1986 }
c7fb64db 1987
6b3f8674
TG
1988 for (i = 1; i <= NDTPA_MAX; i++) {
1989 if (tbp[i] == NULL)
1990 continue;
c7fb64db 1991
6b3f8674
TG
1992 switch (i) {
1993 case NDTPA_QUEUE_LEN:
1994 p->queue_len = nla_get_u32(tbp[i]);
1995 break;
1996 case NDTPA_PROXY_QLEN:
1997 p->proxy_qlen = nla_get_u32(tbp[i]);
1998 break;
1999 case NDTPA_APP_PROBES:
2000 p->app_probes = nla_get_u32(tbp[i]);
2001 break;
2002 case NDTPA_UCAST_PROBES:
2003 p->ucast_probes = nla_get_u32(tbp[i]);
2004 break;
2005 case NDTPA_MCAST_PROBES:
2006 p->mcast_probes = nla_get_u32(tbp[i]);
2007 break;
2008 case NDTPA_BASE_REACHABLE_TIME:
2009 p->base_reachable_time = nla_get_msecs(tbp[i]);
2010 break;
2011 case NDTPA_GC_STALETIME:
2012 p->gc_staletime = nla_get_msecs(tbp[i]);
2013 break;
2014 case NDTPA_DELAY_PROBE_TIME:
2015 p->delay_probe_time = nla_get_msecs(tbp[i]);
2016 break;
2017 case NDTPA_RETRANS_TIME:
2018 p->retrans_time = nla_get_msecs(tbp[i]);
2019 break;
2020 case NDTPA_ANYCAST_DELAY:
2021 p->anycast_delay = nla_get_msecs(tbp[i]);
2022 break;
2023 case NDTPA_PROXY_DELAY:
2024 p->proxy_delay = nla_get_msecs(tbp[i]);
2025 break;
2026 case NDTPA_LOCKTIME:
2027 p->locktime = nla_get_msecs(tbp[i]);
2028 break;
2029 }
2030 }
2031 }
c7fb64db 2032
6b3f8674
TG
2033 if (tb[NDTA_THRESH1])
2034 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
c7fb64db 2035
6b3f8674
TG
2036 if (tb[NDTA_THRESH2])
2037 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
c7fb64db 2038
6b3f8674
TG
2039 if (tb[NDTA_THRESH3])
2040 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
c7fb64db 2041
6b3f8674
TG
2042 if (tb[NDTA_GC_INTERVAL])
2043 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
c7fb64db
TG
2044
2045 err = 0;
2046
6b3f8674 2047errout_tbl_lock:
c7fb64db 2048 write_unlock_bh(&tbl->lock);
6b3f8674 2049errout_locked:
c7fb64db 2050 read_unlock(&neigh_tbl_lock);
6b3f8674 2051errout:
c7fb64db
TG
2052 return err;
2053}
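/*
 * Hypothetical user-space sketch, not part of neighbour.c: a minimal
 * RTM_SETNEIGHTBL request raising gc_thresh3 of the IPv4 "arp_cache"
 * table, with the attributes laid out by hand.  This is roughly what
 * "ip ntable change" sends; error handling is trimmed and the buffer
 * size is an assumption.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

static int example_set_gc_thresh3(__u32 thresh)
{
	struct {
		struct nlmsghdr nlh;
		struct ndtmsg ndtm;
		char attrs[64];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct nlattr *nla;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndtmsg));
	req.nlh.nlmsg_type = RTM_SETNEIGHTBL;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ndtm.ndtm_family = AF_INET;

	/* NDTA_NAME: selects the neigh_table by tbl->id */
	nla = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	nla->nla_type = NDTA_NAME;
	nla->nla_len = NLA_HDRLEN + sizeof("arp_cache");
	memcpy((char *)nla + NLA_HDRLEN, "arp_cache", sizeof("arp_cache"));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + NLA_ALIGN(nla->nla_len);

	/* NDTA_THRESH3: hard maximum number of cached entries */
	nla = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	nla->nla_type = NDTA_THRESH3;
	nla->nla_len = NLA_HDRLEN + sizeof(thresh);
	memcpy((char *)nla + NLA_HDRLEN, &thresh, sizeof(thresh));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + NLA_ALIGN(nla->nla_len);

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}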
2054
c8822a4e 2055static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
c7fb64db 2056{
3b1e0a65 2057 struct net *net = sock_net(skb->sk);
ca860fb3
TG
2058 int family, tidx, nidx = 0;
2059 int tbl_skip = cb->args[0];
2060 int neigh_skip = cb->args[1];
c7fb64db
TG
2061 struct neigh_table *tbl;
2062
ca860fb3 2063 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
c7fb64db
TG
2064
2065 read_lock(&neigh_tbl_lock);
ca860fb3 2066 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
c7fb64db
TG
2067 struct neigh_parms *p;
2068
ca860fb3 2069 if (tidx < tbl_skip || (family && tbl->family != family))
c7fb64db
TG
2070 continue;
2071
ca860fb3
TG
2072 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2073 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2074 NLM_F_MULTI) <= 0)
c7fb64db
TG
2075 break;
2076
426b5303 2077 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
878628fb 2078 if (!net_eq(neigh_parms_net(p), net))
2079 continue;
2080
efc683fc
GK
2081 if (nidx < neigh_skip)
2082 goto next;
c7fb64db 2083
ca860fb3
TG
2084 if (neightbl_fill_param_info(skb, tbl, p,
2085 NETLINK_CB(cb->skb).pid,
2086 cb->nlh->nlmsg_seq,
2087 RTM_NEWNEIGHTBL,
2088 NLM_F_MULTI) <= 0)
c7fb64db 2089 goto out;
efc683fc
GK
2090 next:
2091 nidx++;
c7fb64db
TG
2092 }
2093
ca860fb3 2094 neigh_skip = 0;
c7fb64db
TG
2095 }
2096out:
2097 read_unlock(&neigh_tbl_lock);
ca860fb3
TG
2098 cb->args[0] = tidx;
2099 cb->args[1] = nidx;
c7fb64db
TG
2100
2101 return skb->len;
2102}
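/*
 * Hypothetical user-space sketch, not part of neighbour.c: requesting the
 * dump served by neightbl_dump_info() above.  A dump is an
 * RTM_GETNEIGHTBL request carrying a struct rtgenmsg with NLM_F_DUMP set;
 * replies come back as NLM_F_MULTI messages.  "fd" is assumed to be an
 * open NETLINK_ROUTE socket.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_request_ntable_dump(int fd)
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
	req.nlh.nlmsg_type = RTM_GETNEIGHTBL;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.g.rtgen_family = AF_UNSPEC;		/* all address families */

	return sendto(fd, &req, req.nlh.nlmsg_len, 0,
		      (struct sockaddr *)&kernel, sizeof(kernel));
}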
1da177e4 2103
8b8aec50
TG
2104static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2105 u32 pid, u32 seq, int type, unsigned int flags)
1da177e4
LT
2106{
2107 unsigned long now = jiffies;
1da177e4 2108 struct nda_cacheinfo ci;
8b8aec50
TG
2109 struct nlmsghdr *nlh;
2110 struct ndmsg *ndm;
2111
2112 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2113 if (nlh == NULL)
26932566 2114 return -EMSGSIZE;
1da177e4 2115
8b8aec50
TG
2116 ndm = nlmsg_data(nlh);
2117 ndm->ndm_family = neigh->ops->family;
9ef1d4c7
PM
2118 ndm->ndm_pad1 = 0;
2119 ndm->ndm_pad2 = 0;
8b8aec50
TG
2120 ndm->ndm_flags = neigh->flags;
2121 ndm->ndm_type = neigh->type;
2122 ndm->ndm_ifindex = neigh->dev->ifindex;
1da177e4 2123
8b8aec50
TG
2124 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2125
2126 read_lock_bh(&neigh->lock);
2127 ndm->ndm_state = neigh->nud_state;
2128 if ((neigh->nud_state & NUD_VALID) &&
2129 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2130 read_unlock_bh(&neigh->lock);
2131 goto nla_put_failure;
2132 }
2133
b9f5f52c
SH
2134 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2135 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2136 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
8b8aec50
TG
2137 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2138 read_unlock_bh(&neigh->lock);
2139
2140 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2141 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2142
2143 return nlmsg_end(skb, nlh);
2144
2145nla_put_failure:
26932566
PM
2146 nlmsg_cancel(skb, nlh);
2147 return -EMSGSIZE;
1da177e4
LT
2148}
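/*
 * Hypothetical user-space sketch, not part of neighbour.c: walking one
 * RTM_NEWNEIGH message produced by neigh_fill_info() above.  "nlh" is
 * assumed to point at a complete, already validated message.
 */
#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

static void example_print_neigh(struct nlmsghdr *nlh)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	int len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ndm));
	struct rtattr *rta = (struct rtattr *)((char *)ndm +
					       NLMSG_ALIGN(sizeof(*ndm)));

	printf("family %d ifindex %d state 0x%x\n",
	       ndm->ndm_family, ndm->ndm_ifindex, ndm->ndm_state);

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == NDA_CACHEINFO) {
			struct nda_cacheinfo *ci = RTA_DATA(rta);

			printf("  used %u confirmed %u updated %u refcnt %u\n",
			       ci->ndm_used, ci->ndm_confirmed,
			       ci->ndm_updated, ci->ndm_refcnt);
		}
	}
}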
2149
d961db35
TG
2150static void neigh_update_notify(struct neighbour *neigh)
2151{
2152 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2153 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2154}
1da177e4
LT
2155
2156static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2157 struct netlink_callback *cb)
2158{
767e97e1 2159 struct net *net = sock_net(skb->sk);
1da177e4
LT
2160 struct neighbour *n;
2161 int rc, h, s_h = cb->args[1];
2162 int idx, s_idx = idx = cb->args[2];
d6bf7817 2163 struct neigh_hash_table *nht;
1da177e4 2164
d6bf7817
ED
2165 rcu_read_lock_bh();
2166 nht = rcu_dereference_bh(tbl->nht);
2167
d6bf7817 2168 for (h = 0; h <= nht->hash_mask; h++) {
1da177e4
LT
2169 if (h < s_h)
2170 continue;
2171 if (h > s_h)
2172 s_idx = 0;
767e97e1
ED
2173 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2174 n != NULL;
2175 n = rcu_dereference_bh(n->next)) {
09ad9bc7 2176 if (!net_eq(dev_net(n->dev), net))
426b5303 2177 continue;
efc683fc
GK
2178 if (idx < s_idx)
2179 goto next;
1da177e4
LT
2180 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2181 cb->nlh->nlmsg_seq,
2182 RTM_NEWNEIGH,
2183 NLM_F_MULTI) <= 0) {
1da177e4
LT
2184 rc = -1;
2185 goto out;
2186 }
767e97e1 2187next:
efc683fc 2188 idx++;
1da177e4 2189 }
1da177e4
LT
2190 }
2191 rc = skb->len;
2192out:
d6bf7817 2193 rcu_read_unlock_bh();
1da177e4
LT
2194 cb->args[1] = h;
2195 cb->args[2] = idx;
2196 return rc;
2197}
2198
c8822a4e 2199static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1da177e4
LT
2200{
2201 struct neigh_table *tbl;
2202 int t, family, s_t;
2203
2204 read_lock(&neigh_tbl_lock);
8b8aec50 2205 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1da177e4
LT
2206 s_t = cb->args[0];
2207
2208 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2209 if (t < s_t || (family && tbl->family != family))
2210 continue;
2211 if (t > s_t)
2212 memset(&cb->args[1], 0, sizeof(cb->args) -
2213 sizeof(cb->args[0]));
2214 if (neigh_dump_table(tbl, skb, cb) < 0)
2215 break;
2216 }
2217 read_unlock(&neigh_tbl_lock);
2218
2219 cb->args[0] = t;
2220 return skb->len;
2221}
2222
2223void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2224{
2225 int chain;
d6bf7817 2226 struct neigh_hash_table *nht;
1da177e4 2227
d6bf7817
ED
2228 rcu_read_lock_bh();
2229 nht = rcu_dereference_bh(tbl->nht);
2230
767e97e1 2231 read_lock(&tbl->lock); /* avoid resizes */
d6bf7817 2232 for (chain = 0; chain <= nht->hash_mask; chain++) {
1da177e4
LT
2233 struct neighbour *n;
2234
767e97e1
ED
2235 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2236 n != NULL;
2237 n = rcu_dereference_bh(n->next))
1da177e4
LT
2238 cb(n, cookie);
2239 }
d6bf7817
ED
2240 read_unlock(&tbl->lock);
2241 rcu_read_unlock_bh();
1da177e4
LT
2242}
2243EXPORT_SYMBOL(neigh_for_each);
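/*
 * Hypothetical kernel-side sketch, not part of this file: counting
 * reachable IPv4 neighbours with neigh_for_each().  arp_tbl is the IPv4
 * neighbour table declared in <net/arp.h>; the example_* names are
 * illustrative only.
 */
#include <net/arp.h>
#include <net/neighbour.h>

static void example_count_reachable(struct neighbour *n, void *cookie)
{
	if (n->nud_state & NUD_REACHABLE)
		(*(int *)cookie)++;
}

static int example_reachable_neighbours(void)
{
	int count = 0;

	/* callback runs under rcu_read_lock_bh() + tbl->lock inside neigh_for_each() */
	neigh_for_each(&arp_tbl, example_count_reachable, &count);
	return count;
}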
2244
2245/* The tbl->lock must be held as a writer and BH disabled. */
2246void __neigh_for_each_release(struct neigh_table *tbl,
2247 int (*cb)(struct neighbour *))
2248{
2249 int chain;
d6bf7817 2250 struct neigh_hash_table *nht;
1da177e4 2251
d6bf7817
ED
2252 nht = rcu_dereference_protected(tbl->nht,
2253 lockdep_is_held(&tbl->lock));
2254 for (chain = 0; chain <= nht->hash_mask; chain++) {
767e97e1
ED
2255 struct neighbour *n;
2256 struct neighbour __rcu **np;
1da177e4 2257
d6bf7817 2258 np = &nht->hash_buckets[chain];
767e97e1
ED
2259 while ((n = rcu_dereference_protected(*np,
2260 lockdep_is_held(&tbl->lock))) != NULL) {
1da177e4
LT
2261 int release;
2262
2263 write_lock(&n->lock);
2264 release = cb(n);
2265 if (release) {
767e97e1
ED
2266 rcu_assign_pointer(*np,
2267 rcu_dereference_protected(n->next,
2268 lockdep_is_held(&tbl->lock)));
1da177e4
LT
2269 n->dead = 1;
2270 } else
2271 np = &n->next;
2272 write_unlock(&n->lock);
4f494554
TG
2273 if (release)
2274 neigh_cleanup_and_release(n);
1da177e4
LT
2275 }
2276 }
2277}
2278EXPORT_SYMBOL(__neigh_for_each_release);
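/*
 * Hypothetical sketch: a release callback for __neigh_for_each_release().
 * A nonzero return unlinks the entry and the core then calls
 * neigh_cleanup_and_release() on it; the caller must hold tbl->lock for
 * writing with BH disabled, as the comment above notes.
 */
static int example_release_failed(struct neighbour *n)
{
	return (n->nud_state & NUD_FAILED) != 0;
}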
2279
2280#ifdef CONFIG_PROC_FS
2281
2282static struct neighbour *neigh_get_first(struct seq_file *seq)
2283{
2284 struct neigh_seq_state *state = seq->private;
1218854a 2285 struct net *net = seq_file_net(seq);
d6bf7817 2286 struct neigh_hash_table *nht = state->nht;
1da177e4
LT
2287 struct neighbour *n = NULL;
2288 int bucket = state->bucket;
2289
2290 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
d6bf7817 2291 for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
767e97e1 2292 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
1da177e4
LT
2293
2294 while (n) {
878628fb 2295 if (!net_eq(dev_net(n->dev), net))
426b5303 2296 goto next;
1da177e4
LT
2297 if (state->neigh_sub_iter) {
2298 loff_t fakep = 0;
2299 void *v;
2300
2301 v = state->neigh_sub_iter(state, n, &fakep);
2302 if (!v)
2303 goto next;
2304 }
2305 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2306 break;
2307 if (n->nud_state & ~NUD_NOARP)
2308 break;
767e97e1
ED
2309next:
2310 n = rcu_dereference_bh(n->next);
1da177e4
LT
2311 }
2312
2313 if (n)
2314 break;
2315 }
2316 state->bucket = bucket;
2317
2318 return n;
2319}
2320
2321static struct neighbour *neigh_get_next(struct seq_file *seq,
2322 struct neighbour *n,
2323 loff_t *pos)
2324{
2325 struct neigh_seq_state *state = seq->private;
1218854a 2326 struct net *net = seq_file_net(seq);
d6bf7817 2327 struct neigh_hash_table *nht = state->nht;
1da177e4
LT
2328
2329 if (state->neigh_sub_iter) {
2330 void *v = state->neigh_sub_iter(state, n, pos);
2331 if (v)
2332 return n;
2333 }
767e97e1 2334 n = rcu_dereference_bh(n->next);
1da177e4
LT
2335
2336 while (1) {
2337 while (n) {
878628fb 2338 if (!net_eq(dev_net(n->dev), net))
426b5303 2339 goto next;
1da177e4
LT
2340 if (state->neigh_sub_iter) {
2341 void *v = state->neigh_sub_iter(state, n, pos);
2342 if (v)
2343 return n;
2344 goto next;
2345 }
2346 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2347 break;
2348
2349 if (n->nud_state & ~NUD_NOARP)
2350 break;
767e97e1
ED
2351next:
2352 n = rcu_dereference_bh(n->next);
1da177e4
LT
2353 }
2354
2355 if (n)
2356 break;
2357
d6bf7817 2358 if (++state->bucket > nht->hash_mask)
1da177e4
LT
2359 break;
2360
767e97e1 2361 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
1da177e4
LT
2362 }
2363
2364 if (n && pos)
2365 --(*pos);
2366 return n;
2367}
2368
2369static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2370{
2371 struct neighbour *n = neigh_get_first(seq);
2372
2373 if (n) {
745e2031 2374 --(*pos);
1da177e4
LT
2375 while (*pos) {
2376 n = neigh_get_next(seq, n, pos);
2377 if (!n)
2378 break;
2379 }
2380 }
2381 return *pos ? NULL : n;
2382}
2383
2384static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2385{
2386 struct neigh_seq_state *state = seq->private;
1218854a 2387 struct net *net = seq_file_net(seq);
1da177e4
LT
2388 struct neigh_table *tbl = state->tbl;
2389 struct pneigh_entry *pn = NULL;
2390 int bucket = state->bucket;
2391
2392 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2393 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2394 pn = tbl->phash_buckets[bucket];
878628fb 2395 while (pn && !net_eq(pneigh_net(pn), net))
426b5303 2396 pn = pn->next;
1da177e4
LT
2397 if (pn)
2398 break;
2399 }
2400 state->bucket = bucket;
2401
2402 return pn;
2403}
2404
2405static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2406 struct pneigh_entry *pn,
2407 loff_t *pos)
2408{
2409 struct neigh_seq_state *state = seq->private;
1218854a 2410 struct net *net = seq_file_net(seq);
1da177e4
LT
2411 struct neigh_table *tbl = state->tbl;
2412
2413 pn = pn->next;
2414 while (!pn) {
2415 if (++state->bucket > PNEIGH_HASHMASK)
2416 break;
2417 pn = tbl->phash_buckets[state->bucket];
878628fb 2418 while (pn && !net_eq(pneigh_net(pn), net))
426b5303 2419 pn = pn->next;
1da177e4
LT
2420 if (pn)
2421 break;
2422 }
2423
2424 if (pn && pos)
2425 --(*pos);
2426
2427 return pn;
2428}
2429
2430static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2431{
2432 struct pneigh_entry *pn = pneigh_get_first(seq);
2433
2434 if (pn) {
745e2031 2435 --(*pos);
1da177e4
LT
2436 while (*pos) {
2437 pn = pneigh_get_next(seq, pn, pos);
2438 if (!pn)
2439 break;
2440 }
2441 }
2442 return *pos ? NULL : pn;
2443}
2444
2445static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2446{
2447 struct neigh_seq_state *state = seq->private;
2448 void *rc;
745e2031 2449 loff_t idxpos = *pos;
1da177e4 2450
745e2031 2451 rc = neigh_get_idx(seq, &idxpos);
1da177e4 2452 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
745e2031 2453 rc = pneigh_get_idx(seq, &idxpos);
1da177e4
LT
2454
2455 return rc;
2456}
2457
2458void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
d6bf7817 2459 __acquires(rcu_bh)
1da177e4
LT
2460{
2461 struct neigh_seq_state *state = seq->private;
1da177e4
LT
2462
2463 state->tbl = tbl;
2464 state->bucket = 0;
2465 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2466
d6bf7817
ED
2467 rcu_read_lock_bh();
2468 state->nht = rcu_dereference_bh(tbl->nht);
767e97e1 2469
745e2031 2470 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
1da177e4
LT
2471}
2472EXPORT_SYMBOL(neigh_seq_start);
2473
2474void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2475{
2476 struct neigh_seq_state *state;
2477 void *rc;
2478
2479 if (v == SEQ_START_TOKEN) {
bff69732 2480 rc = neigh_get_first(seq);
1da177e4
LT
2481 goto out;
2482 }
2483
2484 state = seq->private;
2485 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2486 rc = neigh_get_next(seq, v, NULL);
2487 if (rc)
2488 goto out;
2489 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2490 rc = pneigh_get_first(seq);
2491 } else {
2492 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2493 rc = pneigh_get_next(seq, v, NULL);
2494 }
2495out:
2496 ++(*pos);
2497 return rc;
2498}
2499EXPORT_SYMBOL(neigh_seq_next);
2500
2501void neigh_seq_stop(struct seq_file *seq, void *v)
d6bf7817 2502 __releases(rcu_bh)
1da177e4 2503{
d6bf7817 2504 rcu_read_unlock_bh();
1da177e4
LT
2505}
2506EXPORT_SYMBOL(neigh_seq_stop);
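/*
 * Hypothetical sketch of a protocol-side seq_file ->start() hook built on
 * the iterators above; net/ipv4/arp.c does roughly this for /proc/net/arp,
 * skipping NOARP entries.  neigh_seq_next() and neigh_seq_stop() are then
 * used directly as the ->next and ->stop operations.
 */
#include <linux/seq_file.h>
#include <net/arp.h>

static void *example_arp_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}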
2507
2508/* statistics via seq_file */
2509
2510static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2511{
81c1ebfc 2512 struct neigh_table *tbl = seq->private;
1da177e4
LT
2513 int cpu;
2514
2515 if (*pos == 0)
2516 return SEQ_START_TOKEN;
4ec93edb 2517
0f23174a 2518 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
1da177e4
LT
2519 if (!cpu_possible(cpu))
2520 continue;
2521 *pos = cpu+1;
2522 return per_cpu_ptr(tbl->stats, cpu);
2523 }
2524 return NULL;
2525}
2526
2527static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2528{
81c1ebfc 2529 struct neigh_table *tbl = seq->private;
1da177e4
LT
2530 int cpu;
2531
0f23174a 2532 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
1da177e4
LT
2533 if (!cpu_possible(cpu))
2534 continue;
2535 *pos = cpu+1;
2536 return per_cpu_ptr(tbl->stats, cpu);
2537 }
2538 return NULL;
2539}
2540
2541static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2542{
2543
2544}
2545
2546static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2547{
81c1ebfc 2548 struct neigh_table *tbl = seq->private;
1da177e4
LT
2549 struct neigh_statistics *st = v;
2550
2551 if (v == SEQ_START_TOKEN) {
9a6d276e 2552 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
1da177e4
LT
2553 return 0;
2554 }
2555
2556 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
9a6d276e 2557 "%08lx %08lx %08lx %08lx %08lx\n",
1da177e4
LT
2558 atomic_read(&tbl->entries),
2559
2560 st->allocs,
2561 st->destroys,
2562 st->hash_grows,
2563
2564 st->lookups,
2565 st->hits,
2566
2567 st->res_failed,
2568
2569 st->rcv_probes_mcast,
2570 st->rcv_probes_ucast,
2571
2572 st->periodic_gc_runs,
9a6d276e
NH
2573 st->forced_gc_runs,
2574 st->unres_discards
1da177e4
LT
2575 );
2576
2577 return 0;
2578}
2579
f690808e 2580static const struct seq_operations neigh_stat_seq_ops = {
1da177e4
LT
2581 .start = neigh_stat_seq_start,
2582 .next = neigh_stat_seq_next,
2583 .stop = neigh_stat_seq_stop,
2584 .show = neigh_stat_seq_show,
2585};
2586
2587static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2588{
2589 int ret = seq_open(file, &neigh_stat_seq_ops);
2590
2591 if (!ret) {
2592 struct seq_file *sf = file->private_data;
81c1ebfc 2593 sf->private = PDE(inode)->data;
1da177e4
LT
2594 }
2595 return ret;
2596};
2597
9a32144e 2598static const struct file_operations neigh_stat_seq_fops = {
1da177e4
LT
2599 .owner = THIS_MODULE,
2600 .open = neigh_stat_seq_open,
2601 .read = seq_read,
2602 .llseek = seq_lseek,
2603 .release = seq_release,
2604};
2605
2606#endif /* CONFIG_PROC_FS */
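/*
 * Hypothetical sketch: wiring a table's statistics file to the seq_file
 * operations above.  neigh_table_init() does roughly this when
 * CONFIG_PROC_FS is enabled (error handling omitted); it relies on the
 * proc_fs and net_namespace headers this file already pulls in.
 */
#ifdef CONFIG_PROC_FS
static void example_register_stats(struct neigh_table *tbl)
{
	proc_create_data(tbl->id, 0444, init_net.proc_net_stat,
			 &neigh_stat_seq_fops, tbl);
}
#endif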
2607
339bf98f
TG
2608static inline size_t neigh_nlmsg_size(void)
2609{
2610 return NLMSG_ALIGN(sizeof(struct ndmsg))
2611 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2612 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2613 + nla_total_size(sizeof(struct nda_cacheinfo))
2614 + nla_total_size(4); /* NDA_PROBES */
2615}
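/*
 * Illustrative arithmetic, not authoritative: with MAX_ADDR_LEN of 32,
 * sizeof(struct ndmsg) of 12 and sizeof(struct nda_cacheinfo) of 16, and
 * nla_total_size(n) being NLA_ALIGN(4 + n), the estimate above works out
 * to 12 + 36 + 36 + 20 + 8 = 112 bytes per notification.
 */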
2616
b8673311 2617static void __neigh_notify(struct neighbour *n, int type, int flags)
1da177e4 2618{
c346dca1 2619 struct net *net = dev_net(n->dev);
8b8aec50 2620 struct sk_buff *skb;
b8673311 2621 int err = -ENOBUFS;
1da177e4 2622
339bf98f 2623 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
8b8aec50 2624 if (skb == NULL)
b8673311 2625 goto errout;
1da177e4 2626
b8673311 2627 err = neigh_fill_info(skb, n, 0, 0, type, flags);
26932566
PM
2628 if (err < 0) {
2629 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2630 WARN_ON(err == -EMSGSIZE);
2631 kfree_skb(skb);
2632 goto errout;
2633 }
1ce85fe4
PNA
2634 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2635 return;
b8673311
TG
2636errout:
2637 if (err < 0)
426b5303 2638 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
1da177e4
LT
2639}
2640
d961db35 2641#ifdef CONFIG_ARPD
b8673311 2642void neigh_app_ns(struct neighbour *n)
1da177e4 2643{
b8673311
TG
2644 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2645}
0a204500 2646EXPORT_SYMBOL(neigh_app_ns);
1da177e4
LT
2647#endif /* CONFIG_ARPD */
2648
2649#ifdef CONFIG_SYSCTL
2650
54716e3b
EB
2651#define NEIGH_VARS_MAX 19
2652
1da177e4
LT
2653static struct neigh_sysctl_table {
2654 struct ctl_table_header *sysctl_header;
54716e3b 2655 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
c3bac5a7 2656 char *dev_name;
ab32ea5d 2657} neigh_sysctl_template __read_mostly = {
1da177e4
LT
2658 .neigh_vars = {
2659 {
1da177e4
LT
2660 .procname = "mcast_solicit",
2661 .maxlen = sizeof(int),
2662 .mode = 0644,
6d9f239a 2663 .proc_handler = proc_dointvec,
1da177e4
LT
2664 },
2665 {
1da177e4
LT
2666 .procname = "ucast_solicit",
2667 .maxlen = sizeof(int),
2668 .mode = 0644,
6d9f239a 2669 .proc_handler = proc_dointvec,
1da177e4
LT
2670 },
2671 {
1da177e4
LT
2672 .procname = "app_solicit",
2673 .maxlen = sizeof(int),
2674 .mode = 0644,
6d9f239a 2675 .proc_handler = proc_dointvec,
1da177e4
LT
2676 },
2677 {
1da177e4
LT
2678 .procname = "retrans_time",
2679 .maxlen = sizeof(int),
2680 .mode = 0644,
6d9f239a 2681 .proc_handler = proc_dointvec_userhz_jiffies,
1da177e4
LT
2682 },
2683 {
1da177e4
LT
2684 .procname = "base_reachable_time",
2685 .maxlen = sizeof(int),
2686 .mode = 0644,
6d9f239a 2687 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
2688 },
2689 {
1da177e4
LT
2690 .procname = "delay_first_probe_time",
2691 .maxlen = sizeof(int),
2692 .mode = 0644,
6d9f239a 2693 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
2694 },
2695 {
1da177e4
LT
2696 .procname = "gc_stale_time",
2697 .maxlen = sizeof(int),
2698 .mode = 0644,
6d9f239a 2699 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
2700 },
2701 {
1da177e4
LT
2702 .procname = "unres_qlen",
2703 .maxlen = sizeof(int),
2704 .mode = 0644,
6d9f239a 2705 .proc_handler = proc_dointvec,
1da177e4
LT
2706 },
2707 {
1da177e4
LT
2708 .procname = "proxy_qlen",
2709 .maxlen = sizeof(int),
2710 .mode = 0644,
6d9f239a 2711 .proc_handler = proc_dointvec,
1da177e4
LT
2712 },
2713 {
1da177e4
LT
2714 .procname = "anycast_delay",
2715 .maxlen = sizeof(int),
2716 .mode = 0644,
6d9f239a 2717 .proc_handler = proc_dointvec_userhz_jiffies,
1da177e4
LT
2718 },
2719 {
1da177e4
LT
2720 .procname = "proxy_delay",
2721 .maxlen = sizeof(int),
2722 .mode = 0644,
6d9f239a 2723 .proc_handler = proc_dointvec_userhz_jiffies,
1da177e4
LT
2724 },
2725 {
1da177e4
LT
2726 .procname = "locktime",
2727 .maxlen = sizeof(int),
2728 .mode = 0644,
6d9f239a 2729 .proc_handler = proc_dointvec_userhz_jiffies,
1da177e4 2730 },
d12af679 2731 {
d12af679
EB
2732 .procname = "retrans_time_ms",
2733 .maxlen = sizeof(int),
2734 .mode = 0644,
6d9f239a 2735 .proc_handler = proc_dointvec_ms_jiffies,
d12af679
EB
2736 },
2737 {
d12af679
EB
2738 .procname = "base_reachable_time_ms",
2739 .maxlen = sizeof(int),
2740 .mode = 0644,
6d9f239a 2741 .proc_handler = proc_dointvec_ms_jiffies,
d12af679 2742 },
1da177e4 2743 {
1da177e4
LT
2744 .procname = "gc_interval",
2745 .maxlen = sizeof(int),
2746 .mode = 0644,
6d9f239a 2747 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
2748 },
2749 {
1da177e4
LT
2750 .procname = "gc_thresh1",
2751 .maxlen = sizeof(int),
2752 .mode = 0644,
6d9f239a 2753 .proc_handler = proc_dointvec,
1da177e4
LT
2754 },
2755 {
1da177e4
LT
2756 .procname = "gc_thresh2",
2757 .maxlen = sizeof(int),
2758 .mode = 0644,
6d9f239a 2759 .proc_handler = proc_dointvec,
1da177e4
LT
2760 },
2761 {
1da177e4
LT
2762 .procname = "gc_thresh3",
2763 .maxlen = sizeof(int),
2764 .mode = 0644,
6d9f239a 2765 .proc_handler = proc_dointvec,
1da177e4 2766 },
c3bac5a7 2767 {},
1da177e4
LT
2768 },
2769};
2770
2771int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
54716e3b 2772 char *p_name, proc_handler *handler)
1da177e4 2773{
3c607bbb 2774 struct neigh_sysctl_table *t;
1da177e4 2775 const char *dev_name_source = NULL;
c3bac5a7
PE
2776
2777#define NEIGH_CTL_PATH_ROOT 0
2778#define NEIGH_CTL_PATH_PROTO 1
2779#define NEIGH_CTL_PATH_NEIGH 2
2780#define NEIGH_CTL_PATH_DEV 3
2781
2782 struct ctl_path neigh_path[] = {
2783 { .procname = "net", },
2784 { .procname = "proto", },
2785 { .procname = "neigh", },
2786 { .procname = "default", },
c3bac5a7
PE
2787 { },
2788 };
1da177e4 2789
3c607bbb 2790 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
1da177e4 2791 if (!t)
3c607bbb
PE
2792 goto err;
2793
1da177e4
LT
2794 t->neigh_vars[0].data = &p->mcast_probes;
2795 t->neigh_vars[1].data = &p->ucast_probes;
2796 t->neigh_vars[2].data = &p->app_probes;
2797 t->neigh_vars[3].data = &p->retrans_time;
2798 t->neigh_vars[4].data = &p->base_reachable_time;
2799 t->neigh_vars[5].data = &p->delay_probe_time;
2800 t->neigh_vars[6].data = &p->gc_staletime;
2801 t->neigh_vars[7].data = &p->queue_len;
2802 t->neigh_vars[8].data = &p->proxy_qlen;
2803 t->neigh_vars[9].data = &p->anycast_delay;
2804 t->neigh_vars[10].data = &p->proxy_delay;
2805 t->neigh_vars[11].data = &p->locktime;
d12af679
EB
2806 t->neigh_vars[12].data = &p->retrans_time;
2807 t->neigh_vars[13].data = &p->base_reachable_time;
1da177e4
LT
2808
2809 if (dev) {
2810 dev_name_source = dev->name;
d12af679
EB
2811 /* Terminate the table early */
2812 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
1da177e4 2813 } else {
c3bac5a7 2814 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
d12af679
EB
2815 t->neigh_vars[14].data = (int *)(p + 1);
2816 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2817 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2818 t->neigh_vars[17].data = (int *)(p + 1) + 3;
1da177e4
LT
2819 }
2820
1da177e4 2821
f8572d8f 2822 if (handler) {
1da177e4
LT
2823 /* RetransTime */
2824 t->neigh_vars[3].proc_handler = handler;
1da177e4
LT
2825 t->neigh_vars[3].extra1 = dev;
2826 /* ReachableTime */
2827 t->neigh_vars[4].proc_handler = handler;
1da177e4
LT
2828 t->neigh_vars[4].extra1 = dev;
2829 /* RetransTime (in milliseconds)*/
d12af679 2830 t->neigh_vars[12].proc_handler = handler;
d12af679 2831 t->neigh_vars[12].extra1 = dev;
1da177e4 2832 /* ReachableTime (in milliseconds) */
d12af679 2833 t->neigh_vars[13].proc_handler = handler;
d12af679 2834 t->neigh_vars[13].extra1 = dev;
1da177e4
LT
2835 }
2836
c3bac5a7
PE
2837 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2838 if (!t->dev_name)
1da177e4 2839 goto free;
1da177e4 2840
c3bac5a7 2841 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
c3bac5a7 2842 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
1da177e4 2843
4ab438fc 2844 t->sysctl_header =
57da52c1 2845 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
3c607bbb 2846 if (!t->sysctl_header)
1da177e4 2847 goto free_procname;
3c607bbb 2848
1da177e4
LT
2849 p->sysctl_table = t;
2850 return 0;
2851
3c607bbb 2852free_procname:
c3bac5a7 2853 kfree(t->dev_name);
3c607bbb 2854free:
1da177e4 2855 kfree(t);
3c607bbb
PE
2856err:
2857 return -ENOBUFS;
1da177e4 2858}
0a204500 2859EXPORT_SYMBOL(neigh_sysctl_register);
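/*
 * Hypothetical sketch: a protocol registering its per-device neighbour
 * tunables under /proc/sys/net/<proto>/neigh/<dev>/.  IPv4 passes "ipv4"
 * as p_name; a NULL handler keeps the generic proc_dointvec handlers set
 * up in neigh_sysctl_template.  The example_* name is illustrative only.
 */
static int example_register_neigh_sysctls(struct net_device *dev,
					  struct neigh_parms *parms)
{
	return neigh_sysctl_register(dev, parms, "ipv4", NULL);
}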
1da177e4
LT
2860
2861void neigh_sysctl_unregister(struct neigh_parms *p)
2862{
2863 if (p->sysctl_table) {
2864 struct neigh_sysctl_table *t = p->sysctl_table;
2865 p->sysctl_table = NULL;
2866 unregister_sysctl_table(t->sysctl_header);
c3bac5a7 2867 kfree(t->dev_name);
1da177e4
LT
2868 kfree(t);
2869 }
2870}
0a204500 2871EXPORT_SYMBOL(neigh_sysctl_unregister);
1da177e4
LT
2872
2873#endif /* CONFIG_SYSCTL */
2874
c8822a4e
TG
2875static int __init neigh_init(void)
2876{
2877 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2878 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2879 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2880
2881 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2882 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2883
2884 return 0;
2885}
2886
2887subsys_initcall(neigh_init);
2888