blob: 50a8c73caf972ae64b963a02322f801669d77eaf [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#include <linux/config.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/socket.h>
23#include <linux/sched.h>
24#include <linux/netdevice.h>
25#include <linux/proc_fs.h>
26#ifdef CONFIG_SYSCTL
27#include <linux/sysctl.h>
28#endif
29#include <linux/times.h>
30#include <net/neighbour.h>
31#include <net/dst.h>
32#include <net/sock.h>
33#include <linux/rtnetlink.h>
34#include <linux/random.h>
Paulo Marques543537b2005-06-23 00:09:02 -070035#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
37#define NEIGH_DEBUG 1
38
39#define NEIGH_PRINTK(x...) printk(x)
40#define NEIGH_NOPRINTK(x...) do { ; } while(0)
41#define NEIGH_PRINTK0 NEIGH_PRINTK
42#define NEIGH_PRINTK1 NEIGH_NOPRINTK
43#define NEIGH_PRINTK2 NEIGH_NOPRINTK
44
45#if NEIGH_DEBUG >= 1
46#undef NEIGH_PRINTK1
47#define NEIGH_PRINTK1 NEIGH_PRINTK
48#endif
49#if NEIGH_DEBUG >= 2
50#undef NEIGH_PRINTK2
51#define NEIGH_PRINTK2 NEIGH_PRINTK
52#endif
53
54#define PNEIGH_HASHMASK 0xF
55
56static void neigh_timer_handler(unsigned long arg);
57#ifdef CONFIG_ARPD
58static void neigh_app_notify(struct neighbour *n);
59#endif
60static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62
63static struct neigh_table *neigh_tables;
Amos Waterland45fc3b12005-09-24 16:53:16 -070064#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -070065static struct file_operations neigh_stat_seq_fops;
Amos Waterland45fc3b12005-09-24 16:53:16 -070066#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
68/*
69 Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71 - All the scans/updates to hash buckets MUST be made under this lock.
72 - NOTHING clever should be made under this lock: no callbacks
73 to protocol backends, no attempts to send something to network.
74 It will result in deadlocks, if backend/driver wants to use neighbour
75 cache.
76 - If the entry requires some non-trivial actions, increase
77 its reference count and release table lock.
78
79 Neighbour entries are protected:
80 - with reference count.
81 - with rwlock neigh->lock
82
83 Reference count prevents destruction.
84
85 neigh->lock mainly serializes ll address data and its validity state.
86 However, the same lock is used to protect another entry fields:
87 - timer
88 - resolution queue
89
90 Again, nothing clever shall be made under neigh->lock,
91 the most complicated procedure, which we allow is dev->hard_header.
92 It is supposed, that dev->hard_header is simplistic and does
93 not make callbacks to neighbour tables.
94
95 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
96 list of neighbour tables. This list is used only in process context,
97 */
98
99static DEFINE_RWLOCK(neigh_tbl_lock);
100
/*
 * Output handler installed on dead/unresolvable neighbour entries:
 * drop the packet and report the path as down.
 */
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
106
107/*
108 * It is random distribution in the interval (1/2)*base...(3/2)*base.
109 * It corresponds to default IPv6 settings and is not overridable,
110 * because it is really reasonable choice.
111 */
112
/*
 * Pick a randomized reachability timeout, uniformly distributed in
 * [base/2, 3*base/2).  Returns 0 when base is 0 (feature disabled).
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;

	return (base >> 1) + (net_random() % base);
}
117
118
/*
 * Forcibly shrink the neighbour cache: walk every hash bucket under the
 * table write lock and unlink entries that are held only by the table
 * itself (refcnt == 1) and are not NUD_PERMANENT.
 *
 * Called from neigh_alloc() when the table crosses its gc thresholds.
 * Returns 1 if at least one entry was reclaimed, 0 otherwise.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				/* Unlink and mark dead under n->lock, then
				 * drop the table's reference; the entry is
				 * actually freed by the final neigh_release().
				 */
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Record flush time so neigh_alloc() can rate-limit forced GC runs. */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
157
158static int neigh_del_timer(struct neighbour *n)
159{
160 if ((n->nud_state & NUD_IN_TIMER) &&
161 del_timer(&n->timer)) {
162 neigh_release(n);
163 return 1;
164 }
165 return 0;
166}
167
168static void pneigh_queue_purge(struct sk_buff_head *list)
169{
170 struct sk_buff *skb;
171
172 while ((skb = skb_dequeue(list)) != NULL) {
173 dev_put(skb->dev);
174 kfree_skb(skb);
175 }
176}
177
/*
 * Unlink every neighbour entry bound to @dev (all entries if @dev is
 * NULL) from the hash table.  Caller must hold tbl->lock for writing.
 *
 * Entries that are still referenced elsewhere cannot be destroyed yet;
 * they are neutered in place: timers killed, queued skbs purged, output
 * redirected to neigh_blackhole, and the state downgraded so nothing
 * new is resolved through them.  Destruction then happens on the last
 * neigh_release().
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
/*
 * Flush all neighbour entries for @dev after its hardware address
 * changed.  Takes the table write lock around neigh_flush_dev().
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
225
/*
 * Device is going down: flush its neighbour entries and proxy entries,
 * then stop the proxy timer and drain the proxy queue.  The timer and
 * queue teardown happen outside the table lock (del_timer_sync may
 * sleep-wait for a running handler).  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
237
/*
 * Allocate and minimally initialize a neighbour entry for @tbl.
 *
 * Enforces the gc thresholds first: above gc_thresh3, or above
 * gc_thresh2 with no flush in the last 5 seconds, a forced GC is
 * attempted; if that frees nothing and we are still over gc_thresh3,
 * the allocation is refused (returns NULL).
 *
 * The new entry starts with refcnt 1 and dead = 1 — it is not yet
 * hashed; neigh_create() clears `dead` once the entry is inserted.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;	/* not in the hash table yet */
out:
	return n;

out_entries:
	/* Undo the optimistic entries increment on failure. */
	atomic_dec(&tbl->entries);
	goto out;
}
280
281static struct neighbour **neigh_hash_alloc(unsigned int entries)
282{
283 unsigned long size = entries * sizeof(struct neighbour *);
284 struct neighbour **ret;
285
286 if (size <= PAGE_SIZE) {
Andrew Morton77d04bd2006-04-07 14:52:59 -0700287 ret = kzalloc(size, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 } else {
289 ret = (struct neighbour **)
Andrew Morton77d04bd2006-04-07 14:52:59 -0700290 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292 return ret;
293}
294
295static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
296{
297 unsigned long size = entries * sizeof(struct neighbour *);
298
299 if (size <= PAGE_SIZE)
300 kfree(hash);
301 else
302 free_pages((unsigned long)hash, get_order(size));
303}
304
/*
 * Grow the hash table to @new_entries buckets (must be a power of two)
 * and rehash every entry.  Caller holds tbl->lock for writing.
 * A fresh hash_rnd is drawn so bucket placement is re-randomized.
 * On allocation failure the old table is silently kept.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		/* Move each chain entry to its bucket in the new table. */
		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
340
/*
 * Look up the neighbour entry for (@pkey, @dev) in @tbl.
 * Returns the entry with an extra reference held (caller must
 * neigh_release), or NULL if not found.
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			/* Take the reference before dropping the lock. */
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
361
/*
 * Device-independent lookup: match on the protocol key only, ignoring
 * which device the entry is bound to.  Returns a referenced entry or
 * NULL; caller must neigh_release() a non-NULL result.
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
381
/*
 * Create (or find) the neighbour entry for (@pkey, @dev).
 *
 * Allocation and the protocol/device constructors run without the
 * table lock; the hash insertion is then done under the write lock,
 * re-checking for a concurrently inserted duplicate — if one is found
 * it is returned (with a reference) and the new entry is released.
 * Returns the entry with an extra reference, or an ERR_PTR on failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Start out "long unconfirmed" so the first use triggers probing. */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	/* Parms may have been unregistered while we were unlocked. */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Duplicate check: someone may have inserted the same key while
	 * we were constructing the entry outside the lock. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;		/* now visible in the table */
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
449
/*
 * Look up a proxy neighbour entry for (@pkey, @dev); if @creat is
 * non-zero and none exists, allocate and insert one (GFP_KERNEL, so
 * process context only on the create path).
 *
 * The hash folds the last 4 bytes of the key into PNEIGH_HASHMASK
 * buckets.  NOTE(review): the u32 load assumes the key tail is
 * suitably aligned and at least 4 bytes long — holds for the callers'
 * fixed-size protocol keys; verify for any new key layout.
 *
 * Returns the (unreferenced) entry or NULL.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		/* A device-less entry proxies for all devices. */
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		/* Constructor refused the entry: undo and report failure. */
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
500
501
/*
 * Remove the proxy entry matching (@pkey, @dev) exactly.  Uses the
 * same key-tail hash as pneigh_lookup().  The destructor and free run
 * after the table lock is dropped.  Returns 0 on success, -ENOENT if
 * no matching entry exists.
 */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
531
/*
 * Drop all proxy entries bound to @dev (or every entry when @dev is
 * NULL).  Caller holds tbl->lock for writing (see neigh_ifdown()).
 * Always returns -ENOENT; the callers ignore the return value.
 */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
554
555
556/*
557 * neighbour must already be out of the table;
558 *
559 */
/*
 * Final teardown of a neighbour entry once the last reference is gone.
 * The entry must already be unlinked from the table (dead != 0) — a
 * live entry is a bug and is reported (with a stack dump) and leaked
 * rather than corrupting the table.  Detaches and releases cached
 * hardware headers, runs the parms destructor, purges queued skbs,
 * and returns the object to the slab cache.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	/* Detach each cached header; dst entries may still hold their own
	 * references, so redirect their output to the blackhole first. */
	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->parms->neigh_destructor)
		(neigh->parms->neigh_destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
599
600/* Neighbour state is suspicious;
601 disable fast path.
602
603 Called with write_locked neigh.
604 */
/* Neighbour state is suspicious: disable the fast path by routing both
 * the entry's output and every cached header's output through the slow
 * ops->output handler (which re-validates resolution).
 *
 * Called with neigh->lock held for writing.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
616
617/* Neighbour state is OK;
618 enable fast path.
619
620 Called with write_locked neigh.
621 */
/* Neighbour state is OK: enable the fast path by switching the entry
 * to connected_output and every cached header to the hh fast-path
 * handler.  Mirror image of neigh_suspect().
 *
 * Called with neigh->lock held for writing.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
633
/*
 * Periodic garbage collector.  Each run scans exactly one hash chain
 * (round-robin via hash_chain_gc) and reaps entries that are
 * unreferenced and either failed or stale past gc_staletime.  Every
 * 300 seconds it also re-randomizes reachable_time for all parms.
 * Re-arms itself so the whole table is cycled about every
 * base_reachable_time/2 ticks.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		/* Permanent entries and entries with a live timer are
		 * never reaped here. */
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
 	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

 	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
701
702static __inline__ int neigh_max_probes(struct neighbour *n)
703{
704 struct neigh_parms *p = n->parms;
705 return (n->nud_state & NUD_PROBE ?
706 p->ucast_probes :
707 p->ucast_probes + p->app_probes + p->mcast_probes);
708}
709
/*
 * Arm the neighbour's timer for @when.  The caller has taken a
 * reference for the timer; if mod_timer() reports the timer was
 * already pending, that is a refcounting bug — log it loudly.
 */
static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718
719/* Called when a timer expires for a neighbour entry. */
720
/* Called when a timer expires for a neighbour entry. */

/*
 * NUD state-machine tick.  Depending on the current state it either
 * extends REACHABLE, downgrades to DELAY/STALE, promotes DELAY to
 * REACHABLE or PROBE, or — once the probe budget is exhausted — marks
 * the entry FAILED and reports the queued packets unreachable.  If the
 * resulting state still needs a timer, it re-arms itself; solicit()
 * probes are sent after dropping the entry lock.  Drops the timer's
 * reference on exit.
 */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
		/* Can legitimately race with del_timer on SMP. */
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			/* Confirmation arrived while we were delaying. */
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* Clamp to at least HZ/2 in the future, then re-arm; a
		 * fresh timer needs its own reference. */
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
830
/*
 * Slow path of neigh_event_send(): kick off (or continue) resolution
 * for @neigh, queueing @skb while the address is unresolved.
 *
 * Returns 0 when the caller may transmit immediately, 1 when the skb
 * was queued (or dropped because no probe types are configured and
 * the entry went straight to NUD_FAILED).  When the arp_queue is full,
 * the oldest queued skb is discarded to make room.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* Already resolved or resolution already in progress: nothing to do. */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);	/* reference for the timer */
			neigh_add_timer(neigh, now + 1);
		} else {
			/* No way to probe: fail immediately and drop the skb. */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
886
887static __inline__ void neigh_update_hhs(struct neighbour *neigh)
888{
889 struct hh_cache *hh;
890 void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
891 neigh->dev->header_cache_update;
892
893 if (update) {
894 for (hh = neigh->hh; hh; hh = hh->hh_next) {
895 write_lock_bh(&hh->hh_lock);
896 update(hh, neigh->dev, neigh->ha);
897 write_unlock_bh(&hh->hh_lock);
898 }
899 }
900}
901
902
903
904/* Generic update routine.
905 -- lladdr is new lladdr or NULL, if it is not supplied.
906 -- new is new state.
907 -- flags
908 NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
909 if it is different.
910 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
911 lladdr instead of overriding it
912 if it is different.
913 It also allows to retain current state
914 if lladdr is unchanged.
915 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
916
917 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
918 NTF_ROUTER flag.
919 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
920 a router.
921
922 Caller MUST hold reference count on the entry.
923 */
924
/*
 * Generic neighbour update (see the block comment above for the flag
 * semantics).  Serialized by neigh->lock; caller must hold a reference.
 * Returns 0 on success, -EPERM for non-admin updates of NOARP/PERMANENT
 * entries, -EINVAL when no lladdr is supplied and none is cached.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Only administrative updates may touch static/NOARP entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		/* Entry is being invalidated: stop its timer, leave the
		 * fast path, and just record the new state. */
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Weak override: keep the old address but
				 * demote the entry to STALE. */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		/* New link-layer address: propagate it into every cached
		 * hardware header, and back-date confirmation so an
		 * unconnected entry will be re-verified promptly. */
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
1066
1067struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1068 u8 *lladdr, void *saddr,
1069 struct net_device *dev)
1070{
1071 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1072 lladdr || !dev->addr_len);
1073 if (neigh)
1074 neigh_update(neigh, lladdr, NUD_STALE,
1075 NEIGH_UPDATE_F_OVERRIDE);
1076 return neigh;
1077}
1078
/*
 * Attach a cached hardware header for @protocol to @dst, creating and
 * linking a new hh_cache entry onto the neighbour when none exists.
 * The hh output handler is chosen from the neighbour's current state
 * (fast path when connected, slow path otherwise).  Each of the
 * neighbour's list and the dst takes its own hh reference.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	/* Reuse an existing cached header for this protocol, if any. */
	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			/* Device refused to fill the header: discard. */
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		/* Reference for the dst's pointer. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1112
1113/* This function can be used in contexts, where only old dev_queue_xmit
1114 worked, f.e. if you want to override normal output path (eql, shaper),
1115 but resolution is not made yet.
1116 */
1117
/*
 * Legacy output path for devices/users that bypass neighbour
 * resolution: build the hardware header directly (falling back to
 * rebuild_header) and hand the skb to dev_queue_xmit().  Returns 0
 * when header construction fails and the skb was consumed by
 * rebuild_header.
 */
int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	/* Trim the skb so it starts at the network header. */
	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
		    	     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
1132
1133/* Slow and careful. */
1134
/*
 * Slow, careful output path: trigger/await address resolution via
 * neigh_event_send(), then build the hardware header — initializing
 * the dst's cached header under the neighbour write lock on first use
 * — and queue the packet.  If resolution is pending the skb has been
 * queued by neigh_event_send() and 0 is returned; on header failure
 * the skb is freed and -EINVAL returned.
 */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			/* Re-check under the lock: another CPU may have
			 * initialized dst->hh meanwhile. */
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices: we only read neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1177
/* As fast as possible without hh cache */

/*
 * Output path for neighbours in a connected (NUD_CONNECTED) state:
 * no resolution step, just copy the cached link-layer address under
 * the read lock, build the header and transmit.  Frees the skb and
 * returns -EINVAL when header construction fails.
 */
int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
1201
/*
 * Timer handler for tbl->proxy_timer.  Walks the table's proxy queue
 * and delivers (via tbl->proxy_redo) every skb whose scheduled time
 * has arrived; skbs whose device went down are dropped instead.  If
 * entries remain, the timer is re-armed for the earliest one.
 *
 * Runs in timer (softirq) context; the queue lock is taken without
 * _bh since we are already in BH context.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;	/* 0 = nothing left to schedule */
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		/* Negative/zero tdif means this entry is due. */
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			/* Drop the hold taken by pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1235
1236void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1237 struct sk_buff *skb)
1238{
1239 unsigned long now = jiffies;
1240 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1241
1242 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1243 kfree_skb(skb);
1244 return;
1245 }
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001246
1247 NEIGH_CB(skb)->sched_next = sched_next;
1248 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249
1250 spin_lock(&tbl->proxy_queue.lock);
1251 if (del_timer(&tbl->proxy_timer)) {
1252 if (time_before(tbl->proxy_timer.expires, sched_next))
1253 sched_next = tbl->proxy_timer.expires;
1254 }
1255 dst_release(skb->dst);
1256 skb->dst = NULL;
1257 dev_hold(skb->dev);
1258 __skb_queue_tail(&tbl->proxy_queue, skb);
1259 mod_timer(&tbl->proxy_timer, sched_next);
1260 spin_unlock(&tbl->proxy_queue.lock);
1261}
1262
1263
/*
 * neigh_parms_alloc - clone the table's default parameters for a device
 * @dev: device the parameters are for (may be NULL for defaults)
 * @tbl: table whose default parms are copied
 *
 * The new parms block starts as a copy of tbl->parms, gets a fresh
 * refcount and randomized reachable_time, is offered to the device's
 * neigh_setup() hook, and is linked into the table's parms list under
 * tbl->lock.  Holds a reference on @dev for the life of the block.
 *
 * Returns the new block, or NULL on allocation/neigh_setup failure.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			/* Give the driver a chance to veto/adjust. */
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
1293
1294static void neigh_rcu_free_parms(struct rcu_head *head)
1295{
1296 struct neigh_parms *parms =
1297 container_of(head, struct neigh_parms, rcu_head);
1298
1299 neigh_parms_put(parms);
1300}
1301
/*
 * neigh_parms_release - unlink a parms block from its table
 * @tbl:   table the block was allocated for
 * @parms: block returned by neigh_parms_alloc()
 *
 * The block is removed from the table's list under tbl->lock, marked
 * dead, its device reference dropped, and the final put deferred via
 * RCU so concurrent lockless readers can finish.  The table's built-in
 * default (&tbl->parms) is never released.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			/* Defer the final put until readers are done. */
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1323
/* Final destructor, reached from neigh_parms_put() when the refcount
 * hits zero; by then the block is unlinked and RCU-quiesced. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1328
/*
 * neigh_table_init_no_netlink - initialise a neighbour table's internals
 * @tbl: statically allocated table with id, family, entry_size,
 *       key_len and default parms already filled in by the caller
 *
 * Sets up the entry slab cache, per-CPU statistics, /proc stats file,
 * neighbour and proxy-neighbour hashes, locks and the gc/proxy timers,
 * but does NOT link the table into the global neigh_tables list — use
 * neigh_table_init() for that.  Allocation failures here panic, as
 * this runs during protocol initialisation.
 */
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* Start with two buckets; the hash grows on demand. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
1386
/*
 * neigh_table_init - initialise a table and register it globally
 * @tbl: table to initialise (see neigh_table_init_no_netlink())
 *
 * After internal initialisation the table is pushed onto the global
 * neigh_tables list under neigh_tbl_lock.  Registering two tables for
 * the same address family is not supported; it is still linked, but an
 * error plus stack trace is logged.
 */
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	/* Detect a duplicate registration for this family. */
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
1407
1408int neigh_table_clear(struct neigh_table *tbl)
1409{
1410 struct neigh_table **tp;
1411
1412 /* It is not clean... Fix it to unload IPv6 module safely */
1413 del_timer_sync(&tbl->gc_timer);
1414 del_timer_sync(&tbl->proxy_timer);
1415 pneigh_queue_purge(&tbl->proxy_queue);
1416 neigh_ifdown(tbl, NULL);
1417 if (atomic_read(&tbl->entries))
1418 printk(KERN_CRIT "neighbour leakage\n");
1419 write_lock(&neigh_tbl_lock);
1420 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1421 if (*tp == tbl) {
1422 *tp = tbl->next;
1423 break;
1424 }
1425 }
1426 write_unlock(&neigh_tbl_lock);
1427
1428 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1429 tbl->hash_buckets = NULL;
1430
1431 kfree(tbl->phash_buckets);
1432 tbl->phash_buckets = NULL;
1433
1434 return 0;
1435}
1436
/*
 * neigh_delete - RTM_DELNEIGH netlink handler
 * @skb:  request message
 * @nlh:  netlink header (carries struct ndmsg)
 * @arg:  parsed attribute array (struct rtattr *[])
 *
 * Finds the table matching ndm_family, then either removes a proxy
 * entry (NTF_PROXY) or forces the matching neighbour into NUD_FAILED
 * (admin override).  Returns 0 on success, -ENODEV for an unknown
 * ifindex, -EINVAL for a bad/missing destination attribute, or
 * -EADDRNOTAVAIL when no table handles the family.
 */
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Drop the list lock once the right table is found;
		 * the loop is exited on every path below. */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			/* "Delete" == admin-forced transition to FAILED. */
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1487
/*
 * neigh_add - RTM_NEWNEIGH netlink handler
 * @skb:  request message
 * @nlh:  netlink header (nlmsg_flags: NLM_F_CREATE/EXCL/REPLACE)
 * @arg:  parsed attribute array (struct rtattr *[])
 *
 * Creates or updates a neighbour (or a proxy entry when NTF_PROXY is
 * set) in the table matching ndm_family.  NLM_F_EXCL rejects existing
 * entries, NLM_F_REPLACE permits overriding an existing link-layer
 * address, NLM_F_CREATE permits creating a missing entry.  Returns 0
 * on success or a negative errno (-ENODEV, -EINVAL, -EEXIST, -ENOENT,
 * -ENOBUFS, -EADDRNOTAVAIL).
 */
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* Right table found; all paths below leave the loop. */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			/* creat=1: create the proxy entry if missing. */
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			/* Existing entry: only override on REPLACE. */
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1566
/*
 * Dump one neigh_parms block as a nested NDTA_PARMS attribute.
 * The RTA_PUT* macros jump to rtattr_failure when the skb runs out
 * of tailroom, in which case the partially written nest is cancelled.
 * Returns skb->len on success, negative on failure (per RTA_NEST_END
 * / RTA_NEST_CANCEL).
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
1597
/*
 * Build one RTM_NEWNEIGHTBL dump message describing a whole table:
 * gc thresholds/interval, an NDTA_CONFIG snapshot, summed per-CPU
 * NDTA_STATS, and the table's default parms as a nested attribute.
 * Runs under tbl->lock (read, BH off) so the values are consistent.
 * Returns the NLMSG_END result on success, negative on skb overflow.
 */
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		/* Snapshot of the table's runtime configuration. */
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		/* Statistics are kept per CPU; sum them for the dump. */
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1679
/*
 * Like neightbl_fill_info(), but emits a message carrying only the
 * table name plus one (per-device) parms block — used when dumping
 * the non-default parms entries of a table.
 */
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1712
1713static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1714 int ifindex)
1715{
1716 struct neigh_parms *p;
1717
1718 for (p = &tbl->parms; p; p = p->next)
1719 if ((p->dev && p->dev->ifindex == ifindex) ||
1720 (!p->dev && !ifindex))
1721 return p;
1722
1723 return NULL;
1724}
1725
/*
 * neightbl_set - RTM_SETNEIGHTBL netlink handler
 * @skb: request message
 * @nlh: netlink header (carries struct ndtmsg)
 * @arg: parsed attribute array (struct rtattr *[])
 *
 * Looks up the table by NDTA_NAME (and optionally family), then
 * updates the gc thresholds/interval and, when a nested NDTA_PARMS
 * is present, the parms block selected by NDTPA_IFINDEX (0 = table
 * default).  Returns 0 on success, -EINVAL on malformed attributes,
 * -ENOENT when the table or parms block does not exist.
 */
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		/* Each NDTPA_* attribute is optional; update only the
		 * fields the request actually carries. */
		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
				RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
				RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
				RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
				RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
				RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
				RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
				RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
				RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
1838
/*
 * RTM_GETNEIGHTBL dump handler.  For every table (optionally filtered
 * by family) emits one full-table message followed by one message per
 * additional (per-device) parms block.  cb->args[0] stores the resume
 * index across dump continuations.
 */
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		/* idx counts the table itself plus each extra parms
		 * block, so a partial dump resumes mid-table. */
		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}

	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
/*
 * Build one RTM_NEWNEIGH message for neighbour @n: ndmsg header,
 * NDA_DST key, NDA_LLADDR (only while the state is NUD_VALID),
 * cache timestamps and probe count.  n->lock is held while reading
 * the mutable fields; the `locked` flag lets the shared failure path
 * below know whether it still has to drop the lock.
 * Returns skb->len on success, -1 when the skb ran out of room
 * (message trimmed back to its starting point).
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
1916
1917
/*
 * Dump one table's neighbours into @skb for RTM_GETNEIGH.
 * cb->args[1]/args[2] hold the bucket / in-bucket index where the
 * previous dump stopped, so a multi-part dump resumes in place.
 * Returns skb->len when the table is exhausted, -1 when the skb
 * filled up mid-dump.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;	/* new bucket: restart index */
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
1951
/*
 * RTM_GETNEIGH dump entry point: iterate all tables (optionally
 * filtered by family) and dump each via neigh_dump_table().
 * cb->args[0] is the table index to resume from; per-table resume
 * state lives in args[1..] and is reset when moving to a new table.
 */
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
1975
/*
 * neigh_for_each - invoke @cb on every neighbour in @tbl
 * @tbl:    table to walk
 * @cb:     callback invoked with each entry and @cookie
 * @cookie: opaque argument forwarded to @cb
 *
 * The whole walk runs under tbl->lock held for reading (BH off), so
 * @cb must not sleep or take the table lock itself.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
1990
/* The tbl->lock must be held as a writer and BH disabled. */

/*
 * Walk every hash chain and ask @cb whether each entry should be
 * released.  For entries where @cb returns nonzero, the entry is
 * unlinked from its chain, marked dead (under its own lock) and the
 * hash table's reference dropped via neigh_release() after the entry
 * lock is released.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2018
2019#ifdef CONFIG_PROC_FS
2020
/*
 * seq_file helper: return the first neighbour that passes the
 * state's filters.  NEIGH_SEQ_SKIP_NOARP hides NUD_NOARP-only
 * entries; a protocol-supplied neigh_sub_iter can veto entries too.
 * Updates state->bucket so iteration can continue from here.
 * Called with tbl->lock read-held (taken in neigh_seq_start()).
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	/* We are (re)starting on the neighbour half of the walk. */
	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2056
/*
 * seq_file helper: advance from neighbour @n to the next entry that
 * passes the filters, moving to later hash buckets as needed.  When
 * @pos is non-NULL it is decremented for each entry returned, which
 * is how neigh_get_idx() skips forward to a file position.  Returns
 * NULL at the end of the table.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		/* Let the protocol advance within the current entry
		 * first (e.g. per-entry sub-objects). */
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2101
2102static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2103{
2104 struct neighbour *n = neigh_get_first(seq);
2105
2106 if (n) {
2107 while (*pos) {
2108 n = neigh_get_next(seq, n, pos);
2109 if (!n)
2110 break;
2111 }
2112 }
2113 return *pos ? NULL : n;
2114}
2115
/*
 * seq_file helper: first proxy-neighbour entry in the table's pneigh
 * hash, scanning buckets from the start.  Flags the state so later
 * calls know the walk has crossed into the pneigh half.
 */
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
2133
/*
 * seq_file helper: advance to the next proxy-neighbour entry, moving
 * through later buckets as needed.  *pos (when given) is decremented
 * per entry returned, mirroring neigh_get_next().  Returns NULL at
 * the end of the pneigh hash.
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
2155
2156static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2157{
2158 struct pneigh_entry *pn = pneigh_get_first(seq);
2159
2160 if (pn) {
2161 while (*pos) {
2162 pn = pneigh_get_next(seq, pn, pos);
2163 if (!pn)
2164 break;
2165 }
2166 }
2167 return *pos ? NULL : pn;
2168}
2169
2170static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2171{
2172 struct neigh_seq_state *state = seq->private;
2173 void *rc;
2174
2175 rc = neigh_get_idx(seq, pos);
2176 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2177 rc = pneigh_get_idx(seq, pos);
2178
2179 return rc;
2180}
2181
/*
 * neigh_seq_start - ->start for protocol /proc files (arp, ndisc, ...)
 * @tbl:             table the protocol wants dumped
 * @neigh_seq_flags: NEIGH_SEQ_SKIP_NOARP / NEIGH_SEQ_NEIGH_ONLY
 *
 * Initialises the walk state and takes tbl->lock for reading; the
 * matching unlock happens in neigh_seq_stop().  Position 0 yields
 * SEQ_START_TOKEN (header line); otherwise the (*pos - 1)'th entry
 * is located, as the token occupies position 0.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
2197
/*
 * ->next for the neighbour seq walk.  After the start token it yields
 * the first entry; within the neighbour half it advances there first
 * and falls through to the proxy hash when neighbours are exhausted
 * (unless NEIGH_SEQ_NEIGH_ONLY); within the proxy half it only
 * advances there.  Always increments *pos, per seq_file contract.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
2224
/* ->stop for the neighbour seq walk: drops the table lock taken in
 * neigh_seq_start(). */
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2233
2234/* statistics via seq_file */
2235
/*
 * ->start for /proc/net/stat/<tbl>: position 0 is the header token,
 * position N (N >= 1) maps to the stats of the (N-1)'th possible CPU.
 * *pos is advanced to cpu+1 so ->next resumes after the CPU returned.
 */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;
	
	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
2253
/*
 * ->next for the per-CPU stats walk: return the next possible CPU's
 * statistics block, or NULL when all CPUs have been visited.
 */
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
2268
/* Nothing to release: the stat iterators above take no locks. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2273
/*
 * Emit one line of /proc neighbour statistics: the column header for
 * the SEQ_START_TOKEN slot, otherwise one row per cpu.  Note that the
 * "entries" column is the table-wide count and is therefore repeated
 * identically on every per-cpu row.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;	/* per-cpu block from start/next */

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
2307
/* seq_file iterator for the per-table /proc statistics file. */
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2314
2315static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2316{
2317 int ret = seq_open(file, &neigh_stat_seq_ops);
2318
2319 if (!ret) {
2320 struct seq_file *sf = file->private_data;
2321 sf->private = PDE(inode);
2322 }
2323 return ret;
2324};
2325
/* file_operations for /proc/net/stat/<tbl> — plain seq_file plumbing. */
static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2333
2334#endif /* CONFIG_PROC_FS */
2335
2336#ifdef CONFIG_ARPD
2337void neigh_app_ns(struct neighbour *n)
2338{
2339 struct nlmsghdr *nlh;
2340 int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2341 struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2342
2343 if (!skb)
2344 return;
2345
Jamal Hadi Salimb6544c02005-06-18 22:54:12 -07002346 if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 kfree_skb(skb);
2348 return;
2349 }
2350 nlh = (struct nlmsghdr *)skb->data;
2351 nlh->nlmsg_flags = NLM_F_REQUEST;
Patrick McHardyac6d4392005-08-14 19:29:52 -07002352 NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
2353 netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354}
2355
2356static void neigh_app_notify(struct neighbour *n)
2357{
2358 struct nlmsghdr *nlh;
2359 int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2360 struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2361
2362 if (!skb)
2363 return;
2364
Jamal Hadi Salimb6544c02005-06-18 22:54:12 -07002365 if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 kfree_skb(skb);
2367 return;
2368 }
2369 nlh = (struct nlmsghdr *)skb->data;
Patrick McHardyac6d4392005-08-14 19:29:52 -07002370 NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
2371 netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372}
2373
2374#endif /* CONFIG_ARPD */
2375
2376#ifdef CONFIG_SYSCTL
2377
/*
 * Template for the per-protocol neighbour sysctl tree
 * (net/<proto>/neigh/<dev>/...).  neigh_sysctl_register() copies this
 * template and patches each entry's .data pointer, addressing
 * neigh_vars[] by bare position — the [n] comments below must stay in
 * sync with the indices used there.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table neigh_vars[__NET_NEIGH_MAX];
	ctl_table neigh_dev[2];
	ctl_table neigh_neigh_dir[2];
	ctl_table neigh_proto_dir[2];
	ctl_table neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		/* [0] */
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [1] */
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [2] */
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [3] — handler/strategy may be overridden at register time */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [4] — handler/strategy may be overridden at register time */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [5] */
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [6] */
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [7] */
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [8] */
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [9] */
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [10] */
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [11] */
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [12]-[15]: table-wide gc knobs — only present in the
		 * "default" directory, hidden for real devices */
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [16] millisecond view of [3] */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		/* [17] millisecond view of [4] */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	/* per-device directory; procname/ctl_name patched at register time */
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	/* protocol directory; name/id filled in at register time */
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
2546
/**
 * neigh_sysctl_register - instantiate the neigh sysctl tree for one parms
 * @dev:	device the parms belong to, or NULL for the protocol-wide
 *		"default" entry
 * @p:		neighbour parameters whose fields back the sysctl values
 * @p_id:	ctl_name of the protocol directory (e.g. NET_IPV4)
 * @pdev_id:	ctl_name of the "neigh" directory level
 * @p_name:	procname of the protocol directory (e.g. "ipv4")
 * @handler:	optional proc handler overriding the time-valued entries
 * @strategy:	optional sysctl strategy paired with @handler
 *
 * Copies neigh_sysctl_template and patches the .data pointers by bare
 * index — the order here must match the template's neigh_vars[] layout.
 * Returns 0 on success or -ENOBUFS on allocation/registration failure.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	/* Indices 0-11 track per-parms fields, in template order. */
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		/* Real device: name the directory after it and drop the
		 * table-wide gc entries (12-15), which only make sense
		 * under "default". */
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		/* gc_interval/gc_thresh1-3 are assumed to be four ints
		 * laid out immediately after the neigh_parms struct
		 * (i.e. p is the embedded tbl->parms) — NOTE(review):
		 * fragile layout coupling with struct neigh_table. */
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	/* Millisecond-granularity views of entries 3 and 4. */
	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* Protocol-specific override for the four time values. */
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	/* Own copy of the name: dev->name may outlive/change under us,
	 * and neigh_sysctl_unregister() kfree()s it unconditionally. */
	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	/* Link the four directory levels: net/<proto>/neigh/<dev>/vars */
	t->neigh_dev[0].child	       = t->neigh_vars;
	t->neigh_neigh_dir[0].child    = t->neigh_dev;
	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child     = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
 free_procname:
	kfree(dev_name);
 free:
	kfree(t);

	return err;
}
2643
2644void neigh_sysctl_unregister(struct neigh_parms *p)
2645{
2646 if (p->sysctl_table) {
2647 struct neigh_sysctl_table *t = p->sysctl_table;
2648 p->sysctl_table = NULL;
2649 unregister_sysctl_table(t->sysctl_header);
2650 kfree(t->neigh_dev[0].procname);
2651 kfree(t);
2652 }
2653}
2654
2655#endif /* CONFIG_SYSCTL */
2656
/* Symbols exported for the protocol neighbour tables (ARP, NDISC, ...). */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif