/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
	- timer
	- resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simple and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
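
/*
   An illustrative sketch of the rule above (hypothetical caller, error
   handling omitted): pin the entry with a reference while tbl->lock is
   held, drop the lock, and only then do the non-trivial work.

	struct neighbour *n;

	read_lock_bh(&tbl->lock);
	n = tbl->hash_buckets[hash_val & tbl->hash_mask];
	if (n)
		neigh_hold(n);		// the reference keeps the entry alive
	read_unlock_bh(&tbl->lock);
	if (n) {
		// ... callbacks / transmission are safe here ...
		neigh_release(n);
	}
 */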

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
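
/* For example, with base = 30 * HZ the value returned above lies in
 * [15 * HZ, 45 * HZ): net_random() % base is in [0, base - 1] and
 * (base >> 1) shifts that range up by half of base.
 */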


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->parms = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
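
/* A note on the thresholds used above (illustrative; the values are
 * per-table): a synchronous forced GC runs once the table holds gc_thresh3
 * entries, or gc_thresh2 entries with more than 5 seconds since the last
 * flush.  The allocation itself fails only when that GC frees nothing and
 * the entry count sampled before it was already at or above gc_thresh3.
 */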

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    (net == n->dev->nd_net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->net == net) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->net = hold_net(net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    (n->net == net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(n->net);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(n->net);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}


/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
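
/* Illustration (hypothetical per-parms values): with ucast_probes = 3,
 * app_probes = 0 and mcast_probes = 3, neigh_max_probes() returns 3 for an
 * entry in NUD_PROBE and 6 otherwise.
 */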

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very delicate place. report_unreachable is a very
		   complicated routine. In particular, it can hit the same
		   neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}



/* Generic update routine.
   -- lladdr is the new lladdr or NULL, if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
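
/* For instance, neigh_event_ns() below refreshes an entry learned from a
 * received solicitation with
 *
 *	neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE);
 *
 * while the netlink path in neigh_add() also passes NEIGH_UPDATE_F_ADMIN,
 * which is what permits changing NOARP/PERMANENT entries.
 */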

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if (p->net != net)
			continue;
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net;

	net = &init_net;
	if (dev)
		net = dev->nd_net;

	ref = lookup_neigh_params(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->net = hold_net(net);
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(parms->net);
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	tbl->parms.net = &init_net;
	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
	tbl->gc_timer.expires = now + 1;
	add_timer(&tbl->gc_timer);

	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1638
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001639static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1640{
Thomas Grafca860fb2006-08-07 18:00:18 -07001641 struct nlattr *nest;
1642
1643 nest = nla_nest_start(skb, NDTA_PARMS);
1644 if (nest == NULL)
1645 return -ENOBUFS;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001646
1647 if (parms->dev)
Thomas Grafca860fb2006-08-07 18:00:18 -07001648 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001649
Thomas Grafca860fb2006-08-07 18:00:18 -07001650 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1651 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1652 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1653 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1654 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1655 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1656 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1657 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001658 parms->base_reachable_time);
Thomas Grafca860fb2006-08-07 18:00:18 -07001659 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1660 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1661 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1662 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1663 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1664 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001665
Thomas Grafca860fb2006-08-07 18:00:18 -07001666 return nla_nest_end(skb, nest);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001667
Thomas Grafca860fb2006-08-07 18:00:18 -07001668nla_put_failure:
1669 return nla_nest_cancel(skb, nest);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001670}
1671
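/* Fill one RTM_NEWNEIGHTBL message describing a whole table: its name,
 * gc_interval and gc thresholds, a snapshot of the runtime configuration
 * (struct ndt_config), the per-CPU counters summed into a struct
 * ndt_stats, and the table's default parameters via neightbl_fill_parms().
 * Everything is read under tbl->lock so the reported values are consistent.
 */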
Thomas Grafca860fb2006-08-07 18:00:18 -07001672static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1673 u32 pid, u32 seq, int type, int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001674{
1675 struct nlmsghdr *nlh;
1676 struct ndtmsg *ndtmsg;
1677
Thomas Grafca860fb2006-08-07 18:00:18 -07001678 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1679 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08001680 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001681
Thomas Grafca860fb2006-08-07 18:00:18 -07001682 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001683
1684 read_lock_bh(&tbl->lock);
1685 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07001686 ndtmsg->ndtm_pad1 = 0;
1687 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001688
Thomas Grafca860fb2006-08-07 18:00:18 -07001689 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1690 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1691 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1692 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1693 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001694
1695 {
1696 unsigned long now = jiffies;
1697 unsigned int flush_delta = now - tbl->last_flush;
1698 unsigned int rand_delta = now - tbl->last_rand;
1699
1700 struct ndt_config ndc = {
1701 .ndtc_key_len = tbl->key_len,
1702 .ndtc_entry_size = tbl->entry_size,
1703 .ndtc_entries = atomic_read(&tbl->entries),
1704 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1705 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1706 .ndtc_hash_rnd = tbl->hash_rnd,
1707 .ndtc_hash_mask = tbl->hash_mask,
1708 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1709 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1710 };
1711
Thomas Grafca860fb2006-08-07 18:00:18 -07001712 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001713 }
1714
1715 {
1716 int cpu;
1717 struct ndt_stats ndst;
1718
1719 memset(&ndst, 0, sizeof(ndst));
1720
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07001721 for_each_possible_cpu(cpu) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001722 struct neigh_statistics *st;
1723
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001724 st = per_cpu_ptr(tbl->stats, cpu);
1725 ndst.ndts_allocs += st->allocs;
1726 ndst.ndts_destroys += st->destroys;
1727 ndst.ndts_hash_grows += st->hash_grows;
1728 ndst.ndts_res_failed += st->res_failed;
1729 ndst.ndts_lookups += st->lookups;
1730 ndst.ndts_hits += st->hits;
1731 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1732 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1733 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1734 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1735 }
1736
Thomas Grafca860fb2006-08-07 18:00:18 -07001737 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001738 }
1739
1740 BUG_ON(tbl->parms.dev);
1741 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
Thomas Grafca860fb2006-08-07 18:00:18 -07001742 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001743
1744 read_unlock_bh(&tbl->lock);
Thomas Grafca860fb2006-08-07 18:00:18 -07001745 return nlmsg_end(skb, nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001746
Thomas Grafca860fb2006-08-07 18:00:18 -07001747nla_put_failure:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001748 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08001749 nlmsg_cancel(skb, nlh);
1750 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001751}
1752
Thomas Grafca860fb2006-08-07 18:00:18 -07001753static int neightbl_fill_param_info(struct sk_buff *skb,
1754 struct neigh_table *tbl,
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001755 struct neigh_parms *parms,
Thomas Grafca860fb2006-08-07 18:00:18 -07001756 u32 pid, u32 seq, int type,
1757 unsigned int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001758{
1759 struct ndtmsg *ndtmsg;
1760 struct nlmsghdr *nlh;
1761
Thomas Grafca860fb2006-08-07 18:00:18 -07001762 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1763 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08001764 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001765
Thomas Grafca860fb2006-08-07 18:00:18 -07001766 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001767
1768 read_lock_bh(&tbl->lock);
1769 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07001770 ndtmsg->ndtm_pad1 = 0;
1771 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001772
Thomas Grafca860fb2006-08-07 18:00:18 -07001773 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1774 neightbl_fill_parms(skb, parms) < 0)
1775 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001776
1777 read_unlock_bh(&tbl->lock);
Thomas Grafca860fb2006-08-07 18:00:18 -07001778 return nlmsg_end(skb, nlh);
1779errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001780 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08001781 nlmsg_cancel(skb, nlh);
1782 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001783}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001784
Patrick McHardyef7c79e2007-06-05 12:38:30 -07001785static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07001786 [NDTA_NAME] = { .type = NLA_STRING },
1787 [NDTA_THRESH1] = { .type = NLA_U32 },
1788 [NDTA_THRESH2] = { .type = NLA_U32 },
1789 [NDTA_THRESH3] = { .type = NLA_U32 },
1790 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1791 [NDTA_PARMS] = { .type = NLA_NESTED },
1792};
1793
Patrick McHardyef7c79e2007-06-05 12:38:30 -07001794static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07001795 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1796 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1797 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1798 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1799 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1800 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1801 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1802 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1803 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1804 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1805 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1806 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1807 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1808};
1809
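/* RTM_SETNEIGHTBL handler.  The target table is selected by NDTA_NAME
 * (and, if given, the family in the ndtmsg header); per-device parameters
 * nested in NDTA_PARMS are looked up by NDTPA_IFINDEX and updated under
 * tbl->lock, followed by the table-wide thresholds and gc_interval.
 *
 * Illustrative only: userspace normally reaches this through iproute2,
 * e.g. something like "ip ntable change name arp_cache thresh1 256"
 * (the option names are iproute2's, not defined in this file).
 */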
Thomas Grafc8822a42007-03-22 11:50:06 -07001810static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001811{
Denis V. Lunevb8542722007-12-01 00:21:31 +11001812 struct net *net = skb->sk->sk_net;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001813 struct neigh_table *tbl;
Thomas Graf6b3f8672006-08-07 17:58:53 -07001814 struct ndtmsg *ndtmsg;
1815 struct nlattr *tb[NDTA_MAX+1];
1816 int err;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001817
Thomas Graf6b3f8672006-08-07 17:58:53 -07001818 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1819 nl_neightbl_policy);
1820 if (err < 0)
1821 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001822
Thomas Graf6b3f8672006-08-07 17:58:53 -07001823 if (tb[NDTA_NAME] == NULL) {
1824 err = -EINVAL;
1825 goto errout;
1826 }
1827
1828 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001829 read_lock(&neigh_tbl_lock);
1830 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1831 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1832 continue;
1833
Thomas Graf6b3f8672006-08-07 17:58:53 -07001834 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001835 break;
1836 }
1837
1838 if (tbl == NULL) {
1839 err = -ENOENT;
Thomas Graf6b3f8672006-08-07 17:58:53 -07001840 goto errout_locked;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001841 }
1842
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001843 /*
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001844 * We acquire tbl->lock to be nice to the periodic timers and
1845 * make sure they always see a consistent set of values.
1846 */
1847 write_lock_bh(&tbl->lock);
1848
Thomas Graf6b3f8672006-08-07 17:58:53 -07001849 if (tb[NDTA_PARMS]) {
1850 struct nlattr *tbp[NDTPA_MAX+1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001851 struct neigh_parms *p;
Thomas Graf6b3f8672006-08-07 17:58:53 -07001852 int i, ifindex = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001853
Thomas Graf6b3f8672006-08-07 17:58:53 -07001854 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1855 nl_ntbl_parm_policy);
1856 if (err < 0)
1857 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001858
Thomas Graf6b3f8672006-08-07 17:58:53 -07001859 if (tbp[NDTPA_IFINDEX])
1860 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001861
Eric W. Biederman426b5302008-01-24 00:13:18 -08001862 p = lookup_neigh_params(tbl, net, ifindex);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001863 if (p == NULL) {
1864 err = -ENOENT;
Thomas Graf6b3f8672006-08-07 17:58:53 -07001865 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001866 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001867
Thomas Graf6b3f8672006-08-07 17:58:53 -07001868 for (i = 1; i <= NDTPA_MAX; i++) {
1869 if (tbp[i] == NULL)
1870 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001871
Thomas Graf6b3f8672006-08-07 17:58:53 -07001872 switch (i) {
1873 case NDTPA_QUEUE_LEN:
1874 p->queue_len = nla_get_u32(tbp[i]);
1875 break;
1876 case NDTPA_PROXY_QLEN:
1877 p->proxy_qlen = nla_get_u32(tbp[i]);
1878 break;
1879 case NDTPA_APP_PROBES:
1880 p->app_probes = nla_get_u32(tbp[i]);
1881 break;
1882 case NDTPA_UCAST_PROBES:
1883 p->ucast_probes = nla_get_u32(tbp[i]);
1884 break;
1885 case NDTPA_MCAST_PROBES:
1886 p->mcast_probes = nla_get_u32(tbp[i]);
1887 break;
1888 case NDTPA_BASE_REACHABLE_TIME:
1889 p->base_reachable_time = nla_get_msecs(tbp[i]);
1890 break;
1891 case NDTPA_GC_STALETIME:
1892 p->gc_staletime = nla_get_msecs(tbp[i]);
1893 break;
1894 case NDTPA_DELAY_PROBE_TIME:
1895 p->delay_probe_time = nla_get_msecs(tbp[i]);
1896 break;
1897 case NDTPA_RETRANS_TIME:
1898 p->retrans_time = nla_get_msecs(tbp[i]);
1899 break;
1900 case NDTPA_ANYCAST_DELAY:
1901 p->anycast_delay = nla_get_msecs(tbp[i]);
1902 break;
1903 case NDTPA_PROXY_DELAY:
1904 p->proxy_delay = nla_get_msecs(tbp[i]);
1905 break;
1906 case NDTPA_LOCKTIME:
1907 p->locktime = nla_get_msecs(tbp[i]);
1908 break;
1909 }
1910 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001911 }
1912
Thomas Graf6b3f8672006-08-07 17:58:53 -07001913 if (tb[NDTA_THRESH1])
1914 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1915
1916 if (tb[NDTA_THRESH2])
1917 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1918
1919 if (tb[NDTA_THRESH3])
1920 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1921
1922 if (tb[NDTA_GC_INTERVAL])
1923 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1924
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001925 err = 0;
1926
Thomas Graf6b3f8672006-08-07 17:58:53 -07001927errout_tbl_lock:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001928 write_unlock_bh(&tbl->lock);
Thomas Graf6b3f8672006-08-07 17:58:53 -07001929errout_locked:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001930 read_unlock(&neigh_tbl_lock);
Thomas Graf6b3f8672006-08-07 17:58:53 -07001931errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001932 return err;
1933}
1934
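/* Dump callback for RTM_GETNEIGHTBL.  Emits one message per table that
 * matches the requested family, plus one per per-device parms entry that
 * belongs to the caller's namespace; cb->args[0] and cb->args[1] remember
 * how far the previous pass got so a multi-part dump can resume.
 */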
Thomas Grafc8822a42007-03-22 11:50:06 -07001935static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001936{
Denis V. Lunevb8542722007-12-01 00:21:31 +11001937 struct net *net = skb->sk->sk_net;
Thomas Grafca860fb2006-08-07 18:00:18 -07001938 int family, tidx, nidx = 0;
1939 int tbl_skip = cb->args[0];
1940 int neigh_skip = cb->args[1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001941 struct neigh_table *tbl;
1942
Thomas Grafca860fb2006-08-07 18:00:18 -07001943 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001944
1945 read_lock(&neigh_tbl_lock);
Thomas Grafca860fb2006-08-07 18:00:18 -07001946 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001947 struct neigh_parms *p;
1948
Thomas Grafca860fb2006-08-07 18:00:18 -07001949 if (tidx < tbl_skip || (family && tbl->family != family))
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001950 continue;
1951
Thomas Grafca860fb2006-08-07 18:00:18 -07001952 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1953 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1954 NLM_F_MULTI) <= 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001955 break;
1956
Eric W. Biederman426b5302008-01-24 00:13:18 -08001957 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
1958 if (net != p->net)
1959 continue;
1960
1961 if (nidx++ < neigh_skip)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001962 continue;
1963
Thomas Grafca860fb2006-08-07 18:00:18 -07001964 if (neightbl_fill_param_info(skb, tbl, p,
1965 NETLINK_CB(cb->skb).pid,
1966 cb->nlh->nlmsg_seq,
1967 RTM_NEWNEIGHTBL,
1968 NLM_F_MULTI) <= 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001969 goto out;
1970 }
1971
Thomas Grafca860fb2006-08-07 18:00:18 -07001972 neigh_skip = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001973 }
1974out:
1975 read_unlock(&neigh_tbl_lock);
Thomas Grafca860fb2006-08-07 18:00:18 -07001976 cb->args[0] = tidx;
1977 cb->args[1] = nidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001978
1979 return skb->len;
1980}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
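/* Fill one neighbour message (ndmsg + attributes).  NDA_DST is always
 * present; NDA_LLADDR is only included while the entry is in a NUD_VALID
 * state.  The cacheinfo block carries jiffies deltas since last use,
 * confirmation and update, plus the reference count minus one.
 */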
Thomas Graf8b8aec52006-08-07 17:56:37 -07001982static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1983 u32 pid, u32 seq, int type, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984{
1985 unsigned long now = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 struct nda_cacheinfo ci;
Thomas Graf8b8aec52006-08-07 17:56:37 -07001987 struct nlmsghdr *nlh;
1988 struct ndmsg *ndm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Thomas Graf8b8aec52006-08-07 17:56:37 -07001990 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1991 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08001992 return -EMSGSIZE;
Thomas Graf8b8aec52006-08-07 17:56:37 -07001993
1994 ndm = nlmsg_data(nlh);
1995 ndm->ndm_family = neigh->ops->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07001996 ndm->ndm_pad1 = 0;
1997 ndm->ndm_pad2 = 0;
Thomas Graf8b8aec52006-08-07 17:56:37 -07001998 ndm->ndm_flags = neigh->flags;
1999 ndm->ndm_type = neigh->type;
2000 ndm->ndm_ifindex = neigh->dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Thomas Graf8b8aec52006-08-07 17:56:37 -07002002 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2003
2004 read_lock_bh(&neigh->lock);
2005 ndm->ndm_state = neigh->nud_state;
2006 if ((neigh->nud_state & NUD_VALID) &&
2007 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2008 read_unlock_bh(&neigh->lock);
2009 goto nla_put_failure;
2010 }
2011
2012 ci.ndm_used = now - neigh->used;
2013 ci.ndm_confirmed = now - neigh->confirmed;
2014 ci.ndm_updated = now - neigh->updated;
2015 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2016 read_unlock_bh(&neigh->lock);
2017
2018 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2019 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2020
2021 return nlmsg_end(skb, nlh);
2022
2023nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08002024 nlmsg_cancel(skb, nlh);
2025 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026}
2027
Thomas Grafd961db32007-08-08 23:12:56 -07002028static void neigh_update_notify(struct neighbour *neigh)
2029{
2030 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2031 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2032}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
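/* Walk one table's hash buckets under read_lock_bh(&tbl->lock) and emit
 * an RTM_NEWNEIGH message for every entry in the caller's namespace.
 * The bucket and index reached are saved in cb->args[1]/[2] so the dump
 * can continue where it left off on the next call.
 */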
2034static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2035 struct netlink_callback *cb)
2036{
Eric W. Biederman426b5302008-01-24 00:13:18 -08002037 struct net * net = skb->sk->sk_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 struct neighbour *n;
2039 int rc, h, s_h = cb->args[1];
2040 int idx, s_idx = idx = cb->args[2];
2041
Julian Anastasovc5e29462006-10-03 15:49:46 -07002042 read_lock_bh(&tbl->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 for (h = 0; h <= tbl->hash_mask; h++) {
2044 if (h < s_h)
2045 continue;
2046 if (h > s_h)
2047 s_idx = 0;
Eric W. Biederman426b5302008-01-24 00:13:18 -08002048 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2049 int lidx;
2050 if (n->dev->nd_net != net)
2051 continue;
2052 lidx = idx++;
2053 if (lidx < s_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 continue;
2055 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2056 cb->nlh->nlmsg_seq,
Jamal Hadi Salimb6544c02005-06-18 22:54:12 -07002057 RTM_NEWNEIGH,
2058 NLM_F_MULTI) <= 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 read_unlock_bh(&tbl->lock);
2060 rc = -1;
2061 goto out;
2062 }
2063 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 }
Julian Anastasovc5e29462006-10-03 15:49:46 -07002065 read_unlock_bh(&tbl->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 rc = skb->len;
2067out:
2068 cb->args[1] = h;
2069 cb->args[2] = idx;
2070 return rc;
2071}
2072
Thomas Grafc8822a42007-03-22 11:50:06 -07002073static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074{
2075 struct neigh_table *tbl;
2076 int t, family, s_t;
2077
2078 read_lock(&neigh_tbl_lock);
Thomas Graf8b8aec52006-08-07 17:56:37 -07002079 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 s_t = cb->args[0];
2081
2082 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2083 if (t < s_t || (family && tbl->family != family))
2084 continue;
2085 if (t > s_t)
2086 memset(&cb->args[1], 0, sizeof(cb->args) -
2087 sizeof(cb->args[0]));
2088 if (neigh_dump_table(tbl, skb, cb) < 0)
2089 break;
2090 }
2091 read_unlock(&neigh_tbl_lock);
2092
2093 cb->args[0] = t;
2094 return skb->len;
2095}
2096
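/* Run cb() on every neighbour in the table while holding tbl->lock with
 * BHs disabled; the callback therefore must not sleep and must not try to
 * modify the table.
 *
 * Hypothetical usage sketch (not part of this file):
 *
 *	static void count_stale(struct neighbour *n, void *cookie)
 *	{
 *		if (n->nud_state & NUD_STALE)
 *			(*(int *)cookie)++;
 *	}
 *
 *	int stale = 0;
 *	neigh_for_each(&arp_tbl, count_stale, &stale);
 */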
2097void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2098{
2099 int chain;
2100
2101 read_lock_bh(&tbl->lock);
2102 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2103 struct neighbour *n;
2104
2105 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2106 cb(n, cookie);
2107 }
2108 read_unlock_bh(&tbl->lock);
2109}
2110EXPORT_SYMBOL(neigh_for_each);
2111
2112/* The tbl->lock must be held as a writer and BH disabled. */
2113void __neigh_for_each_release(struct neigh_table *tbl,
2114 int (*cb)(struct neighbour *))
2115{
2116 int chain;
2117
2118 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2119 struct neighbour *n, **np;
2120
2121 np = &tbl->hash_buckets[chain];
2122 while ((n = *np) != NULL) {
2123 int release;
2124
2125 write_lock(&n->lock);
2126 release = cb(n);
2127 if (release) {
2128 *np = n->next;
2129 n->dead = 1;
2130 } else
2131 np = &n->next;
2132 write_unlock(&n->lock);
Thomas Graf4f494552007-08-08 23:12:36 -07002133 if (release)
2134 neigh_cleanup_and_release(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 }
2136 }
2137}
2138EXPORT_SYMBOL(__neigh_for_each_release);
2139
2140#ifdef CONFIG_PROC_FS
2141
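/* seq_file iteration over a neighbour table for /proc.  neigh_seq_state
 * tracks the current bucket and flags; entries belonging to other network
 * namespaces are skipped, NUD_NOARP entries are skipped when
 * NEIGH_SEQ_SKIP_NOARP is set, and once the hash table is exhausted the
 * proxy (pneigh) entries are walked unless NEIGH_SEQ_NEIGH_ONLY is set.
 */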
2142static struct neighbour *neigh_get_first(struct seq_file *seq)
2143{
2144 struct neigh_seq_state *state = seq->private;
Eric W. Biederman426b5302008-01-24 00:13:18 -08002145 struct net *net = state->net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 struct neigh_table *tbl = state->tbl;
2147 struct neighbour *n = NULL;
2148 int bucket = state->bucket;
2149
2150 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2151 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2152 n = tbl->hash_buckets[bucket];
2153
2154 while (n) {
Eric W. Biederman426b5302008-01-24 00:13:18 -08002155 if (n->dev->nd_net != net)
2156 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 if (state->neigh_sub_iter) {
2158 loff_t fakep = 0;
2159 void *v;
2160
2161 v = state->neigh_sub_iter(state, n, &fakep);
2162 if (!v)
2163 goto next;
2164 }
2165 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2166 break;
2167 if (n->nud_state & ~NUD_NOARP)
2168 break;
2169 next:
2170 n = n->next;
2171 }
2172
2173 if (n)
2174 break;
2175 }
2176 state->bucket = bucket;
2177
2178 return n;
2179}
2180
2181static struct neighbour *neigh_get_next(struct seq_file *seq,
2182 struct neighbour *n,
2183 loff_t *pos)
2184{
2185 struct neigh_seq_state *state = seq->private;
Eric W. Biederman426b5302008-01-24 00:13:18 -08002186 struct net *net = state->net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 struct neigh_table *tbl = state->tbl;
2188
2189 if (state->neigh_sub_iter) {
2190 void *v = state->neigh_sub_iter(state, n, pos);
2191 if (v)
2192 return n;
2193 }
2194 n = n->next;
2195
2196 while (1) {
2197 while (n) {
Eric W. Biederman426b5302008-01-24 00:13:18 -08002198 if (n->dev->nd_net != net)
2199 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 if (state->neigh_sub_iter) {
2201 void *v = state->neigh_sub_iter(state, n, pos);
2202 if (v)
2203 return n;
2204 goto next;
2205 }
2206 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2207 break;
2208
2209 if (n->nud_state & ~NUD_NOARP)
2210 break;
2211 next:
2212 n = n->next;
2213 }
2214
2215 if (n)
2216 break;
2217
2218 if (++state->bucket > tbl->hash_mask)
2219 break;
2220
2221 n = tbl->hash_buckets[state->bucket];
2222 }
2223
2224 if (n && pos)
2225 --(*pos);
2226 return n;
2227}
2228
2229static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2230{
2231 struct neighbour *n = neigh_get_first(seq);
2232
2233 if (n) {
2234 while (*pos) {
2235 n = neigh_get_next(seq, n, pos);
2236 if (!n)
2237 break;
2238 }
2239 }
2240 return *pos ? NULL : n;
2241}
2242
2243static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2244{
2245 struct neigh_seq_state *state = seq->private;
Eric W. Biederman426b5302008-01-24 00:13:18 -08002246 struct net * net = state->net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 struct neigh_table *tbl = state->tbl;
2248 struct pneigh_entry *pn = NULL;
2249 int bucket = state->bucket;
2250
2251 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2252 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2253 pn = tbl->phash_buckets[bucket];
Eric W. Biederman426b5302008-01-24 00:13:18 -08002254 while (pn && (pn->net != net))
2255 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 if (pn)
2257 break;
2258 }
2259 state->bucket = bucket;
2260
2261 return pn;
2262}
2263
2264static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2265 struct pneigh_entry *pn,
2266 loff_t *pos)
2267{
2268 struct neigh_seq_state *state = seq->private;
Eric W. Biederman426b5302008-01-24 00:13:18 -08002269 struct net * net = state->net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 struct neigh_table *tbl = state->tbl;
2271
2272 pn = pn->next;
2273 while (!pn) {
2274 if (++state->bucket > PNEIGH_HASHMASK)
2275 break;
2276 pn = tbl->phash_buckets[state->bucket];
Eric W. Biederman426b5302008-01-24 00:13:18 -08002277 while (pn && (pn->net != net))
2278 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 if (pn)
2280 break;
2281 }
2282
2283 if (pn && pos)
2284 --(*pos);
2285
2286 return pn;
2287}
2288
2289static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2290{
2291 struct pneigh_entry *pn = pneigh_get_first(seq);
2292
2293 if (pn) {
2294 while (*pos) {
2295 pn = pneigh_get_next(seq, pn, pos);
2296 if (!pn)
2297 break;
2298 }
2299 }
2300 return *pos ? NULL : pn;
2301}
2302
2303static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2304{
2305 struct neigh_seq_state *state = seq->private;
2306 void *rc;
2307
2308 rc = neigh_get_idx(seq, pos);
2309 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2310 rc = pneigh_get_idx(seq, pos);
2311
2312 return rc;
2313}
2314
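/* Entry points for protocol seq_file handlers.  neigh_seq_start() takes
 * tbl->lock for the whole traversal and neigh_seq_stop() drops it again.
 * A protocol's ->start() typically looks something like this (illustrative
 * only, not taken from this file):
 *
 *	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 */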
2315void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2316{
2317 struct neigh_seq_state *state = seq->private;
2318 loff_t pos_minus_one;
2319
2320 state->tbl = tbl;
2321 state->bucket = 0;
2322 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2323
2324 read_lock_bh(&tbl->lock);
2325
2326 pos_minus_one = *pos - 1;
2327 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2328}
2329EXPORT_SYMBOL(neigh_seq_start);
2330
2331void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2332{
2333 struct neigh_seq_state *state;
2334 void *rc;
2335
2336 if (v == SEQ_START_TOKEN) {
2337 rc = neigh_get_idx(seq, pos);
2338 goto out;
2339 }
2340
2341 state = seq->private;
2342 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2343 rc = neigh_get_next(seq, v, NULL);
2344 if (rc)
2345 goto out;
2346 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2347 rc = pneigh_get_first(seq);
2348 } else {
2349 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2350 rc = pneigh_get_next(seq, v, NULL);
2351 }
2352out:
2353 ++(*pos);
2354 return rc;
2355}
2356EXPORT_SYMBOL(neigh_seq_next);
2357
2358void neigh_seq_stop(struct seq_file *seq, void *v)
2359{
2360 struct neigh_seq_state *state = seq->private;
2361 struct neigh_table *tbl = state->tbl;
2362
2363 read_unlock_bh(&tbl->lock);
2364}
2365EXPORT_SYMBOL(neigh_seq_stop);
2366
2367/* statistics via seq_file */
2368
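/* Per-CPU statistics iterator backing the per-table stats file (e.g.
 * /proc/net/stat/arp_cache for ARP).  *pos encodes cpu + 1 so that
 * position 0 can return SEQ_START_TOKEN and print the header exactly
 * once; only possible CPUs are visited.
 */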
2369static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2370{
2371 struct proc_dir_entry *pde = seq->private;
2372 struct neigh_table *tbl = pde->data;
2373 int cpu;
2374
2375 if (*pos == 0)
2376 return SEQ_START_TOKEN;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002377
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2379 if (!cpu_possible(cpu))
2380 continue;
2381 *pos = cpu+1;
2382 return per_cpu_ptr(tbl->stats, cpu);
2383 }
2384 return NULL;
2385}
2386
2387static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2388{
2389 struct proc_dir_entry *pde = seq->private;
2390 struct neigh_table *tbl = pde->data;
2391 int cpu;
2392
2393 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2394 if (!cpu_possible(cpu))
2395 continue;
2396 *pos = cpu+1;
2397 return per_cpu_ptr(tbl->stats, cpu);
2398 }
2399 return NULL;
2400}
2401
2402static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2403{
2404
2405}
2406
2407static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2408{
2409 struct proc_dir_entry *pde = seq->private;
2410 struct neigh_table *tbl = pde->data;
2411 struct neigh_statistics *st = v;
2412
2413 if (v == SEQ_START_TOKEN) {
Olaf Rempel5bec0032005-04-28 12:16:08 -07002414 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 return 0;
2416 }
2417
2418 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2419 "%08lx %08lx %08lx %08lx\n",
2420 atomic_read(&tbl->entries),
2421
2422 st->allocs,
2423 st->destroys,
2424 st->hash_grows,
2425
2426 st->lookups,
2427 st->hits,
2428
2429 st->res_failed,
2430
2431 st->rcv_probes_mcast,
2432 st->rcv_probes_ucast,
2433
2434 st->periodic_gc_runs,
2435 st->forced_gc_runs
2436 );
2437
2438 return 0;
2439}
2440
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002441static const struct seq_operations neigh_stat_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 .start = neigh_stat_seq_start,
2443 .next = neigh_stat_seq_next,
2444 .stop = neigh_stat_seq_stop,
2445 .show = neigh_stat_seq_show,
2446};
2447
2448static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2449{
2450 int ret = seq_open(file, &neigh_stat_seq_ops);
2451
2452 if (!ret) {
2453 struct seq_file *sf = file->private_data;
2454 sf->private = PDE(inode);
2455 }
2456 return ret;
2457}
2458
Arjan van de Ven9a321442007-02-12 00:55:35 -08002459static const struct file_operations neigh_stat_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 .owner = THIS_MODULE,
2461 .open = neigh_stat_seq_open,
2462 .read = seq_read,
2463 .llseek = seq_lseek,
2464 .release = seq_release,
2465};
2466
2467#endif /* CONFIG_PROC_FS */
2468
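/* Worst-case payload estimate for a neighbour notification: the ndmsg
 * header plus NDA_DST and NDA_LLADDR padded out to MAX_ADDR_LEN, the
 * cacheinfo block and the 32-bit NDA_PROBES counter.  Used to size the
 * skb allocated in __neigh_notify().
 */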
Thomas Graf339bf982006-11-10 14:10:15 -08002469static inline size_t neigh_nlmsg_size(void)
2470{
2471 return NLMSG_ALIGN(sizeof(struct ndmsg))
2472 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2473 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2474 + nla_total_size(sizeof(struct nda_cacheinfo))
2475 + nla_total_size(4); /* NDA_PROBES */
2476}
2477
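/* Build a netlink message of the given type for the entry and multicast
 * it to RTNLGRP_NEIGH listeners in the device's namespace; if allocation
 * or filling fails, the error is reported through rtnl_set_sk_err().
 */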
Thomas Grafb8673312006-08-15 00:33:14 -07002478static void __neigh_notify(struct neighbour *n, int type, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479{
Eric W. Biederman426b5302008-01-24 00:13:18 -08002480 struct net *net = n->dev->nd_net;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002481 struct sk_buff *skb;
Thomas Grafb8673312006-08-15 00:33:14 -07002482 int err = -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Thomas Graf339bf982006-11-10 14:10:15 -08002484 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
Thomas Graf8b8aec52006-08-07 17:56:37 -07002485 if (skb == NULL)
Thomas Grafb8673312006-08-15 00:33:14 -07002486 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487
Thomas Grafb8673312006-08-15 00:33:14 -07002488 err = neigh_fill_info(skb, n, 0, 0, type, flags);
Patrick McHardy26932562007-01-31 23:16:40 -08002489 if (err < 0) {
2490 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2491 WARN_ON(err == -EMSGSIZE);
2492 kfree_skb(skb);
2493 goto errout;
2494 }
Eric W. Biederman426b5302008-01-24 00:13:18 -08002495 err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
Thomas Grafb8673312006-08-15 00:33:14 -07002496errout:
2497 if (err < 0)
Eric W. Biederman426b5302008-01-24 00:13:18 -08002498 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
Thomas Grafb8673312006-08-15 00:33:14 -07002499}
2500
Thomas Grafd961db32007-08-08 23:12:56 -07002501#ifdef CONFIG_ARPD
Thomas Grafb8673312006-08-15 00:33:14 -07002502void neigh_app_ns(struct neighbour *n)
2503{
2504 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506#endif /* CONFIG_ARPD */
2507
2508#ifdef CONFIG_SYSCTL
2509
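/* Template for the per-parms sysctl directory.  neigh_sysctl_register()
 * copies it and points each entry's .data at a field of the neigh_parms
 * being registered, so the order of the entries below must stay in sync
 * with the fixed indices used there (0 = mcast_solicit ... 13 =
 * base_reachable_time_ms, followed by the table-wide gc_* entries).
 */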
2510static struct neigh_sysctl_table {
2511 struct ctl_table_header *sysctl_header;
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002512 struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2513 char *dev_name;
Brian Haleyab32ea52006-09-22 14:15:41 -07002514} neigh_sysctl_template __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 .neigh_vars = {
2516 {
2517 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2518 .procname = "mcast_solicit",
2519 .maxlen = sizeof(int),
2520 .mode = 0644,
2521 .proc_handler = &proc_dointvec,
2522 },
2523 {
2524 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2525 .procname = "ucast_solicit",
2526 .maxlen = sizeof(int),
2527 .mode = 0644,
2528 .proc_handler = &proc_dointvec,
2529 },
2530 {
2531 .ctl_name = NET_NEIGH_APP_SOLICIT,
2532 .procname = "app_solicit",
2533 .maxlen = sizeof(int),
2534 .mode = 0644,
2535 .proc_handler = &proc_dointvec,
2536 },
2537 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 .procname = "retrans_time",
2539 .maxlen = sizeof(int),
2540 .mode = 0644,
2541 .proc_handler = &proc_dointvec_userhz_jiffies,
2542 },
2543 {
2544 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2545 .procname = "base_reachable_time",
2546 .maxlen = sizeof(int),
2547 .mode = 0644,
2548 .proc_handler = &proc_dointvec_jiffies,
2549 .strategy = &sysctl_jiffies,
2550 },
2551 {
2552 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2553 .procname = "delay_first_probe_time",
2554 .maxlen = sizeof(int),
2555 .mode = 0644,
2556 .proc_handler = &proc_dointvec_jiffies,
2557 .strategy = &sysctl_jiffies,
2558 },
2559 {
2560 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2561 .procname = "gc_stale_time",
2562 .maxlen = sizeof(int),
2563 .mode = 0644,
2564 .proc_handler = &proc_dointvec_jiffies,
2565 .strategy = &sysctl_jiffies,
2566 },
2567 {
2568 .ctl_name = NET_NEIGH_UNRES_QLEN,
2569 .procname = "unres_qlen",
2570 .maxlen = sizeof(int),
2571 .mode = 0644,
2572 .proc_handler = &proc_dointvec,
2573 },
2574 {
2575 .ctl_name = NET_NEIGH_PROXY_QLEN,
2576 .procname = "proxy_qlen",
2577 .maxlen = sizeof(int),
2578 .mode = 0644,
2579 .proc_handler = &proc_dointvec,
2580 },
2581 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 .procname = "anycast_delay",
2583 .maxlen = sizeof(int),
2584 .mode = 0644,
2585 .proc_handler = &proc_dointvec_userhz_jiffies,
2586 },
2587 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 .procname = "proxy_delay",
2589 .maxlen = sizeof(int),
2590 .mode = 0644,
2591 .proc_handler = &proc_dointvec_userhz_jiffies,
2592 },
2593 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 .procname = "locktime",
2595 .maxlen = sizeof(int),
2596 .mode = 0644,
2597 .proc_handler = &proc_dointvec_userhz_jiffies,
2598 },
2599 {
Eric W. Biedermand12af672007-10-18 03:05:25 -07002600 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2601 .procname = "retrans_time_ms",
2602 .maxlen = sizeof(int),
2603 .mode = 0644,
2604 .proc_handler = &proc_dointvec_ms_jiffies,
2605 .strategy = &sysctl_ms_jiffies,
2606 },
2607 {
2608 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2609 .procname = "base_reachable_time_ms",
2610 .maxlen = sizeof(int),
2611 .mode = 0644,
2612 .proc_handler = &proc_dointvec_ms_jiffies,
2613 .strategy = &sysctl_ms_jiffies,
2614 },
2615 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 .ctl_name = NET_NEIGH_GC_INTERVAL,
2617 .procname = "gc_interval",
2618 .maxlen = sizeof(int),
2619 .mode = 0644,
2620 .proc_handler = &proc_dointvec_jiffies,
2621 .strategy = &sysctl_jiffies,
2622 },
2623 {
2624 .ctl_name = NET_NEIGH_GC_THRESH1,
2625 .procname = "gc_thresh1",
2626 .maxlen = sizeof(int),
2627 .mode = 0644,
2628 .proc_handler = &proc_dointvec,
2629 },
2630 {
2631 .ctl_name = NET_NEIGH_GC_THRESH2,
2632 .procname = "gc_thresh2",
2633 .maxlen = sizeof(int),
2634 .mode = 0644,
2635 .proc_handler = &proc_dointvec,
2636 },
2637 {
2638 .ctl_name = NET_NEIGH_GC_THRESH3,
2639 .procname = "gc_thresh3",
2640 .maxlen = sizeof(int),
2641 .mode = 0644,
2642 .proc_handler = &proc_dointvec,
2643 },
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002644 {},
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 },
2646};
2647
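/* Clone the template above, wire its .data pointers to the given
 * neigh_parms (and, for the default entry, to the four table-wide gc_*
 * values assumed to sit directly behind the embedded default parms in
 * struct neigh_table), and register the directory as
 * net/<protocol>/neigh/<device|default>/.
 *
 * Illustrative call only, roughly what an address family's init code
 * (e.g. arp_init()) does; not part of this file:
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4,
 *			      NET_IPV4_NEIGH, "ipv4", NULL, NULL);
 */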
2648int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002649 int p_id, int pdev_id, char *p_name,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 proc_handler *handler, ctl_handler *strategy)
2651{
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002652 struct neigh_sysctl_table *t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 const char *dev_name_source = NULL;
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002654
2655#define NEIGH_CTL_PATH_ROOT 0
2656#define NEIGH_CTL_PATH_PROTO 1
2657#define NEIGH_CTL_PATH_NEIGH 2
2658#define NEIGH_CTL_PATH_DEV 3
2659
2660 struct ctl_path neigh_path[] = {
2661 { .procname = "net", .ctl_name = CTL_NET, },
2662 { .procname = "proto", .ctl_name = 0, },
2663 { .procname = "neigh", .ctl_name = 0, },
2664 { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2665 { },
2666 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002668 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 if (!t)
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002670 goto err;
2671
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 t->neigh_vars[0].data = &p->mcast_probes;
2673 t->neigh_vars[1].data = &p->ucast_probes;
2674 t->neigh_vars[2].data = &p->app_probes;
2675 t->neigh_vars[3].data = &p->retrans_time;
2676 t->neigh_vars[4].data = &p->base_reachable_time;
2677 t->neigh_vars[5].data = &p->delay_probe_time;
2678 t->neigh_vars[6].data = &p->gc_staletime;
2679 t->neigh_vars[7].data = &p->queue_len;
2680 t->neigh_vars[8].data = &p->proxy_qlen;
2681 t->neigh_vars[9].data = &p->anycast_delay;
2682 t->neigh_vars[10].data = &p->proxy_delay;
2683 t->neigh_vars[11].data = &p->locktime;
Eric W. Biedermand12af672007-10-18 03:05:25 -07002684 t->neigh_vars[12].data = &p->retrans_time;
2685 t->neigh_vars[13].data = &p->base_reachable_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
2687 if (dev) {
2688 dev_name_source = dev->name;
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002689 neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
Eric W. Biedermand12af672007-10-18 03:05:25 -07002690 /* Terminate the table early */
2691 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 } else {
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002693 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
Eric W. Biedermand12af672007-10-18 03:05:25 -07002694 t->neigh_vars[14].data = (int *)(p + 1);
2695 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2696 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2697 t->neigh_vars[17].data = (int *)(p + 1) + 3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 }
2699
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700
2701 if (handler || strategy) {
2702 /* RetransTime */
2703 t->neigh_vars[3].proc_handler = handler;
2704 t->neigh_vars[3].strategy = strategy;
2705 t->neigh_vars[3].extra1 = dev;
Eric W. Biedermand12af672007-10-18 03:05:25 -07002706 if (!strategy)
2707 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 /* ReachableTime */
2709 t->neigh_vars[4].proc_handler = handler;
2710 t->neigh_vars[4].strategy = strategy;
2711 t->neigh_vars[4].extra1 = dev;
Eric W. Biedermand12af672007-10-18 03:05:25 -07002712 if (!strategy)
2713 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 /* RetransTime (in milliseconds)*/
Eric W. Biedermand12af672007-10-18 03:05:25 -07002715 t->neigh_vars[12].proc_handler = handler;
2716 t->neigh_vars[12].strategy = strategy;
2717 t->neigh_vars[12].extra1 = dev;
2718 if (!strategy)
2719 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720 /* ReachableTime (in milliseconds) */
Eric W. Biedermand12af672007-10-18 03:05:25 -07002721 t->neigh_vars[13].proc_handler = handler;
2722 t->neigh_vars[13].strategy = strategy;
2723 t->neigh_vars[13].extra1 = dev;
2724 if (!strategy)
2725 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 }
2727
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002728 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2729 if (!t->dev_name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 goto free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002732 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2733 neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2734 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2735 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002737 t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002738 if (!t->sysctl_header)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 goto free_procname;
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002740
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 p->sysctl_table = t;
2742 return 0;
2743
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002744free_procname:
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002745 kfree(t->dev_name);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002746free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 kfree(t);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11002748err:
2749 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750}
2751
2752void neigh_sysctl_unregister(struct neigh_parms *p)
2753{
2754 if (p->sysctl_table) {
2755 struct neigh_sysctl_table *t = p->sysctl_table;
2756 p->sysctl_table = NULL;
2757 unregister_sysctl_table(t->sysctl_header);
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11002758 kfree(t->dev_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 kfree(t);
2760 }
2761}
2762
2763#endif /* CONFIG_SYSCTL */
2764
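/* Register the PF_UNSPEC rtnetlink handlers: new/delete/dump for
 * neighbour entries and dump/set for neighbour tables.  The GET types are
 * dump-only here, so they are registered without a doit handler.
 */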
Thomas Grafc8822a42007-03-22 11:50:06 -07002765static int __init neigh_init(void)
2766{
2767 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2768 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2769 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2770
2771 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2772 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2773
2774 return 0;
2775}
2776
2777subsys_initcall(neigh_init);
2778
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779EXPORT_SYMBOL(__neigh_event_send);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780EXPORT_SYMBOL(neigh_changeaddr);
2781EXPORT_SYMBOL(neigh_compat_output);
2782EXPORT_SYMBOL(neigh_connected_output);
2783EXPORT_SYMBOL(neigh_create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784EXPORT_SYMBOL(neigh_destroy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785EXPORT_SYMBOL(neigh_event_ns);
2786EXPORT_SYMBOL(neigh_ifdown);
2787EXPORT_SYMBOL(neigh_lookup);
2788EXPORT_SYMBOL(neigh_lookup_nodev);
2789EXPORT_SYMBOL(neigh_parms_alloc);
2790EXPORT_SYMBOL(neigh_parms_release);
2791EXPORT_SYMBOL(neigh_rand_reach_time);
2792EXPORT_SYMBOL(neigh_resolve_output);
2793EXPORT_SYMBOL(neigh_table_clear);
2794EXPORT_SYMBOL(neigh_table_init);
Simon Kelleybd89efc2006-05-12 14:56:08 -07002795EXPORT_SYMBOL(neigh_table_init_no_netlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796EXPORT_SYMBOL(neigh_update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797EXPORT_SYMBOL(pneigh_enqueue);
2798EXPORT_SYMBOL(pneigh_lookup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
2800#ifdef CONFIG_ARPD
2801EXPORT_SYMBOL(neigh_app_ns);
2802#endif
2803#ifdef CONFIG_SYSCTL
2804EXPORT_SYMBOL(neigh_sysctl_register);
2805EXPORT_SYMBOL(neigh_sysctl_unregister);
2806#endif