/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is also used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever should be done under neigh->lock;
   the most complicated operation we allow is dev->hard_header.
   dev->hard_header is assumed to be simple and must not make
   callbacks into the neighbour tables.
 */
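
/*
 * Illustrative sketch of the rule above; do_expensive_work() is a
 * hypothetical helper used only to show the pattern.  The entry is
 * pinned with a reference while tbl->lock is held, and the heavy
 * lifting happens only after the table lock has been dropped:
 *
 *	write_lock_bh(&tbl->lock);
 *	... locate n in a hash bucket ...
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	do_expensive_work(n);
 *	neigh_release(n);
 */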

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * Returns a value randomly distributed in the interval
 * (1/2)*base ... (3/2)*base.  This matches the default IPv6 behaviour
 * and is not overridable, because it is a genuinely reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
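
/*
 * For example, with base = 30 * HZ (the IPv6 default base reachable time
 * of 30 seconds) the value returned above lies in [15 * HZ, 45 * HZ).
 */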


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!atomic_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
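
/*
 * Typical caller pattern (illustrative sketch; "target" and "dev" are
 * hypothetical variables): a successful lookup returns the entry with a
 * reference already taken, so the caller must release it when done.
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &target, dev);
 *
 *	if (n) {
 *		... read n->nud_state / n->ha under n->lock ...
 *		neigh_release(n);
 *	}
 */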

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);
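
/*
 * Note: most callers reach this through the neigh_create() inline from
 * <net/neighbour.h>, which is simply __neigh_create(tbl, pkey, dev, true),
 * i.e. the common case where the caller wants a referenced entry back.
 */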

static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
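
/*
 * With the usual defaults (ucast_solicit = 3, app_solicit = 0,
 * mcast_solicit = 3, mcast_resolicit = 0) the sum above is 6 probes for
 * an entry that is not yet in NUD_PROBE and 3 once it is.
 */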

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very delicate place. report_unreachable is a very
	   complicated routine; in particular, it can end up hitting this
	   same neighbour entry!

	   So we try to be careful here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   The caller MUST hold a reference count on the entry.
 */
1082
1083int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1084 u32 flags)
1085{
1086 u8 old;
1087 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 int notify = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 struct net_device *dev;
1090 int update_isrouter = 0;
1091
1092 write_lock_bh(&neigh->lock);
1093
1094 dev = neigh->dev;
1095 old = neigh->nud_state;
1096 err = -EPERM;
1097
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001098 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 (old & (NUD_NOARP | NUD_PERMANENT)))
1100 goto out;
Julian Anastasov2c51a972015-06-16 22:56:39 +03001101 if (neigh->dead)
1102 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
1104 if (!(new & NUD_VALID)) {
1105 neigh_del_timer(neigh);
1106 if (old & NUD_CONNECTED)
1107 neigh_suspect(neigh);
1108 neigh->nud_state = new;
1109 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 notify = old & NUD_VALID;
Timo Teras5ef12d92009-06-11 04:16:28 -07001111 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1112 (new & NUD_FAILED)) {
1113 neigh_invalidate(neigh);
1114 notify = 1;
1115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 goto out;
1117 }
1118
1119 /* Compare new lladdr with cached one */
1120 if (!dev->addr_len) {
1121 /* First case: device needs no address. */
1122 lladdr = neigh->ha;
1123 } else if (lladdr) {
1124 /* The second case: if something is already cached
1125 and a new address is proposed:
1126 - compare new & old
1127 - if they are different, check override flag
1128 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001129 if ((old & NUD_VALID) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 !memcmp(lladdr, neigh->ha, dev->addr_len))
1131 lladdr = neigh->ha;
1132 } else {
1133 /* No address is supplied; if we know something,
1134 use it, otherwise discard the request.
1135 */
1136 err = -EINVAL;
1137 if (!(old & NUD_VALID))
1138 goto out;
1139 lladdr = neigh->ha;
1140 }
1141
Vasily Khoruzhicke68a49c2018-09-13 11:12:03 -07001142 /* Update confirmed timestamp for neighbour entry after we
1143 * received ARP packet even if it doesn't change IP to MAC binding.
1144 */
1145 if (new & NUD_CONNECTED)
1146 neigh->confirmed = jiffies;
1147
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 /* If entry was valid and address is not changed,
1149 do not change entry state, if new one is STALE.
1150 */
1151 err = 0;
1152 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1153 if (old & NUD_VALID) {
1154 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1155 update_isrouter = 0;
1156 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1157 (old & NUD_CONNECTED)) {
1158 lladdr = neigh->ha;
1159 new = NUD_STALE;
1160 } else
1161 goto out;
1162 } else {
Julian Anastasov0e7bbcc2016-07-27 09:56:50 +03001163 if (lladdr == neigh->ha && new == NUD_STALE &&
1164 !(flags & NEIGH_UPDATE_F_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 new = old;
1166 }
1167 }
1168
Vasily Khoruzhicke68a49c2018-09-13 11:12:03 -07001169 /* Update timestamp only once we know we will make a change to the
Ihar Hrachyshka2c260522017-05-16 08:44:24 -07001170 * neighbour entry. Otherwise we risk to move the locktime window with
1171 * noop updates and ignore relevant ARP updates.
1172 */
Vasily Khoruzhicke68a49c2018-09-13 11:12:03 -07001173 if (new != old || lladdr != neigh->ha)
Ihar Hrachyshka2c260522017-05-16 08:44:24 -07001174 neigh->updated = jiffies;
Ihar Hrachyshka2c260522017-05-16 08:44:24 -07001175
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 if (new != old) {
1177 neigh_del_timer(neigh);
Erik Kline765c9c62015-05-18 19:44:41 +09001178 if (new & NUD_PROBE)
1179 atomic_set(&neigh->probes, 0);
Pavel Emelyanova43d8992007-12-20 15:49:05 -08001180 if (new & NUD_IN_TIMER)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001181 neigh_add_timer(neigh, (jiffies +
1182 ((new & NUD_REACHABLE) ?
David S. Miller667347f2005-09-27 12:07:44 -07001183 neigh->parms->reachable_time :
1184 0)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 neigh->nud_state = new;
Bob Gilligan53385d22013-12-15 13:39:56 -08001186 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 }
1188
1189 if (lladdr != neigh->ha) {
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001190 write_seqlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191 memcpy(&neigh->ha, lladdr, dev->addr_len);
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001192 write_sequnlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 neigh_update_hhs(neigh);
1194 if (!(new & NUD_CONNECTED))
1195 neigh->confirmed = jiffies -
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001196 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 }
1199 if (new == old)
1200 goto out;
1201 if (new & NUD_CONNECTED)
1202 neigh_connect(neigh);
1203 else
1204 neigh_suspect(neigh);
1205 if (!(old & NUD_VALID)) {
1206 struct sk_buff *skb;
1207
1208 /* Again: avoid dead loop if something went wrong */
1209
1210 while (neigh->nud_state & NUD_VALID &&
1211 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
David S. Miller69cce1d2011-07-17 23:09:49 -07001212 struct dst_entry *dst = skb_dst(skb);
1213 struct neighbour *n2, *n1 = neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 write_unlock_bh(&neigh->lock);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001215
1216 rcu_read_lock();
David S. Miller13a43d92012-07-02 22:15:37 -07001217
1218 /* Why not just use 'neigh' as-is? The problem is that
1219 * things such as shaper, eql, and sch_teql can end up
1220 * using alternative, different, neigh objects to output
1221 * the packet in the output path. So what we need to do
1222 * here is re-lookup the top-level neigh in the path so
1223 * we can reinject the packet there.
1224 */
1225 n2 = NULL;
1226 if (dst) {
1227 n2 = dst_neigh_lookup_skb(dst, skb);
1228 if (n2)
1229 n1 = n2;
1230 }
David S. Miller8f40b162011-07-17 13:34:11 -07001231 n1->output(n1, skb);
David S. Miller13a43d92012-07-02 22:15:37 -07001232 if (n2)
1233 neigh_release(n2);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001234 rcu_read_unlock();
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 write_lock_bh(&neigh->lock);
1237 }
Eric Dumazetc9ab4d82013-06-28 02:37:42 -07001238 __skb_queue_purge(&neigh->arp_queue);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001239 neigh->arp_queue_len_bytes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 }
1241out:
1242 if (update_isrouter) {
1243 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1244 (neigh->flags | NTF_ROUTER) :
1245 (neigh->flags & ~NTF_ROUTER);
1246 }
1247 write_unlock_bh(&neigh->lock);
Tom Tucker8d717402006-07-30 20:43:36 -07001248
1249 if (notify)
Thomas Grafd961db32007-08-08 23:12:56 -07001250 neigh_update_notify(neigh);
1251
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 return err;
1253}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001254EXPORT_SYMBOL(neigh_update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
Jiri Benc7e980562013-12-11 13:48:20 +01001256/* Update the neigh to listen temporarily for probe responses, even if it is
1257 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1258 */
1259void __neigh_set_probe_once(struct neighbour *neigh)
1260{
Julian Anastasov2c51a972015-06-16 22:56:39 +03001261 if (neigh->dead)
1262 return;
Jiri Benc7e980562013-12-11 13:48:20 +01001263 neigh->updated = jiffies;
1264 if (!(neigh->nud_state & NUD_FAILED))
1265 return;
Duan Jiong2176d5d2014-05-09 13:16:48 +08001266 neigh->nud_state = NUD_INCOMPLETE;
1267 atomic_set(&neigh->probes, neigh_max_probes(neigh));
Jiri Benc7e980562013-12-11 13:48:20 +01001268 neigh_add_timer(neigh,
1269 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1270}
1271EXPORT_SYMBOL(__neigh_set_probe_once);
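/* Hedged usage sketch (not a call site in this file): per the comment above,
 * a hypothetical resolver path that wants to accept a late probe response
 * would invoke the helper with the entry locked for writing:
 *
 *	write_lock_bh(&neigh->lock);
 *	__neigh_set_probe_once(neigh);
 *	write_unlock_bh(&neigh->lock);
 *
 * Lookup and refcount handling of the imaginary caller are omitted.
 */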
1272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1274 u8 *lladdr, void *saddr,
1275 struct net_device *dev)
1276{
1277 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1278 lladdr || !dev->addr_len);
1279 if (neigh)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001280 neigh_update(neigh, lladdr, NUD_STALE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 NEIGH_UPDATE_F_OVERRIDE);
1282 return neigh;
1283}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001284EXPORT_SYMBOL(neigh_event_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285
Eric Dumazet34d101d2010-10-11 09:16:57 -07001286/* Serializes hh_cache setup by taking write_lock_bh(&n->lock) internally. */
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001287static void neigh_hh_init(struct neighbour *n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288{
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001289 struct net_device *dev = n->dev;
1290 __be16 prot = n->tbl->protocol;
David S. Millerf6b72b62011-07-14 07:53:20 -07001291 struct hh_cache *hh = &n->hh;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001292
1293 write_lock_bh(&n->lock);
Eric Dumazet34d101d2010-10-11 09:16:57 -07001294
David S. Millerf6b72b62011-07-14 07:53:20 -07001295 /* Only one thread can come in here and initialize the
1296 * hh_cache entry.
1297 */
David S. Millerb23b5452011-07-16 17:45:02 -07001298 if (!hh->hh_len)
1299 dev->header_ops->cache(n, hh, prot);
David S. Millerf6b72b62011-07-14 07:53:20 -07001300
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001301 write_unlock_bh(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302}
1303
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304/* Slow and careful. */
1305
David S. Miller8f40b162011-07-17 13:34:11 -07001306int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 int rc = 0;
1309
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 if (!neigh_event_send(neigh, skb)) {
1311 int err;
1312 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001313 unsigned int seq;
Eric Dumazet34d101d2010-10-11 09:16:57 -07001314
David S. Millerf6b72b62011-07-14 07:53:20 -07001315 if (dev->header_ops->cache && !neigh->hh.hh_len)
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001316 neigh_hh_init(neigh);
Eric Dumazet34d101d2010-10-11 09:16:57 -07001317
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001318 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001319 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001320 seq = read_seqbegin(&neigh->ha_lock);
1321 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1322 neigh->ha, NULL, skb->len);
1323 } while (read_seqretry(&neigh->ha_lock, seq));
Eric Dumazet34d101d2010-10-11 09:16:57 -07001324
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001326 rc = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 else
1328 goto out_kfree_skb;
1329 }
1330out:
1331 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332out_kfree_skb:
1333 rc = -EINVAL;
1334 kfree_skb(skb);
1335 goto out;
1336}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001337EXPORT_SYMBOL(neigh_resolve_output);
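/* The ha_lock loop above (repeated in neigh_connected_output below) is the
 * usual seqlock reader pattern: retry the copy whenever a writer raced with
 * us. Generic sketch, with ha_copy as a hypothetical local buffer of at
 * least dev->addr_len bytes:
 *
 *	unsigned int seq;
 *	do {
 *		seq = read_seqbegin(&neigh->ha_lock);
 *		memcpy(ha_copy, neigh->ha, dev->addr_len);
 *	} while (read_seqretry(&neigh->ha_lock, seq));
 *
 * The matching writer side is the write_seqlock()/write_sequnlock() pair
 * around the memcpy of neigh->ha in neigh_update().
 */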
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338
1339/* As fast as possible without hh cache */
1340
David S. Miller8f40b162011-07-17 13:34:11 -07001341int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001344 unsigned int seq;
David S. Miller8f40b162011-07-17 13:34:11 -07001345 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001347 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001348 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001349 seq = read_seqbegin(&neigh->ha_lock);
1350 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1351 neigh->ha, NULL, skb->len);
1352 } while (read_seqretry(&neigh->ha_lock, seq));
1353
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001355 err = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 else {
1357 err = -EINVAL;
1358 kfree_skb(skb);
1359 }
1360 return err;
1361}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001362EXPORT_SYMBOL(neigh_connected_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
David S. Miller8f40b162011-07-17 13:34:11 -07001364int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1365{
1366 return dev_queue_xmit(skb);
1367}
1368EXPORT_SYMBOL(neigh_direct_output);
1369
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370static void neigh_proxy_process(unsigned long arg)
1371{
1372 struct neigh_table *tbl = (struct neigh_table *)arg;
1373 long sched_next = 0;
1374 unsigned long now = jiffies;
David S. Millerf72051b2008-09-23 01:11:18 -07001375 struct sk_buff *skb, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
1377 spin_lock(&tbl->proxy_queue.lock);
1378
David S. Millerf72051b2008-09-23 01:11:18 -07001379 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1380 long tdif = NEIGH_CB(skb)->sched_next - now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 if (tdif <= 0) {
David S. Millerf72051b2008-09-23 01:11:18 -07001383 struct net_device *dev = skb->dev;
Eric Dumazet20e60742011-08-22 19:32:42 +00001384
David S. Millerf72051b2008-09-23 01:11:18 -07001385 __skb_unlink(skb, &tbl->proxy_queue);
Eric Dumazet20e60742011-08-22 19:32:42 +00001386 if (tbl->proxy_redo && netif_running(dev)) {
1387 rcu_read_lock();
David S. Millerf72051b2008-09-23 01:11:18 -07001388 tbl->proxy_redo(skb);
Eric Dumazet20e60742011-08-22 19:32:42 +00001389 rcu_read_unlock();
1390 } else {
David S. Millerf72051b2008-09-23 01:11:18 -07001391 kfree_skb(skb);
Eric Dumazet20e60742011-08-22 19:32:42 +00001392 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393
1394 dev_put(dev);
1395 } else if (!sched_next || tdif < sched_next)
1396 sched_next = tdif;
1397 }
1398 del_timer(&tbl->proxy_timer);
1399 if (sched_next)
1400 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1401 spin_unlock(&tbl->proxy_queue.lock);
1402}
1403
1404void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1405 struct sk_buff *skb)
1406{
1407 unsigned long now = jiffies;
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -05001408
1409 unsigned long sched_next = now + (prandom_u32() %
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001410 NEIGH_VAR(p, PROXY_DELAY));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001412 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 kfree_skb(skb);
1414 return;
1415 }
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001416
1417 NEIGH_CB(skb)->sched_next = sched_next;
1418 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
1420 spin_lock(&tbl->proxy_queue.lock);
1421 if (del_timer(&tbl->proxy_timer)) {
1422 if (time_before(tbl->proxy_timer.expires, sched_next))
1423 sched_next = tbl->proxy_timer.expires;
1424 }
Eric Dumazetadf30902009-06-02 05:19:30 +00001425 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 dev_hold(skb->dev);
1427 __skb_queue_tail(&tbl->proxy_queue, skb);
1428 mod_timer(&tbl->proxy_timer, sched_next);
1429 spin_unlock(&tbl->proxy_queue.lock);
1430}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001431EXPORT_SYMBOL(pneigh_enqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07001433static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
Eric W. Biederman426b5302008-01-24 00:13:18 -08001434 struct net *net, int ifindex)
1435{
1436 struct neigh_parms *p;
1437
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001438 list_for_each_entry(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001439 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
Gao feng170d6f92013-06-20 10:01:33 +08001440 (!p->dev && !ifindex && net_eq(net, &init_net)))
Eric W. Biederman426b5302008-01-24 00:13:18 -08001441 return p;
1442 }
1443
1444 return NULL;
1445}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
1447struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1448 struct neigh_table *tbl)
1449{
Gao fengcf89d6b2013-06-20 10:01:32 +08001450 struct neigh_parms *p;
Stephen Hemminger00829822008-11-20 20:14:53 -08001451 struct net *net = dev_net(dev);
1452 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
Gao fengcf89d6b2013-06-20 10:01:32 +08001454 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 if (p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 p->tbl = tbl;
1457 atomic_set(&p->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 p->reachable_time =
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001459 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Denis V. Lunev486b51d2008-01-14 22:59:59 -08001460 dev_hold(dev);
1461 p->dev = dev;
Eric W. Biedermanefd7ef12015-03-11 23:04:08 -05001462 write_pnet(&p->net, net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 p->sysctl_table = NULL;
Veaceslav Falico63134802013-08-02 19:07:38 +02001464
1465 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
Veaceslav Falico63134802013-08-02 19:07:38 +02001466 dev_put(dev);
1467 kfree(p);
1468 return NULL;
1469 }
1470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 write_lock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001472 list_add(&p->list, &tbl->parms.list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 write_unlock_bh(&tbl->lock);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01001474
1475 neigh_parms_data_state_cleanall(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 }
1477 return p;
1478}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001479EXPORT_SYMBOL(neigh_parms_alloc);
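/* Hedged pairing sketch: a protocol that attaches per-device parameters
 * typically allocates them with the helper above and drops them again with
 * neigh_parms_release() further down. arp_tbl stands in for whichever table
 * the caller owns, and the error handling is only illustrative:
 *
 *	struct neigh_parms *p = neigh_parms_alloc(dev, &arp_tbl);
 *	if (!p)
 *		return -ENOBUFS;
 *	...
 *	neigh_parms_release(&arp_tbl, p);
 */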
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
1481static void neigh_rcu_free_parms(struct rcu_head *head)
1482{
1483 struct neigh_parms *parms =
1484 container_of(head, struct neigh_parms, rcu_head);
1485
1486 neigh_parms_put(parms);
1487}
1488
1489void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1490{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 if (!parms || parms == &tbl->parms)
1492 return;
1493 write_lock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001494 list_del(&parms->list);
1495 parms->dead = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 write_unlock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001497 if (parms->dev)
1498 dev_put(parms->dev);
1499 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001501EXPORT_SYMBOL(neigh_parms_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502
Denis V. Lunev06f05112008-01-24 00:30:58 -08001503static void neigh_parms_destroy(struct neigh_parms *parms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504{
1505 kfree(parms);
1506}
1507
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001508static struct lock_class_key neigh_table_proxy_queue_class;
1509
WANG Congd7480fd2014-11-10 15:59:36 -08001510static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1511
1512void neigh_table_init(int index, struct neigh_table *tbl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513{
1514 unsigned long now = jiffies;
1515 unsigned long phsize;
1516
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001517 INIT_LIST_HEAD(&tbl->parms_list);
1518 list_add(&tbl->parms.list, &tbl->parms_list);
Eric Dumazete42ea982008-11-12 00:54:54 -08001519 write_pnet(&tbl->parms.net, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 atomic_set(&tbl->parms.refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 tbl->parms.reachable_time =
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001522 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 tbl->stats = alloc_percpu(struct neigh_statistics);
1525 if (!tbl->stats)
1526 panic("cannot create neighbour cache statistics");
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528#ifdef CONFIG_PROC_FS
Alexey Dobriyan9b739ba2008-11-11 16:47:44 -08001529 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1530 &neigh_stat_seq_fops, tbl))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 panic("cannot create neighbour proc dir entry");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532#endif
1533
David S. Millercd089332011-07-11 01:28:12 -07001534 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535
1536 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
Andrew Morton77d04bd2006-04-07 14:52:59 -07001537 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001539 if (!tbl->nht || !tbl->phash_buckets)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 panic("cannot allocate neighbour cache hashes");
1541
YOSHIFUJI Hideaki / 吉藤英明08433ef2013-01-24 00:44:23 +00001542 if (!tbl->entry_size)
1543 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1544 tbl->key_len, NEIGH_PRIV_ALIGN);
1545 else
1546 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1547
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 rwlock_init(&tbl->lock);
Tejun Heo203b42f2012-08-21 13:18:23 -07001549 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
viresh kumarf6180022014-01-22 12:23:33 +05301550 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1551 tbl->parms.reachable_time);
Pavel Emelyanovb24b8a22008-01-23 21:20:07 -08001552 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001553 skb_queue_head_init_class(&tbl->proxy_queue,
1554 &neigh_table_proxy_queue_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
1556 tbl->last_flush = now;
1557 tbl->last_rand = now + tbl->parms.reachable_time * 20;
Simon Kelleybd89efc2006-05-12 14:56:08 -07001558
WANG Congd7480fd2014-11-10 15:59:36 -08001559 neigh_tables[index] = tbl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001561EXPORT_SYMBOL(neigh_table_init);
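/* Hedged registration sketch: the per-protocol code (outside this file) is
 * expected to pair neigh_table_init() with neigh_table_clear() below, using
 * one of the NEIGH_NR_TABLES slots consumed by neigh_find_table():
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);	(protocol init)
 *	...
 *	neigh_table_clear(NEIGH_ARP_TABLE, &arp_tbl);	(protocol teardown)
 */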
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562
WANG Congd7480fd2014-11-10 15:59:36 -08001563int neigh_table_clear(int index, struct neigh_table *tbl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564{
WANG Congd7480fd2014-11-10 15:59:36 -08001565 neigh_tables[index] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
Tejun Heoa5c30b32010-10-19 06:04:42 +00001567 cancel_delayed_work_sync(&tbl->gc_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 del_timer_sync(&tbl->proxy_timer);
1569 pneigh_queue_purge(&tbl->proxy_queue);
1570 neigh_ifdown(tbl, NULL);
1571 if (atomic_read(&tbl->entries))
Joe Perchese005d192012-05-16 19:58:40 +00001572 pr_crit("neighbour leakage\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Eric Dumazet6193d2b2011-01-19 22:02:47 +00001574 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1575 neigh_hash_free_rcu);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001576 tbl->nht = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
1578 kfree(tbl->phash_buckets);
1579 tbl->phash_buckets = NULL;
1580
Alexey Dobriyan3f192b52007-11-05 21:28:13 -08001581 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1582
Kirill Korotaev3fcde742006-09-01 01:34:10 -07001583 free_percpu(tbl->stats);
1584 tbl->stats = NULL;
1585
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 return 0;
1587}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001588EXPORT_SYMBOL(neigh_table_clear);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
WANG Congd7480fd2014-11-10 15:59:36 -08001590static struct neigh_table *neigh_find_table(int family)
1591{
1592 struct neigh_table *tbl = NULL;
1593
1594 switch (family) {
1595 case AF_INET:
1596 tbl = neigh_tables[NEIGH_ARP_TABLE];
1597 break;
1598 case AF_INET6:
1599 tbl = neigh_tables[NEIGH_ND_TABLE];
1600 break;
1601 case AF_DECnet:
1602 tbl = neigh_tables[NEIGH_DN_TABLE];
1603 break;
1604 }
1605
1606 return tbl;
1607}
1608
Thomas Graf661d2962013-03-21 07:45:29 +00001609static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001611 struct net *net = sock_net(skb->sk);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001612 struct ndmsg *ndm;
1613 struct nlattr *dst_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 struct neigh_table *tbl;
WANG Congd7480fd2014-11-10 15:59:36 -08001615 struct neighbour *neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 struct net_device *dev = NULL;
Thomas Grafa14a49d2006-08-07 17:53:08 -07001617 int err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618
Eric Dumazet110b2492010-10-04 04:27:36 +00001619 ASSERT_RTNL();
Thomas Grafa14a49d2006-08-07 17:53:08 -07001620 if (nlmsg_len(nlh) < sizeof(*ndm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 goto out;
1622
Thomas Grafa14a49d2006-08-07 17:53:08 -07001623 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1624 if (dst_attr == NULL)
1625 goto out;
1626
1627 ndm = nlmsg_data(nlh);
1628 if (ndm->ndm_ifindex) {
Eric Dumazet110b2492010-10-04 04:27:36 +00001629 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001630 if (dev == NULL) {
1631 err = -ENODEV;
1632 goto out;
1633 }
1634 }
1635
WANG Congd7480fd2014-11-10 15:59:36 -08001636 tbl = neigh_find_table(ndm->ndm_family);
1637 if (tbl == NULL)
1638 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
WANG Congd7480fd2014-11-10 15:59:36 -08001640 if (nla_len(dst_attr) < tbl->key_len)
1641 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
WANG Congd7480fd2014-11-10 15:59:36 -08001643 if (ndm->ndm_flags & NTF_PROXY) {
1644 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
Eric Dumazet110b2492010-10-04 04:27:36 +00001645 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 }
WANG Congd7480fd2014-11-10 15:59:36 -08001647
1648 if (dev == NULL)
1649 goto out;
1650
1651 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1652 if (neigh == NULL) {
1653 err = -ENOENT;
1654 goto out;
1655 }
1656
1657 err = neigh_update(neigh, NULL, NUD_FAILED,
1658 NEIGH_UPDATE_F_OVERRIDE |
1659 NEIGH_UPDATE_F_ADMIN);
1660 neigh_release(neigh);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001661
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662out:
1663 return err;
1664}
1665
Thomas Graf661d2962013-03-21 07:45:29 +00001666static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
WANG Congd7480fd2014-11-10 15:59:36 -08001668 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001669 struct net *net = sock_net(skb->sk);
Thomas Graf5208deb2006-08-07 17:55:40 -07001670 struct ndmsg *ndm;
1671 struct nlattr *tb[NDA_MAX+1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 struct neigh_table *tbl;
1673 struct net_device *dev = NULL;
WANG Congd7480fd2014-11-10 15:59:36 -08001674 struct neighbour *neigh;
1675 void *dst, *lladdr;
Thomas Graf5208deb2006-08-07 17:55:40 -07001676 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Eric Dumazet110b2492010-10-04 04:27:36 +00001678 ASSERT_RTNL();
Thomas Graf5208deb2006-08-07 17:55:40 -07001679 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1680 if (err < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 goto out;
1682
Thomas Graf5208deb2006-08-07 17:55:40 -07001683 err = -EINVAL;
1684 if (tb[NDA_DST] == NULL)
1685 goto out;
1686
1687 ndm = nlmsg_data(nlh);
1688 if (ndm->ndm_ifindex) {
Eric Dumazet110b2492010-10-04 04:27:36 +00001689 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
Thomas Graf5208deb2006-08-07 17:55:40 -07001690 if (dev == NULL) {
1691 err = -ENODEV;
1692 goto out;
1693 }
1694
1695 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
Eric Dumazet110b2492010-10-04 04:27:36 +00001696 goto out;
Thomas Graf5208deb2006-08-07 17:55:40 -07001697 }
1698
WANG Congd7480fd2014-11-10 15:59:36 -08001699 tbl = neigh_find_table(ndm->ndm_family);
1700 if (tbl == NULL)
1701 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
WANG Congd7480fd2014-11-10 15:59:36 -08001703 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1704 goto out;
1705 dst = nla_data(tb[NDA_DST]);
1706 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
WANG Congd7480fd2014-11-10 15:59:36 -08001708 if (ndm->ndm_flags & NTF_PROXY) {
1709 struct pneigh_entry *pn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
WANG Congd7480fd2014-11-10 15:59:36 -08001711 err = -ENOBUFS;
1712 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1713 if (pn) {
1714 pn->flags = ndm->ndm_flags;
Eric Biederman0c5c2d32009-03-04 00:03:08 -08001715 err = 0;
WANG Congd7480fd2014-11-10 15:59:36 -08001716 }
Eric Dumazet110b2492010-10-04 04:27:36 +00001717 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 }
1719
WANG Congd7480fd2014-11-10 15:59:36 -08001720 if (dev == NULL)
1721 goto out;
1722
1723 neigh = neigh_lookup(tbl, dst, dev);
1724 if (neigh == NULL) {
1725 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1726 err = -ENOENT;
1727 goto out;
1728 }
1729
1730 neigh = __neigh_lookup_errno(tbl, dst, dev);
1731 if (IS_ERR(neigh)) {
1732 err = PTR_ERR(neigh);
1733 goto out;
1734 }
1735 } else {
1736 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1737 err = -EEXIST;
1738 neigh_release(neigh);
1739 goto out;
1740 }
1741
1742 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1743 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1744 }
1745
1746 if (ndm->ndm_flags & NTF_USE) {
1747 neigh_event_send(neigh, NULL);
1748 err = 0;
1749 } else
1750 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1751 neigh_release(neigh);
1752
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753out:
1754 return err;
1755}
1756
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001757static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1758{
Thomas Grafca860fb2006-08-07 18:00:18 -07001759 struct nlattr *nest;
1760
1761 nest = nla_nest_start(skb, NDTA_PARMS);
1762 if (nest == NULL)
1763 return -ENOBUFS;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001764
David S. Miller9a6308d2012-04-01 20:06:28 -04001765 if ((parms->dev &&
1766 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1767 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001768 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1769 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04001770	    /* approximate value for deprecated QUEUE_LEN (in packets) */
1771 nla_put_u32(skb, NDTPA_QUEUE_LEN,
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001772 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1773 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1774 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1775 nla_put_u32(skb, NDTPA_UCAST_PROBES,
1776 NEIGH_VAR(parms, UCAST_PROBES)) ||
1777 nla_put_u32(skb, NDTPA_MCAST_PROBES,
1778 NEIGH_VAR(parms, MCAST_PROBES)) ||
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09001779 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1780 NEIGH_VAR(parms, MCAST_REPROBES)) ||
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001781 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1782 NDTPA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04001783 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001784 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001785 nla_put_msecs(skb, NDTPA_GC_STALETIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001786 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04001787 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001788 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001789 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001790 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001791 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001792 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001793 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001794 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
Jiri Pirko1f9248e52013-12-07 19:26:53 +01001795 nla_put_msecs(skb, NDTPA_LOCKTIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001796 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
David S. Miller9a6308d2012-04-01 20:06:28 -04001797 goto nla_put_failure;
Thomas Grafca860fb2006-08-07 18:00:18 -07001798 return nla_nest_end(skb, nest);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001799
Thomas Grafca860fb2006-08-07 18:00:18 -07001800nla_put_failure:
Thomas Grafbc3ed282008-06-03 16:36:54 -07001801 nla_nest_cancel(skb, nest);
1802 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001803}
1804
Thomas Grafca860fb2006-08-07 18:00:18 -07001805static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1806 u32 pid, u32 seq, int type, int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001807{
1808 struct nlmsghdr *nlh;
1809 struct ndtmsg *ndtmsg;
1810
Thomas Grafca860fb2006-08-07 18:00:18 -07001811 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1812 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08001813 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001814
Thomas Grafca860fb2006-08-07 18:00:18 -07001815 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001816
1817 read_lock_bh(&tbl->lock);
1818 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07001819 ndtmsg->ndtm_pad1 = 0;
1820 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001821
David S. Miller9a6308d2012-04-01 20:06:28 -04001822 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
Nicolas Dichtel2175d872016-04-22 17:31:21 +02001823 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04001824 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1825 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1826 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1827 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001828 {
1829 unsigned long now = jiffies;
1830 unsigned int flush_delta = now - tbl->last_flush;
1831 unsigned int rand_delta = now - tbl->last_rand;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001832 struct neigh_hash_table *nht;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001833 struct ndt_config ndc = {
1834 .ndtc_key_len = tbl->key_len,
1835 .ndtc_entry_size = tbl->entry_size,
1836 .ndtc_entries = atomic_read(&tbl->entries),
1837 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1838 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001839 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1840 };
1841
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001842 rcu_read_lock_bh();
1843 nht = rcu_dereference_bh(tbl->nht);
David S. Miller2c2aba62011-12-28 15:06:58 -05001844 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
David S. Millercd089332011-07-11 01:28:12 -07001845 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001846 rcu_read_unlock_bh();
1847
David S. Miller9a6308d2012-04-01 20:06:28 -04001848 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1849 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001850 }
1851
1852 {
1853 int cpu;
1854 struct ndt_stats ndst;
1855
1856 memset(&ndst, 0, sizeof(ndst));
1857
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07001858 for_each_possible_cpu(cpu) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001859 struct neigh_statistics *st;
1860
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001861 st = per_cpu_ptr(tbl->stats, cpu);
1862 ndst.ndts_allocs += st->allocs;
1863 ndst.ndts_destroys += st->destroys;
1864 ndst.ndts_hash_grows += st->hash_grows;
1865 ndst.ndts_res_failed += st->res_failed;
1866 ndst.ndts_lookups += st->lookups;
1867 ndst.ndts_hits += st->hits;
1868 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1869 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1870 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1871 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
Rick Jonesfb811392015-08-07 11:10:37 -07001872 ndst.ndts_table_fulls += st->table_fulls;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001873 }
1874
Nicolas Dichtelb6763382016-04-26 10:06:17 +02001875 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1876 NDTA_PAD))
David S. Miller9a6308d2012-04-01 20:06:28 -04001877 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001878 }
1879
1880 BUG_ON(tbl->parms.dev);
1881 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
Thomas Grafca860fb2006-08-07 18:00:18 -07001882 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001883
1884 read_unlock_bh(&tbl->lock);
Johannes Berg053c0952015-01-16 22:09:00 +01001885 nlmsg_end(skb, nlh);
1886 return 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001887
Thomas Grafca860fb2006-08-07 18:00:18 -07001888nla_put_failure:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001889 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08001890 nlmsg_cancel(skb, nlh);
1891 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001892}
1893
Thomas Grafca860fb2006-08-07 18:00:18 -07001894static int neightbl_fill_param_info(struct sk_buff *skb,
1895 struct neigh_table *tbl,
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001896 struct neigh_parms *parms,
Thomas Grafca860fb2006-08-07 18:00:18 -07001897 u32 pid, u32 seq, int type,
1898 unsigned int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001899{
1900 struct ndtmsg *ndtmsg;
1901 struct nlmsghdr *nlh;
1902
Thomas Grafca860fb2006-08-07 18:00:18 -07001903 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1904 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08001905 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001906
Thomas Grafca860fb2006-08-07 18:00:18 -07001907 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001908
1909 read_lock_bh(&tbl->lock);
1910 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07001911 ndtmsg->ndtm_pad1 = 0;
1912 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001913
Thomas Grafca860fb2006-08-07 18:00:18 -07001914 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1915 neightbl_fill_parms(skb, parms) < 0)
1916 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001917
1918 read_unlock_bh(&tbl->lock);
Johannes Berg053c0952015-01-16 22:09:00 +01001919 nlmsg_end(skb, nlh);
1920 return 0;
Thomas Grafca860fb2006-08-07 18:00:18 -07001921errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001922 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08001923 nlmsg_cancel(skb, nlh);
1924 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001925}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001926
Patrick McHardyef7c79e2007-06-05 12:38:30 -07001927static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07001928 [NDTA_NAME] = { .type = NLA_STRING },
1929 [NDTA_THRESH1] = { .type = NLA_U32 },
1930 [NDTA_THRESH2] = { .type = NLA_U32 },
1931 [NDTA_THRESH3] = { .type = NLA_U32 },
1932 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1933 [NDTA_PARMS] = { .type = NLA_NESTED },
1934};
1935
Patrick McHardyef7c79e2007-06-05 12:38:30 -07001936static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07001937 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1938 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1939 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1940 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1941 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1942 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09001943 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
Thomas Graf6b3f8672006-08-07 17:58:53 -07001944 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1945 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1946 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1947 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1948 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1949 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1950 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1951};
1952
Thomas Graf661d2962013-03-21 07:45:29 +00001953static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001954{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001955 struct net *net = sock_net(skb->sk);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001956 struct neigh_table *tbl;
Thomas Graf6b3f8672006-08-07 17:58:53 -07001957 struct ndtmsg *ndtmsg;
1958 struct nlattr *tb[NDTA_MAX+1];
WANG Congd7480fd2014-11-10 15:59:36 -08001959 bool found = false;
1960 int err, tidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001961
Thomas Graf6b3f8672006-08-07 17:58:53 -07001962 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1963 nl_neightbl_policy);
1964 if (err < 0)
1965 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001966
Thomas Graf6b3f8672006-08-07 17:58:53 -07001967 if (tb[NDTA_NAME] == NULL) {
1968 err = -EINVAL;
1969 goto errout;
1970 }
1971
1972 ndtmsg = nlmsg_data(nlh);
WANG Congd7480fd2014-11-10 15:59:36 -08001973
1974 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1975 tbl = neigh_tables[tidx];
1976 if (!tbl)
1977 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001978 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1979 continue;
WANG Congd7480fd2014-11-10 15:59:36 -08001980 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1981 found = true;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001982 break;
WANG Congd7480fd2014-11-10 15:59:36 -08001983 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001984 }
1985
WANG Congd7480fd2014-11-10 15:59:36 -08001986 if (!found)
1987 return -ENOENT;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001988
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001989 /*
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001990 * We acquire tbl->lock to be nice to the periodic timers and
1991 * make sure they always see a consistent set of values.
1992 */
1993 write_lock_bh(&tbl->lock);
1994
Thomas Graf6b3f8672006-08-07 17:58:53 -07001995 if (tb[NDTA_PARMS]) {
1996 struct nlattr *tbp[NDTPA_MAX+1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001997 struct neigh_parms *p;
Thomas Graf6b3f8672006-08-07 17:58:53 -07001998 int i, ifindex = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001999
Thomas Graf6b3f8672006-08-07 17:58:53 -07002000 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2001 nl_ntbl_parm_policy);
2002 if (err < 0)
2003 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002004
Thomas Graf6b3f8672006-08-07 17:58:53 -07002005 if (tbp[NDTPA_IFINDEX])
2006 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002007
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07002008 p = lookup_neigh_parms(tbl, net, ifindex);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002009 if (p == NULL) {
2010 err = -ENOENT;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002011 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002012 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002013
Thomas Graf6b3f8672006-08-07 17:58:53 -07002014 for (i = 1; i <= NDTPA_MAX; i++) {
2015 if (tbp[i] == NULL)
2016 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002017
Thomas Graf6b3f8672006-08-07 17:58:53 -07002018 switch (i) {
2019 case NDTPA_QUEUE_LEN:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002020 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2021 nla_get_u32(tbp[i]) *
2022 SKB_TRUESIZE(ETH_FRAME_LEN));
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002023 break;
2024 case NDTPA_QUEUE_LENBYTES:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002025 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2026 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002027 break;
2028 case NDTPA_PROXY_QLEN:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002029 NEIGH_VAR_SET(p, PROXY_QLEN,
2030 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002031 break;
2032 case NDTPA_APP_PROBES:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002033 NEIGH_VAR_SET(p, APP_PROBES,
2034 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002035 break;
2036 case NDTPA_UCAST_PROBES:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002037 NEIGH_VAR_SET(p, UCAST_PROBES,
2038 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002039 break;
2040 case NDTPA_MCAST_PROBES:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002041 NEIGH_VAR_SET(p, MCAST_PROBES,
2042 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002043 break;
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002044 case NDTPA_MCAST_REPROBES:
2045 NEIGH_VAR_SET(p, MCAST_REPROBES,
2046 nla_get_u32(tbp[i]));
2047 break;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002048 case NDTPA_BASE_REACHABLE_TIME:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002049 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2050 nla_get_msecs(tbp[i]));
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01002051 /* update reachable_time as well, otherwise, the change will
2052 * only be effective after the next time neigh_periodic_work
2053 * decides to recompute it (can be multiple minutes)
2054 */
2055 p->reachable_time =
2056 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002057 break;
2058 case NDTPA_GC_STALETIME:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002059 NEIGH_VAR_SET(p, GC_STALETIME,
2060 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002061 break;
2062 case NDTPA_DELAY_PROBE_TIME:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002063 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2064 nla_get_msecs(tbp[i]));
Ido Schimmel2a4501a2016-07-05 11:27:42 +02002065 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002066 break;
2067 case NDTPA_RETRANS_TIME:
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002068 NEIGH_VAR_SET(p, RETRANS_TIME,
2069 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002070 break;
2071 case NDTPA_ANYCAST_DELAY:
Jiri Pirko39774582014-01-14 15:46:07 +01002072 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2073 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002074 break;
2075 case NDTPA_PROXY_DELAY:
Jiri Pirko39774582014-01-14 15:46:07 +01002076 NEIGH_VAR_SET(p, PROXY_DELAY,
2077 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002078 break;
2079 case NDTPA_LOCKTIME:
Jiri Pirko39774582014-01-14 15:46:07 +01002080 NEIGH_VAR_SET(p, LOCKTIME,
2081 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002082 break;
2083 }
2084 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002085 }
2086
Gao fengdc25c672013-06-20 10:01:34 +08002087 err = -ENOENT;
2088 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2089 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2090 !net_eq(net, &init_net))
2091 goto errout_tbl_lock;
2092
Thomas Graf6b3f8672006-08-07 17:58:53 -07002093 if (tb[NDTA_THRESH1])
2094 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2095
2096 if (tb[NDTA_THRESH2])
2097 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2098
2099 if (tb[NDTA_THRESH3])
2100 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2101
2102 if (tb[NDTA_GC_INTERVAL])
2103 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2104
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002105 err = 0;
2106
Thomas Graf6b3f8672006-08-07 17:58:53 -07002107errout_tbl_lock:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002108 write_unlock_bh(&tbl->lock);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002109errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002110 return err;
2111}
2112
Thomas Grafc8822a42007-03-22 11:50:06 -07002113static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002114{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002115 struct net *net = sock_net(skb->sk);
Thomas Grafca860fb2006-08-07 18:00:18 -07002116 int family, tidx, nidx = 0;
2117 int tbl_skip = cb->args[0];
2118 int neigh_skip = cb->args[1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002119 struct neigh_table *tbl;
2120
Thomas Grafca860fb2006-08-07 18:00:18 -07002121 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002122
WANG Congd7480fd2014-11-10 15:59:36 -08002123 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002124 struct neigh_parms *p;
2125
WANG Congd7480fd2014-11-10 15:59:36 -08002126 tbl = neigh_tables[tidx];
2127 if (!tbl)
2128 continue;
2129
Thomas Grafca860fb2006-08-07 18:00:18 -07002130 if (tidx < tbl_skip || (family && tbl->family != family))
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002131 continue;
2132
Eric W. Biederman15e47302012-09-07 20:12:54 +00002133 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
Thomas Grafca860fb2006-08-07 18:00:18 -07002134 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
David S. Miller7b46a642015-01-18 23:36:08 -05002135 NLM_F_MULTI) < 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002136 break;
2137
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01002138 nidx = 0;
2139 p = list_next_entry(&tbl->parms, list);
2140 list_for_each_entry_from(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002141 if (!net_eq(neigh_parms_net(p), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002142 continue;
2143
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002144 if (nidx < neigh_skip)
2145 goto next;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002146
Thomas Grafca860fb2006-08-07 18:00:18 -07002147 if (neightbl_fill_param_info(skb, tbl, p,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002148 NETLINK_CB(cb->skb).portid,
Thomas Grafca860fb2006-08-07 18:00:18 -07002149 cb->nlh->nlmsg_seq,
2150 RTM_NEWNEIGHTBL,
David S. Miller7b46a642015-01-18 23:36:08 -05002151 NLM_F_MULTI) < 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002152 goto out;
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002153 next:
2154 nidx++;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002155 }
2156
Thomas Grafca860fb2006-08-07 18:00:18 -07002157 neigh_skip = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002158 }
2159out:
Thomas Grafca860fb2006-08-07 18:00:18 -07002160 cb->args[0] = tidx;
2161 cb->args[1] = nidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002162
2163 return skb->len;
2164}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
Thomas Graf8b8aec52006-08-07 17:56:37 -07002166static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2167 u32 pid, u32 seq, int type, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168{
2169 unsigned long now = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 struct nda_cacheinfo ci;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002171 struct nlmsghdr *nlh;
2172 struct ndmsg *ndm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
Thomas Graf8b8aec52006-08-07 17:56:37 -07002174 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2175 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002176 return -EMSGSIZE;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002177
2178 ndm = nlmsg_data(nlh);
2179 ndm->ndm_family = neigh->ops->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002180 ndm->ndm_pad1 = 0;
2181 ndm->ndm_pad2 = 0;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002182 ndm->ndm_flags = neigh->flags;
2183 ndm->ndm_type = neigh->type;
2184 ndm->ndm_ifindex = neigh->dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
David S. Miller9a6308d2012-04-01 20:06:28 -04002186 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2187 goto nla_put_failure;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002188
2189 read_lock_bh(&neigh->lock);
2190 ndm->ndm_state = neigh->nud_state;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00002191 if (neigh->nud_state & NUD_VALID) {
2192 char haddr[MAX_ADDR_LEN];
2193
2194 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2195 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2196 read_unlock_bh(&neigh->lock);
2197 goto nla_put_failure;
2198 }
Thomas Graf8b8aec52006-08-07 17:56:37 -07002199 }
2200
Stephen Hemmingerb9f5f522008-06-03 16:03:15 -07002201 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2202 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2203 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
Thomas Graf8b8aec52006-08-07 17:56:37 -07002204 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2205 read_unlock_bh(&neigh->lock);
2206
David S. Miller9a6308d2012-04-01 20:06:28 -04002207 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2208 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2209 goto nla_put_failure;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002210
Johannes Berg053c0952015-01-16 22:09:00 +01002211 nlmsg_end(skb, nlh);
2212 return 0;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002213
2214nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08002215 nlmsg_cancel(skb, nlh);
2216 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217}
2218
Tony Zelenoff84920c12012-01-26 22:28:58 +00002219static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2220 u32 pid, u32 seq, int type, unsigned int flags,
2221 struct neigh_table *tbl)
2222{
2223 struct nlmsghdr *nlh;
2224 struct ndmsg *ndm;
2225
2226 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2227 if (nlh == NULL)
2228 return -EMSGSIZE;
2229
2230 ndm = nlmsg_data(nlh);
2231 ndm->ndm_family = tbl->family;
2232 ndm->ndm_pad1 = 0;
2233 ndm->ndm_pad2 = 0;
2234 ndm->ndm_flags = pn->flags | NTF_PROXY;
Jun Zhao545469f2014-07-26 00:38:59 +08002235 ndm->ndm_type = RTN_UNICAST;
Konstantin Khlebnikov6adc5fd2015-12-01 01:14:48 +03002236 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002237 ndm->ndm_state = NUD_NONE;
2238
David S. Miller9a6308d2012-04-01 20:06:28 -04002239 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2240 goto nla_put_failure;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002241
Johannes Berg053c0952015-01-16 22:09:00 +01002242 nlmsg_end(skb, nlh);
2243 return 0;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002244
2245nla_put_failure:
2246 nlmsg_cancel(skb, nlh);
2247 return -EMSGSIZE;
2248}
2249
Thomas Grafd961db32007-08-08 23:12:56 -07002250static void neigh_update_notify(struct neighbour *neigh)
2251{
2252 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2253 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2254}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
David Ahern21fdd092015-09-29 09:32:03 -07002256static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2257{
2258 struct net_device *master;
2259
2260 if (!master_idx)
2261 return false;
2262
2263 master = netdev_master_upper_dev_get(dev);
2264 if (!master || master->ifindex != master_idx)
2265 return true;
2266
2267 return false;
2268}
2269
David Ahern16660f02015-10-03 11:43:46 -07002270static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2271{
2272 if (filter_idx && dev->ifindex != filter_idx)
2273 return true;
2274
2275 return false;
2276}
2277
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2279 struct netlink_callback *cb)
2280{
Eric Dumazet767e97e2010-10-06 17:49:21 -07002281 struct net *net = sock_net(skb->sk);
David Ahern21fdd092015-09-29 09:32:03 -07002282 const struct nlmsghdr *nlh = cb->nlh;
2283 struct nlattr *tb[NDA_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 struct neighbour *n;
2285 int rc, h, s_h = cb->args[1];
2286 int idx, s_idx = idx = cb->args[2];
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002287 struct neigh_hash_table *nht;
David Ahern16660f02015-10-03 11:43:46 -07002288 int filter_master_idx = 0, filter_idx = 0;
David Ahern21fdd092015-09-29 09:32:03 -07002289 unsigned int flags = NLM_F_MULTI;
2290 int err;
2291
2292 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2293 if (!err) {
Eric Dumazet0e770d22018-04-11 14:46:00 -07002294 if (tb[NDA_IFINDEX]) {
2295 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2296 return -EINVAL;
David Ahern16660f02015-10-03 11:43:46 -07002297 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
Eric Dumazet0e770d22018-04-11 14:46:00 -07002298 }
2299 if (tb[NDA_MASTER]) {
2300 if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2301 return -EINVAL;
David Ahern21fdd092015-09-29 09:32:03 -07002302 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
Eric Dumazet0e770d22018-04-11 14:46:00 -07002303 }
David Ahern16660f02015-10-03 11:43:46 -07002304 if (filter_idx || filter_master_idx)
David Ahern21fdd092015-09-29 09:32:03 -07002305 flags |= NLM_F_DUMP_FILTERED;
2306 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002308 rcu_read_lock_bh();
2309 nht = rcu_dereference_bh(tbl->nht);
2310
Eric Dumazet4bd66832012-06-07 04:58:35 +00002311 for (h = s_h; h < (1 << nht->hash_shift); h++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 if (h > s_h)
2313 s_idx = 0;
Eric Dumazet767e97e2010-10-06 17:49:21 -07002314 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2315 n != NULL;
2316 n = rcu_dereference_bh(n->next)) {
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002317 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002318 continue;
David Ahern16660f02015-10-03 11:43:46 -07002319 if (neigh_ifindex_filtered(n->dev, filter_idx))
2320 continue;
David Ahern21fdd092015-09-29 09:32:03 -07002321 if (neigh_master_filtered(n->dev, filter_master_idx))
2322 continue;
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002323 if (idx < s_idx)
2324 goto next;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002325 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 cb->nlh->nlmsg_seq,
Jamal Hadi Salimb6544c02005-06-18 22:54:12 -07002327 RTM_NEWNEIGH,
David Ahern21fdd092015-09-29 09:32:03 -07002328 flags) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 rc = -1;
2330 goto out;
2331 }
Eric Dumazet767e97e2010-10-06 17:49:21 -07002332next:
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002333 idx++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 }
2336 rc = skb->len;
2337out:
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002338 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 cb->args[1] = h;
2340 cb->args[2] = idx;
2341 return rc;
2342}
2343
Tony Zelenoff84920c12012-01-26 22:28:58 +00002344static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2345 struct netlink_callback *cb)
2346{
2347 struct pneigh_entry *n;
2348 struct net *net = sock_net(skb->sk);
2349 int rc, h, s_h = cb->args[3];
2350 int idx, s_idx = idx = cb->args[4];
2351
2352 read_lock_bh(&tbl->lock);
2353
Eric Dumazet4bd66832012-06-07 04:58:35 +00002354 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
Tony Zelenoff84920c12012-01-26 22:28:58 +00002355 if (h > s_h)
2356 s_idx = 0;
2357 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
Konstantin Khlebnikov6adc5fd2015-12-01 01:14:48 +03002358 if (pneigh_net(n) != net)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002359 continue;
2360 if (idx < s_idx)
2361 goto next;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002362 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
Tony Zelenoff84920c12012-01-26 22:28:58 +00002363 cb->nlh->nlmsg_seq,
2364 RTM_NEWNEIGH,
David S. Miller7b46a642015-01-18 23:36:08 -05002365 NLM_F_MULTI, tbl) < 0) {
Tony Zelenoff84920c12012-01-26 22:28:58 +00002366 read_unlock_bh(&tbl->lock);
2367 rc = -1;
2368 goto out;
2369 }
2370 next:
2371 idx++;
2372 }
2373 }
2374
2375 read_unlock_bh(&tbl->lock);
2376 rc = skb->len;
2377out:
2378 cb->args[3] = h;
2379 cb->args[4] = idx;
2380 return rc;
2381
2382}
2383
Thomas Grafc8822a42007-03-22 11:50:06 -07002384static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385{
2386 struct neigh_table *tbl;
2387 int t, family, s_t;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002388 int proxy = 0;
Eric Dumazet4bd66832012-06-07 04:58:35 +00002389 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
Thomas Graf8b8aec52006-08-07 17:56:37 -07002391 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002392
 2393	/* check that a full ndmsg structure is present; the family member
 2394	 * is at the same offset in both structures
2395 */
2396 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2397 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2398 proxy = 1;
2399
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 s_t = cb->args[0];
2401
WANG Congd7480fd2014-11-10 15:59:36 -08002402 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2403 tbl = neigh_tables[t];
2404
2405 if (!tbl)
2406 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 if (t < s_t || (family && tbl->family != family))
2408 continue;
2409 if (t > s_t)
2410 memset(&cb->args[1], 0, sizeof(cb->args) -
2411 sizeof(cb->args[0]));
Tony Zelenoff84920c12012-01-26 22:28:58 +00002412 if (proxy)
2413 err = pneigh_dump_table(tbl, skb, cb);
2414 else
2415 err = neigh_dump_table(tbl, skb, cb);
Eric Dumazet4bd66832012-06-07 04:58:35 +00002416 if (err < 0)
2417 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
2420 cb->args[0] = t;
2421 return skb->len;
2422}
2423
2424void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2425{
2426 int chain;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002427 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002429 rcu_read_lock_bh();
2430 nht = rcu_dereference_bh(tbl->nht);
2431
Eric Dumazet767e97e2010-10-06 17:49:21 -07002432 read_lock(&tbl->lock); /* avoid resizes */
David S. Millercd089332011-07-11 01:28:12 -07002433 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 struct neighbour *n;
2435
Eric Dumazet767e97e2010-10-06 17:49:21 -07002436 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2437 n != NULL;
2438 n = rcu_dereference_bh(n->next))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 cb(n, cookie);
2440 }
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002441 read_unlock(&tbl->lock);
2442 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443}
2444EXPORT_SYMBOL(neigh_for_each);
2445
2446/* The tbl->lock must be held as a writer and BH disabled. */
2447void __neigh_for_each_release(struct neigh_table *tbl,
2448 int (*cb)(struct neighbour *))
2449{
2450 int chain;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002451 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002453 nht = rcu_dereference_protected(tbl->nht,
2454 lockdep_is_held(&tbl->lock));
David S. Millercd089332011-07-11 01:28:12 -07002455 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07002456 struct neighbour *n;
2457 struct neighbour __rcu **np;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002459 np = &nht->hash_buckets[chain];
Eric Dumazet767e97e2010-10-06 17:49:21 -07002460 while ((n = rcu_dereference_protected(*np,
2461 lockdep_is_held(&tbl->lock))) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 int release;
2463
2464 write_lock(&n->lock);
2465 release = cb(n);
2466 if (release) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07002467 rcu_assign_pointer(*np,
2468 rcu_dereference_protected(n->next,
2469 lockdep_is_held(&tbl->lock)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 n->dead = 1;
2471 } else
2472 np = &n->next;
2473 write_unlock(&n->lock);
Thomas Graf4f494552007-08-08 23:12:36 -07002474 if (release)
2475 neigh_cleanup_and_release(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 }
2477 }
2478}
2479EXPORT_SYMBOL(__neigh_for_each_release);
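/*
 * Illustrative sketch (added commentary, not part of the original file):
 * __neigh_for_each_release() unlinks, marks dead and releases every entry
 * for which the callback returns nonzero; the caller must already hold
 * tbl->lock for writing with BH disabled, as the comment above states.
 * A hypothetical "drop everything on one device" callback could look like:
 *
 *	static int flush_dev_cb(struct neighbour *n)
 *	{
 *		return n->dev == victim_dev;	// victim_dev is hypothetical
 *	}
 */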
2480
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002481int neigh_xmit(int index, struct net_device *dev,
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002482 const void *addr, struct sk_buff *skb)
2483{
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002484 int err = -EAFNOSUPPORT;
2485 if (likely(index < NEIGH_NR_TABLES)) {
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002486 struct neigh_table *tbl;
2487 struct neighbour *neigh;
2488
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002489 tbl = neigh_tables[index];
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002490 if (!tbl)
2491 goto out;
David Barrosob560f032016-06-28 11:16:43 +03002492 rcu_read_lock_bh();
David Ahern86e00b72019-05-01 18:18:42 -07002493 if (index == NEIGH_ARP_TABLE) {
2494 u32 key = *((u32 *)addr);
2495
2496 neigh = __ipv4_neigh_lookup_noref(dev, key);
2497 } else {
2498 neigh = __neigh_lookup_noref(tbl, addr, dev);
2499 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002500 if (!neigh)
2501 neigh = __neigh_create(tbl, addr, dev, false);
2502 err = PTR_ERR(neigh);
David Barrosob560f032016-06-28 11:16:43 +03002503 if (IS_ERR(neigh)) {
2504 rcu_read_unlock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002505 goto out_kfree_skb;
David Barrosob560f032016-06-28 11:16:43 +03002506 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002507 err = neigh->output(neigh, skb);
David Barrosob560f032016-06-28 11:16:43 +03002508 rcu_read_unlock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002509 }
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002510 else if (index == NEIGH_LINK_TABLE) {
2511 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2512 addr, NULL, skb->len);
2513 if (err < 0)
2514 goto out_kfree_skb;
2515 err = dev_queue_xmit(skb);
2516 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002517out:
2518 return err;
2519out_kfree_skb:
2520 kfree_skb(skb);
2521 goto out;
2522}
2523EXPORT_SYMBOL(neigh_xmit);
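/*
 * Usage note (added commentary): the index selects how the destination is
 * resolved.  Indexes below NEIGH_NR_TABLES resolve the address through the
 * matching neighbour table and hand the skb to neigh->output(); for
 * NEIGH_ARP_TABLE the address is read as a raw u32 IPv4 key.
 * NEIGH_LINK_TABLE skips resolution entirely and treats the address as a
 * link-layer destination for dev_hard_header().  An illustrative caller
 * with an IPv4 next hop might do:
 *
 *	__be32 nh = ...;
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &nh, skb);
 */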
2524
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525#ifdef CONFIG_PROC_FS
2526
2527static struct neighbour *neigh_get_first(struct seq_file *seq)
2528{
2529 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002530 struct net *net = seq_file_net(seq);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002531 struct neigh_hash_table *nht = state->nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 struct neighbour *n = NULL;
2533 int bucket = state->bucket;
2534
2535 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
David S. Millercd089332011-07-11 01:28:12 -07002536 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07002537 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
2539 while (n) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002540 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002541 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 if (state->neigh_sub_iter) {
2543 loff_t fakep = 0;
2544 void *v;
2545
2546 v = state->neigh_sub_iter(state, n, &fakep);
2547 if (!v)
2548 goto next;
2549 }
2550 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2551 break;
2552 if (n->nud_state & ~NUD_NOARP)
2553 break;
Eric Dumazet767e97e2010-10-06 17:49:21 -07002554next:
2555 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 }
2557
2558 if (n)
2559 break;
2560 }
2561 state->bucket = bucket;
2562
2563 return n;
2564}
2565
2566static struct neighbour *neigh_get_next(struct seq_file *seq,
2567 struct neighbour *n,
2568 loff_t *pos)
2569{
2570 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002571 struct net *net = seq_file_net(seq);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002572 struct neigh_hash_table *nht = state->nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573
2574 if (state->neigh_sub_iter) {
2575 void *v = state->neigh_sub_iter(state, n, pos);
2576 if (v)
2577 return n;
2578 }
Eric Dumazet767e97e2010-10-06 17:49:21 -07002579 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580
2581 while (1) {
2582 while (n) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002583 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002584 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 if (state->neigh_sub_iter) {
2586 void *v = state->neigh_sub_iter(state, n, pos);
2587 if (v)
2588 return n;
2589 goto next;
2590 }
2591 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2592 break;
2593
2594 if (n->nud_state & ~NUD_NOARP)
2595 break;
Eric Dumazet767e97e2010-10-06 17:49:21 -07002596next:
2597 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 }
2599
2600 if (n)
2601 break;
2602
David S. Millercd089332011-07-11 01:28:12 -07002603 if (++state->bucket >= (1 << nht->hash_shift))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 break;
2605
Eric Dumazet767e97e2010-10-06 17:49:21 -07002606 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 }
2608
2609 if (n && pos)
2610 --(*pos);
2611 return n;
2612}
2613
2614static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2615{
2616 struct neighbour *n = neigh_get_first(seq);
2617
2618 if (n) {
Chris Larson745e2032008-08-03 01:10:55 -07002619 --(*pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 while (*pos) {
2621 n = neigh_get_next(seq, n, pos);
2622 if (!n)
2623 break;
2624 }
2625 }
2626 return *pos ? NULL : n;
2627}
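/*
 * Iterator note (added commentary): neigh_get_first()/neigh_get_next()
 * walk the RCU hash buckets of the current table, skipping entries from
 * other network namespaces and, when NEIGH_SEQ_SKIP_NOARP is set, entries
 * in NUD_NOARP state.  neigh_get_idx() converts a seq_file position into
 * an entry by decrementing *pos once per step; position 0 is reserved for
 * SEQ_START_TOKEN in neigh_seq_start() below.
 */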
2628
2629static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2630{
2631 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002632 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 struct neigh_table *tbl = state->tbl;
2634 struct pneigh_entry *pn = NULL;
2635 int bucket = state->bucket;
2636
2637 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2638 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2639 pn = tbl->phash_buckets[bucket];
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002640 while (pn && !net_eq(pneigh_net(pn), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002641 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 if (pn)
2643 break;
2644 }
2645 state->bucket = bucket;
2646
2647 return pn;
2648}
2649
2650static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2651 struct pneigh_entry *pn,
2652 loff_t *pos)
2653{
2654 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002655 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 struct neigh_table *tbl = state->tbl;
2657
Jorge Boncompte [DTI2]df07a942011-11-25 13:24:49 -05002658 do {
2659 pn = pn->next;
2660 } while (pn && !net_eq(pneigh_net(pn), net));
2661
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 while (!pn) {
2663 if (++state->bucket > PNEIGH_HASHMASK)
2664 break;
2665 pn = tbl->phash_buckets[state->bucket];
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002666 while (pn && !net_eq(pneigh_net(pn), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002667 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 if (pn)
2669 break;
2670 }
2671
2672 if (pn && pos)
2673 --(*pos);
2674
2675 return pn;
2676}
2677
2678static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2679{
2680 struct pneigh_entry *pn = pneigh_get_first(seq);
2681
2682 if (pn) {
Chris Larson745e2032008-08-03 01:10:55 -07002683 --(*pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 while (*pos) {
2685 pn = pneigh_get_next(seq, pn, pos);
2686 if (!pn)
2687 break;
2688 }
2689 }
2690 return *pos ? NULL : pn;
2691}
2692
2693static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2694{
2695 struct neigh_seq_state *state = seq->private;
2696 void *rc;
Chris Larson745e2032008-08-03 01:10:55 -07002697 loff_t idxpos = *pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Chris Larson745e2032008-08-03 01:10:55 -07002699 rc = neigh_get_idx(seq, &idxpos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
Chris Larson745e2032008-08-03 01:10:55 -07002701 rc = pneigh_get_idx(seq, &idxpos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702
2703 return rc;
2704}
2705
2706void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
Eric Dumazetc55ce1d2019-06-15 16:28:48 -07002707 __acquires(tbl->lock)
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002708 __acquires(rcu_bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709{
2710 struct neigh_seq_state *state = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711
2712 state->tbl = tbl;
2713 state->bucket = 0;
2714 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2715
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002716 rcu_read_lock_bh();
2717 state->nht = rcu_dereference_bh(tbl->nht);
Eric Dumazetc55ce1d2019-06-15 16:28:48 -07002718 read_lock(&tbl->lock);
Eric Dumazet767e97e2010-10-06 17:49:21 -07002719
Chris Larson745e2032008-08-03 01:10:55 -07002720 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721}
2722EXPORT_SYMBOL(neigh_seq_start);
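/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a protocol's /proc seq handler typically wraps these helpers with its
 * own table and flags, along the lines of:
 *
 *	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * neigh_seq_start() takes rcu_read_lock_bh() and then tbl->lock for
 * reading; neigh_seq_stop() releases them in the reverse order.
 */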
2723
2724void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2725{
2726 struct neigh_seq_state *state;
2727 void *rc;
2728
2729 if (v == SEQ_START_TOKEN) {
Chris Larsonbff69732008-08-03 01:02:41 -07002730 rc = neigh_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 goto out;
2732 }
2733
2734 state = seq->private;
2735 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2736 rc = neigh_get_next(seq, v, NULL);
2737 if (rc)
2738 goto out;
2739 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2740 rc = pneigh_get_first(seq);
2741 } else {
2742 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2743 rc = pneigh_get_next(seq, v, NULL);
2744 }
2745out:
2746 ++(*pos);
2747 return rc;
2748}
2749EXPORT_SYMBOL(neigh_seq_next);
2750
2751void neigh_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc55ce1d2019-06-15 16:28:48 -07002752 __releases(tbl->lock)
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002753 __releases(rcu_bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754{
Eric Dumazetc55ce1d2019-06-15 16:28:48 -07002755 struct neigh_seq_state *state = seq->private;
2756 struct neigh_table *tbl = state->tbl;
2757
2758 read_unlock(&tbl->lock);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002759 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760}
2761EXPORT_SYMBOL(neigh_seq_stop);
2762
2763/* statistics via seq_file */
2764
2765static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2766{
Alexey Dobriyan81c1ebf2010-01-22 10:16:05 +00002767 struct neigh_table *tbl = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 int cpu;
2769
2770 if (*pos == 0)
2771 return SEQ_START_TOKEN;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002772
Rusty Russell0f23174a2008-12-29 12:23:42 +00002773 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 if (!cpu_possible(cpu))
2775 continue;
2776 *pos = cpu+1;
2777 return per_cpu_ptr(tbl->stats, cpu);
2778 }
2779 return NULL;
2780}
2781
2782static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2783{
Alexey Dobriyan81c1ebf2010-01-22 10:16:05 +00002784 struct neigh_table *tbl = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 int cpu;
2786
Rusty Russell0f23174a2008-12-29 12:23:42 +00002787 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 if (!cpu_possible(cpu))
2789 continue;
2790 *pos = cpu+1;
2791 return per_cpu_ptr(tbl->stats, cpu);
2792 }
2793 return NULL;
2794}
2795
2796static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2797{
2798
2799}
2800
2801static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2802{
Alexey Dobriyan81c1ebf2010-01-22 10:16:05 +00002803 struct neigh_table *tbl = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 struct neigh_statistics *st = v;
2805
2806 if (v == SEQ_START_TOKEN) {
Rick Jonesfb811392015-08-07 11:10:37 -07002807 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 return 0;
2809 }
2810
2811 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
Rick Jonesfb811392015-08-07 11:10:37 -07002812 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 atomic_read(&tbl->entries),
2814
2815 st->allocs,
2816 st->destroys,
2817 st->hash_grows,
2818
2819 st->lookups,
2820 st->hits,
2821
2822 st->res_failed,
2823
2824 st->rcv_probes_mcast,
2825 st->rcv_probes_ucast,
2826
2827 st->periodic_gc_runs,
Neil Horman9a6d2762008-07-16 20:50:49 -07002828 st->forced_gc_runs,
Rick Jonesfb811392015-08-07 11:10:37 -07002829 st->unres_discards,
2830 st->table_fulls
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 );
2832
2833 return 0;
2834}
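/*
 * Output note (added commentary): the per-table stats file (for example
 * /proc/net/stat/arp_cache) prints a header row followed by one row per
 * possible CPU.  All values are hexadecimal; the first column repeats the
 * table-wide entry count while the remaining columns are the per-CPU
 * counters from struct neigh_statistics.
 */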
2835
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002836static const struct seq_operations neigh_stat_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 .start = neigh_stat_seq_start,
2838 .next = neigh_stat_seq_next,
2839 .stop = neigh_stat_seq_stop,
2840 .show = neigh_stat_seq_show,
2841};
2842
2843static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2844{
2845 int ret = seq_open(file, &neigh_stat_seq_ops);
2846
2847 if (!ret) {
2848 struct seq_file *sf = file->private_data;
Al Virod9dda782013-03-31 18:16:14 -04002849 sf->private = PDE_DATA(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 }
2851 return ret;
 2852}
2853
Arjan van de Ven9a321442007-02-12 00:55:35 -08002854static const struct file_operations neigh_stat_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 .owner = THIS_MODULE,
2856 .open = neigh_stat_seq_open,
2857 .read = seq_read,
2858 .llseek = seq_lseek,
2859 .release = seq_release,
2860};
2861
2862#endif /* CONFIG_PROC_FS */
2863
Thomas Graf339bf982006-11-10 14:10:15 -08002864static inline size_t neigh_nlmsg_size(void)
2865{
2866 return NLMSG_ALIGN(sizeof(struct ndmsg))
2867 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2868 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2869 + nla_total_size(sizeof(struct nda_cacheinfo))
2870 + nla_total_size(4); /* NDA_PROBES */
2871}
2872
Thomas Grafb8673312006-08-15 00:33:14 -07002873static void __neigh_notify(struct neighbour *n, int type, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002875 struct net *net = dev_net(n->dev);
Thomas Graf8b8aec52006-08-07 17:56:37 -07002876 struct sk_buff *skb;
Thomas Grafb8673312006-08-15 00:33:14 -07002877 int err = -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
Thomas Graf339bf982006-11-10 14:10:15 -08002879 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
Thomas Graf8b8aec52006-08-07 17:56:37 -07002880 if (skb == NULL)
Thomas Grafb8673312006-08-15 00:33:14 -07002881 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882
Thomas Grafb8673312006-08-15 00:33:14 -07002883 err = neigh_fill_info(skb, n, 0, 0, type, flags);
Patrick McHardy26932562007-01-31 23:16:40 -08002884 if (err < 0) {
2885 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2886 WARN_ON(err == -EMSGSIZE);
2887 kfree_skb(skb);
2888 goto errout;
2889 }
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002890 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2891 return;
Thomas Grafb8673312006-08-15 00:33:14 -07002892errout:
2893 if (err < 0)
Eric W. Biederman426b5302008-01-24 00:13:18 -08002894 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
Thomas Grafb8673312006-08-15 00:33:14 -07002895}
2896
2897void neigh_app_ns(struct neighbour *n)
2898{
2899 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09002901EXPORT_SYMBOL(neigh_app_ns);
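/*
 * Note (added commentary): neigh_app_ns() sends an RTM_GETNEIGH message
 * with NLM_F_REQUEST to the RTNLGRP_NEIGH group.  It is typically used
 * together with the app_solicit sysctl so a userspace helper (historically
 * arpd) can be asked to resolve an address on the kernel's behalf.
 */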
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902
2903#ifdef CONFIG_SYSCTL
Cong Wangb93196d2012-12-06 10:04:04 +08002904static int zero;
Francesco Fusco555445c2013-07-24 10:39:06 +02002905static int int_max = INT_MAX;
Cong Wangb93196d2012-12-06 10:04:04 +08002906static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
Joe Perchesfe2c6332013-06-11 23:04:25 -07002908static int proc_unres_qlen(struct ctl_table *ctl, int write,
2909 void __user *buffer, size_t *lenp, loff_t *ppos)
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002910{
2911 int size, ret;
Joe Perchesfe2c6332013-06-11 23:04:25 -07002912 struct ctl_table tmp = *ctl;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002913
Shan Weice46cc62012-12-04 18:49:15 +00002914 tmp.extra1 = &zero;
2915 tmp.extra2 = &unres_qlen_max;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002916 tmp.data = &size;
Shan Weice46cc62012-12-04 18:49:15 +00002917
2918 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2919 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2920
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002921 if (write && !ret)
2922 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2923 return ret;
2924}
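/*
 * Conversion note (added commentary): the unresolved queue limit is stored
 * internally in bytes (unres_qlen_bytes).  The legacy packet-based sysctl
 * handled here converts on the fly using SKB_TRUESIZE(ETH_FRAME_LEN) as an
 * approximate per-packet cost, so writing unres_qlen = N stores roughly
 * N * SKB_TRUESIZE(ETH_FRAME_LEN) bytes.
 */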
2925
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002926static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2927 int family)
2928{
Jiri Pirkobba24892013-12-07 19:26:57 +01002929 switch (family) {
2930 case AF_INET:
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002931 return __in_dev_arp_parms_get_rcu(dev);
Jiri Pirkobba24892013-12-07 19:26:57 +01002932 case AF_INET6:
2933 return __in6_dev_nd_parms_get_rcu(dev);
2934 }
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002935 return NULL;
2936}
2937
2938static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2939 int index)
2940{
2941 struct net_device *dev;
2942 int family = neigh_parms_family(p);
2943
2944 rcu_read_lock();
2945 for_each_netdev_rcu(net, dev) {
2946 struct neigh_parms *dst_p =
2947 neigh_get_dev_parms_rcu(dev, family);
2948
2949 if (dst_p && !test_bit(index, dst_p->data_state))
2950 dst_p->data[index] = p->data[index];
2951 }
2952 rcu_read_unlock();
2953}
2954
2955static void neigh_proc_update(struct ctl_table *ctl, int write)
2956{
2957 struct net_device *dev = ctl->extra1;
2958 struct neigh_parms *p = ctl->extra2;
Jiri Pirko77d47af2013-12-10 23:55:07 +01002959 struct net *net = neigh_parms_net(p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002960 int index = (int *) ctl->data - p->data;
2961
2962 if (!write)
2963 return;
2964
2965 set_bit(index, p->data_state);
Marcus Huewe6c854af2017-02-15 01:00:36 +01002966 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2967 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002968 if (!dev) /* NULL dev means this is default value */
2969 neigh_copy_dflt_parms(net, p, index);
2970}
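/*
 * Update note (added commentary): writing a per-device value sets the
 * corresponding bit in p->data_state, so later writes to the "default"
 * parms no longer override it; writes to the default parms are propagated
 * by neigh_copy_dflt_parms() to every device that has not set its own
 * value.  Changing delay_first_probe_time additionally raises a
 * NETEVENT_DELAY_PROBE_TIME_UPDATE notification so interested listeners
 * (for example hardware-offload drivers) can pick up the new value.
 */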
2971
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002972static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2973 void __user *buffer,
2974 size_t *lenp, loff_t *ppos)
2975{
2976 struct ctl_table tmp = *ctl;
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002977 int ret;
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002978
2979 tmp.extra1 = &zero;
2980 tmp.extra2 = &int_max;
2981
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002982 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2983 neigh_proc_update(ctl, write);
2984 return ret;
Jiri Pirko1f9248e52013-12-07 19:26:53 +01002985}
2986
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01002987int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2988 void __user *buffer, size_t *lenp, loff_t *ppos)
2989{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01002990 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2991
2992 neigh_proc_update(ctl, write);
2993 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01002994}
2995EXPORT_SYMBOL(neigh_proc_dointvec);
2996
2997int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2998 void __user *buffer,
2999 size_t *lenp, loff_t *ppos)
3000{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003001 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3002
3003 neigh_proc_update(ctl, write);
3004 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003005}
3006EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3007
3008static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3009 void __user *buffer,
3010 size_t *lenp, loff_t *ppos)
3011{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003012 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3013
3014 neigh_proc_update(ctl, write);
3015 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003016}
3017
3018int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3019 void __user *buffer,
3020 size_t *lenp, loff_t *ppos)
3021{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003022 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3023
3024 neigh_proc_update(ctl, write);
3025 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003026}
3027EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3028
3029static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3030 void __user *buffer,
3031 size_t *lenp, loff_t *ppos)
3032{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003033 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3034
3035 neigh_proc_update(ctl, write);
3036 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003037}
3038
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003039static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3040 void __user *buffer,
3041 size_t *lenp, loff_t *ppos)
3042{
3043 struct neigh_parms *p = ctl->extra2;
3044 int ret;
3045
3046 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3047 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3048 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3049 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3050 else
3051 ret = -1;
3052
3053 if (write && ret == 0) {
 3054		/* Update reachable_time as well; otherwise the change only
 3055		 * takes effect the next time neigh_periodic_work decides to
 3056		 * recompute it.
 3057		 */
3058 p->reachable_time =
3059 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3060 }
3061 return ret;
3062}
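/*
 * Usage note (added commentary): both base_reachable_time and
 * base_reachable_time_ms are routed through this handler so that
 * p->reachable_time is re-randomised immediately.  An illustrative write
 * (eth0 is just an example device):
 *
 *	# echo 30000 > /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * takes effect on the next neighbour update rather than waiting for
 * neigh_periodic_work.
 */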
3063
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003064#define NEIGH_PARMS_DATA_OFFSET(index) \
3065 (&((struct neigh_parms *) 0)->data[index])
3066
3067#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3068 [NEIGH_VAR_ ## attr] = { \
3069 .procname = name, \
3070 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3071 .maxlen = sizeof(int), \
3072 .mode = mval, \
3073 .proc_handler = proc, \
3074 }
3075
3076#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3077 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3078
3079#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003080 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003081
3082#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003083 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003084
3085#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003086 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003087
3088#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003089 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003090
3091#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003092 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
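/*
 * Macro note (added commentary): NEIGH_PARMS_DATA_OFFSET() records only
 * the offset of a field inside neigh_parms->data[] (computed against a
 * NULL pointer).  neigh_sysctl_register() later turns it into a real
 * address by adding the neigh_parms pointer:
 *
 *	t->neigh_vars[i].data += (long) p;
 *
 * which is what lets one template table serve every device and family.
 */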
Eric W. Biederman54716e32010-02-14 03:27:03 +00003093
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094static struct neigh_sysctl_table {
3095 struct ctl_table_header *sysctl_header;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003096 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
Brian Haleyab32ea52006-09-22 14:15:41 -07003097} neigh_sysctl_template __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 .neigh_vars = {
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003099 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3100 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3101 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09003102 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003103 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3104 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3105 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3106 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3107 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3108 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3109 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3110 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3111 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3112 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3113 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3114 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003115 [NEIGH_VAR_GC_INTERVAL] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 .procname = "gc_interval",
3117 .maxlen = sizeof(int),
3118 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08003119 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 },
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003121 [NEIGH_VAR_GC_THRESH1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122 .procname = "gc_thresh1",
3123 .maxlen = sizeof(int),
3124 .mode = 0644,
Francesco Fusco555445c2013-07-24 10:39:06 +02003125 .extra1 = &zero,
3126 .extra2 = &int_max,
3127 .proc_handler = proc_dointvec_minmax,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128 },
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003129 [NEIGH_VAR_GC_THRESH2] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 .procname = "gc_thresh2",
3131 .maxlen = sizeof(int),
3132 .mode = 0644,
Francesco Fusco555445c2013-07-24 10:39:06 +02003133 .extra1 = &zero,
3134 .extra2 = &int_max,
3135 .proc_handler = proc_dointvec_minmax,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136 },
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003137 [NEIGH_VAR_GC_THRESH3] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138 .procname = "gc_thresh3",
3139 .maxlen = sizeof(int),
3140 .mode = 0644,
Francesco Fusco555445c2013-07-24 10:39:06 +02003141 .extra1 = &zero,
3142 .extra2 = &int_max,
3143 .proc_handler = proc_dointvec_minmax,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 },
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11003145 {},
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146 },
3147};
3148
3149int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
Jiri Pirko73af6142013-12-07 19:26:55 +01003150 proc_handler *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151{
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003152 int i;
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003153 struct neigh_sysctl_table *t;
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003154 const char *dev_name_source;
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003155 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
Jiri Pirko73af6142013-12-07 19:26:55 +01003156 char *p_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003158 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 if (!t)
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003160 goto err;
3161
Jiri Pirkob194c1f2014-02-21 14:52:57 +01003162 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
Jiri Pirko1f9248e52013-12-07 19:26:53 +01003163 t->neigh_vars[i].data += (long) p;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003164 t->neigh_vars[i].extra1 = dev;
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003165 t->neigh_vars[i].extra2 = p;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
3168 if (dev) {
3169 dev_name_source = dev->name;
Eric W. Biedermand12af672007-10-18 03:05:25 -07003170 /* Terminate the table early */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003171 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3172 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 } else {
Mathias Krause9ecf07a2014-07-12 22:36:44 +02003174 struct neigh_table *tbl = p->tbl;
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003175 dev_name_source = "default";
Mathias Krause9ecf07a2014-07-12 22:36:44 +02003176 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3177 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3178 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3179 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 }
3181
Eric W. Biedermanf8572d82009-11-05 13:32:03 -08003182 if (handler) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 /* RetransTime */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003184 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 /* ReachableTime */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003186 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 /* RetransTime (in milliseconds)*/
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003188 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 /* ReachableTime (in milliseconds) */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003190 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003191 } else {
 3192		/* These handlers also update p->reachable_time as soon as
 3193		 * base_reachable_time(_ms) is written, so the new value takes
 3194		 * effect on the next neighbour update instead of waiting for
 3195		 * neigh_periodic_work to recompute it (which can take several
 3196		 * minutes).  Any handler that replaces them should do the same.
 3197		 */
3198 /* ReachableTime */
3199 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3200 neigh_proc_base_reachable_time;
3201 /* ReachableTime (in milliseconds) */
3202 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3203 neigh_proc_base_reachable_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204 }
3205
Eric W. Biederman464dc802012-11-16 03:02:59 +00003206 /* Don't export sysctls to unprivileged users */
3207 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3208 t->neigh_vars[0].procname = NULL;
3209
Jiri Pirko73af6142013-12-07 19:26:55 +01003210 switch (neigh_parms_family(p)) {
3211 case AF_INET:
3212 p_name = "ipv4";
3213 break;
3214 case AF_INET6:
3215 p_name = "ipv6";
3216 break;
3217 default:
3218 BUG();
3219 }
3220
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003221 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3222 p_name, dev_name_source);
Denis V. Lunev4ab438f2008-02-28 20:48:01 -08003223 t->sysctl_header =
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003224 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003225 if (!t->sysctl_header)
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003226 goto free;
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003227
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 p->sysctl_table = t;
3229 return 0;
3230
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003231free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 kfree(t);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003233err:
3234 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003236EXPORT_SYMBOL(neigh_sysctl_register);
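/*
 * Registration note (added commentary): each protocol registers one sysctl
 * table per neigh_parms instance, i.e. one per device plus one for the
 * defaults.  Passing a non-NULL handler lets the caller override the
 * time-related entries.  An illustrative IPv4 call would be
 *
 *	neigh_sysctl_register(dev, dev_parms, NULL);
 *
 * which exposes the entries under /proc/sys/net/ipv4/neigh/<dev>/.
 */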
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237
3238void neigh_sysctl_unregister(struct neigh_parms *p)
3239{
3240 if (p->sysctl_table) {
3241 struct neigh_sysctl_table *t = p->sysctl_table;
3242 p->sysctl_table = NULL;
Eric W. Biederman5dd3df12012-04-19 13:24:33 +00003243 unregister_net_sysctl_table(t->sysctl_header);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244 kfree(t);
3245 }
3246}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003247EXPORT_SYMBOL(neigh_sysctl_unregister);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248
3249#endif /* CONFIG_SYSCTL */
3250
Thomas Grafc8822a42007-03-22 11:50:06 -07003251static int __init neigh_init(void)
3252{
Greg Rosec7ac8672011-06-10 01:27:09 +00003253 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3254 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3255 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
Thomas Grafc8822a42007-03-22 11:50:06 -07003256
Greg Rosec7ac8672011-06-10 01:27:09 +00003257 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3258 NULL);
3259 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
Thomas Grafc8822a42007-03-22 11:50:06 -07003260
3261 return 0;
3262}
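/*
 * Init note (added commentary): neigh_init() wires the generic neighbour
 * code into rtnetlink: RTM_NEWNEIGH and RTM_DELNEIGH get doit handlers,
 * RTM_GETNEIGH and RTM_GETNEIGHTBL get dump handlers, and RTM_SETNEIGHTBL
 * gets a doit handler for tuning table parameters.  It runs as a
 * subsys_initcall so it is ready before the protocol tables register.
 */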
3263
3264subsys_initcall(neigh_init);
3265