/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held only briefly,
 *    we use a second list where long lived entries are
 *    stored; these are handled by the garbage collection
 *    task fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */

/*
 * We want to keep the lock and list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;

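/*
 * Garbage collect the entries queued by __dst_free(): walk the busy
 * list, destroy entries whose refcount has dropped to zero, and keep
 * the still-referenced ones for a later pass.  The rearm interval is
 * adaptive: it backs off towards DST_GC_MAX while little work gets
 * done and snaps back to DST_GC_MIN when entries are actually freed.
 */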
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the
			 * gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when their parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

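/*
 * Output sink installed on dead dst entries: silently drop the packet.
 */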
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section.  Otherwise it might end up in
	 * the bss section.  We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	.refcnt = ATOMIC_INIT(1),
};

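/*
 * Initialise every field of a freshly allocated (or embedded) dst
 * entry.  A reference is taken on @dev, metrics point at the shared
 * read-only defaults, and input/output start out as discard sinks
 * until the owning protocol installs real handlers.
 */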
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

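/*
 * Allocate a dst entry from the per-ops kmem cache and initialise it.
 * Returns NULL if the slab allocation fails, or if the ops' entry
 * count is above gc_thresh and ops->gc() cannot reclaim anything.
 *
 * Illustrative caller sketch (hypothetical, not taken from this file):
 *
 *	struct rtable *rt;
 *
 *	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 0);
 *	if (!rt)
 *		return NULL;
 */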
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

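/*
 * Mark an entry dead: point input/output at the discard sinks when the
 * device is gone or down, and flag the entry DST_OBSOLETE_DEAD.
 */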
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

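/*
 * Queue a dst entry on the garbage list and kick the gc work early if
 * it is currently sleeping on a long interval.
 */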
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

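/*
 * Tear down a dst entry and walk down its ->child chain.  Returns a
 * still-referenced NOHASH child that the caller must dispose of later,
 * or NULL when the whole chain has been handled.
 */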
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

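/*
 * Drop a reference.  DST_NOCACHE entries are freed via RCU once the
 * last reference goes away; everything else is left to the hash tables
 * or the garbage collector.
 */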
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

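/*
 * Copy-on-write for dst metrics: clone the current (read-only) metrics
 * into a private, refcounted struct dst_metrics and install it with
 * cmpxchg().  On a lost race the local copy is freed and the winner's
 * pointer is returned instead (NULL if that winner is read-only).
 */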
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		atomic_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (atomic_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
	.family =		AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

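/*
 * Common initialiser for metadata dsts: the entry holds no device and
 * is never counted or cached; everything following the generic dst
 * header (the tunnel info plus @optslen option bytes) is zeroed.
 */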
static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

	dst->input = dst_md_discard;
	dst->output = dst_md_discard_out;

	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
}

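/*
 * Illustrative use, in the style of a tunnel driver (hypothetical
 * sketch, not taken from this file):
 *
 *	struct metadata_dst *md = metadata_dst_alloc(0, GFP_KERNEL);
 *
 *	if (!md)
 *		return -ENOMEM;
 *	skb_dst_set(skb, &md->dst);
 */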
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}

struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
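/*
 * Detach a dst entry from @dev: on NETDEV_DOWN just disable I/O, on
 * unregister re-point the entry at the loopback device of the dst's
 * namespace so the original device's refcount can reach zero.
 */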
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

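/*
 * Notifier callback: when a device goes down or finishes unregistering,
 * walk both the busy list and the pending garbage list and detach every
 * entry that still points at it.
 */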
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		/* The code in dst_ifdown places a hold on the loopback device.
		 * If the gc entry processing is set to expire after a lengthy
		 * interval, this hold can cause netdev_wait_allrefs() to hang
		 * out and wait for a long time -- until the loopback
		 * interface is released.  If we're really unlucky, it'll emit
		 * pr_emerg messages to console too.  Reset the interval here,
		 * so dst cleanups occur in a more timely fashion.
		 */
		if (dst_garbage.timer_inc > DST_GC_INC) {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
			mod_delayed_work(system_wq, &dst_gc_work,
					 dst_garbage.timer_expires);
		}
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_subsys_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}