/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

#include <net/dst.h>

/* Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static DEFINE_SPINLOCK(dst_lock);

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);
static void ___dst_free(struct dst_entry *dst);

static DEFINE_TIMER(dst_gc_timer, dst_run_gc, DST_GC_MIN, 0);

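/*
 * dst_run_gc - timer-driven garbage collector for the dst garbage list.
 *
 * Walks dst_garbage_list, destroying entries whose refcount has dropped
 * to zero and keeping the rest for a later pass.  The timer is re-armed
 * with an interval that backs off (up to DST_GC_MAX) while no work gets
 * done and snaps back to DST_GC_MIN as soon as entries are freed.
 */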
static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	int work_performed;
	struct dst_entry *dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	work_performed = 0;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->__refcnt)) {
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;
		work_performed = 1;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced.  Unless it is already
			 * on the gc list, invalidate it and add it there.
			 *
			 * Note: this is temporary.  Ideally, NOHASH dsts
			 * would be obsoleted as soon as their parent is,
			 * but there is no "obsoleted, yet still referenced
			 * by parent" state, so this is the best we can do.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = *dstp;
			*dstp = dst;
			dstp = &dst->next;
		}
	}
	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}
	if (!work_performed) {
		if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
			dst_gc_timer_expires = DST_GC_MAX;
		dst_gc_timer_inc += DST_GC_INC;
	} else {
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
	}
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	add_timer(&dst_gc_timer);

out:
	spin_unlock(&dst_lock);
}

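/*
 * Packet-discarding stubs.  They are installed as the input/output
 * handlers of destination entries that are dead or whose device is
 * down, so any packet still routed through such a dst is dropped.
 */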
static int dst_discard_in(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int dst_discard_out(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

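/*
 * dst_alloc - allocate and initialise a destination cache entry.
 *
 * If the protocol's entry count exceeds its gc_thresh, the protocol's
 * gc() hook is given a chance to reclaim entries first; allocation
 * fails if it cannot.  The new entry starts with a zero refcount and
 * its input/output handlers pointing at the discard stubs.
 */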
void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc())
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
	if (!dst)
		return NULL;
	memset(dst, 0, ops->entry_size);
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst_discard_in;
	dst->output = dst_discard_out;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}

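/*
 * ___dst_free - mark an entry dead without touching the garbage list.
 *
 * Called with dst_lock held.  If the entry has no device, or its device
 * is down, its handlers are switched to the discard stubs; in all cases
 * the entry is marked obsolete.
 */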
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is needed when a protocol
	 * module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard_in;
		dst->output = dst_discard_out;
	}
	dst->obsolete = 2;
}

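/*
 * __dst_free - queue an entry on the garbage list for deferred release.
 *
 * The entry is marked dead, pushed onto dst_garbage_list and, if the GC
 * timer is currently running with a long backoff, the timer is reset to
 * fire again after DST_GC_MIN.
 */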
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_lock);
	___dst_free(dst);
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
	}
	spin_unlock_bh(&dst_lock);
}

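/*
 * dst_destroy - release an entry and walk down its chain of children.
 *
 * Drops the neighbour, hh cache and device references, lets the
 * protocol's destroy() hook run, and frees the entry.  If the child is
 * a NOHASH entry that has just lost its last reference, destruction
 * continues with the child; a still-referenced NOHASH child is returned
 * to the caller so it can be put on the garbage list.
 */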
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
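/*
 * dst_ifdown - detach a garbage-list entry from a device going down.
 *
 * The protocol's ifdown() hook runs first.  For NETDEV_DOWN the entry
 * merely stops forwarding (the discard stubs are installed).  For
 * NETDEV_UNREGISTER the entry and its neighbour are re-pointed at
 * loopback_dev so the reference on the disappearing device can be
 * dropped.
 */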
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard_in;
		dst->output = dst_discard_out;
	} else {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = &loopback_dev;
			dev_put(dev);
			dev_hold(&loopback_dev);
		}
	}
}

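/*
 * dst_dev_event - netdevice notifier callback.
 *
 * On NETDEV_DOWN and NETDEV_UNREGISTER, walk the garbage list under
 * dst_lock and detach every entry from the affected device.
 */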
static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		spin_lock_bh(&dst_lock);
		for (dst = dst_garbage_list; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		spin_unlock_bh(&dst_lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);