/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain long-living
 *  information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of an ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  in the inet_getid() function (include/net/inetpeer.h; an illustrative
 *  sketch follows this comment).
 *  At the time of writing these notes, the IP packet identifier is generated
 *  to be unpredictable using this code only for packets subjected (actually
 *  or potentially) to defragmentation.  I.e. DF packets smaller than the
 *  PMTU use a constant ID and do not use this code (see ip_select_ident()
 *  in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A
 *  huge number of long-living nodes in a single hash slot would
 *  significantly delay lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "unused_peers.lock".
 *  4.  The global total counter (v4_peers.total) is modified under the
 *      pool lock.
 *  5.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_height: pool lock
 *		unused: unused node list lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		dtime: unused node list lock
 *		v4daddr: unchangeable
 *		ip_id_count: atomic value (no lock needed)
 */
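
/* An illustrative sketch (an assumption about the header's definition, kept
 * in a comment so nothing is redefined here): inet_getid() in
 * include/net/inetpeer.h boils down to a per-peer atomic counter, roughly
 *
 *	static inline __u16 inet_getid(struct inet_peer *p, int more)
 *	{
 *		more++;
 *		return atomic_add_return(more, &p->ip_id_count) - more;
 *	}
 *
 * which is why ip_id_count above needs no lock.
 */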

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

static struct inet_peer_base {
	struct inet_peer __rcu *root;
	spinlock_t	lock;
	int		total;
} v4_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SPIN_LOCK_UNLOCKED(v4_peers.lock),
	.total		= 0,
};
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static struct {
	struct list_head	list;
	spinlock_t		lock;
} unused_peers = {
	.list			= LIST_HEAD_INIT(unused_peers.list),
	.lock			= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
};

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
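
	/* Worked example (informal arithmetic): on a machine with at most
	 * 8 MB of RAM all three tests above fire, so the default threshold
	 * of 65536 + 128 = 65664 entries drops to 65664 >> 4 = 4104 entries.
	 */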

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	/* All the timers, started at system startup, tend
	   to synchronize.  Perturb it a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	if (!list_empty(&p->unused)) {
		spin_lock_bh(&unused_peers.lock);
		list_del_init(&p->unused);
		spin_unlock_bh(&unused_peers.lock);
	}
}

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_dereference_protected(_base->root,		\
			lockdep_is_held(&_base->lock));		\
	     u != peer_avl_empty; ) {				\
		if (_daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_dereference_protected(*v,		\
			lockdep_is_held(&_base->lock));		\
	}							\
	u;							\
})

/*
 * Called with rcu_read_lock_bh().
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links followed exceeds
 * PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu_bh(__be32 daddr, struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference_bh(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		if (daddr == u->v4daddr) {
			/* Before taking a reference, check if this entry was
			 * deleted: unlink_from_pool() sets refcnt=-1 to make
			 * the distinction between an unused entry (refcnt=0)
			 * and a freed one.
			 */
			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
				u = NULL;
			return u;
		}
		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
			u = rcu_dereference_bh(u->avl_left);
		else
			u = rcu_dereference_bh(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}
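
/* Illustrative reader-side sketch (it mirrors inet_getpeer() below and is
 * not extra code in this file): the lockless fast path is simply
 *
 *	rcu_read_lock_bh();
 *	p = lookup_rcu_bh(daddr, base);
 *	rcu_read_unlock_bh();
 *
 * and any non-NULL result already carries a reference taken with
 * atomic_add_unless(), so it cannot be freed under the caller.
 */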

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_dereference_protected(*v,			\
			lockdep_is_held(&base->lock));		\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_dereference_protected(*v,		\
			lockdep_is_held(&base->lock));		\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
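/* Informal sketch of the first rebalance case handled below (lh == rh + 2
 * and lrh <= node_height(ll)): a single right rotation restores the AVL
 * invariant.
 *
 *	         node                    l
 *	        /    \                 /   \
 *	       l      r      ==>     ll     node
 *	      / \                          /    \
 *	    ll   lr                      lr      r
 *
 * The mirrored case (rh == lh + 2) and the double rotations follow the
 * same pattern with left and right exchanged.
 */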
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
		struct inet_peer __rcu ***stackend,
		struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_dereference_protected(*nodep,
				lockdep_is_held(&base->lock));
		l = rcu_dereference_protected(node->avl_left,
				lockdep_is_held(&base->lock));
		r = rcu_dereference_protected(node->avl_right,
				lockdep_is_held(&base->lock));
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_dereference_protected(l->avl_left,
				lockdep_is_held(&base->lock));
			lr = rcu_dereference_protected(l->avl_right,
				lockdep_is_held(&base->lock));
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_dereference_protected(lr->avl_left,
					lockdep_is_held(&base->lock));	/* lrl: RH or RH-1 */
				lrr = rcu_dereference_protected(lr->avl_right,
					lockdep_is_held(&base->lock));	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_dereference_protected(r->avl_right,
				lockdep_is_held(&base->lock));
			rl = rcu_dereference_protected(r->avl_left,
				lockdep_is_held(&base->lock));
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_dereference_protected(rl->avl_right,
					lockdep_is_held(&base->lock));	/* rlr: LH or LH-1 */
				rll = rcu_dereference_protected(rl->avl_left,
					lockdep_is_held(&base->lock));	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
{
	int do_free;

	do_free = 0;

	spin_lock_bh(&base->lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in the cleanup_once() function to prevent it from suddenly
	 * disappearing.  If we can atomically (because of lockless readers)
	 * take this last reference, it's safe to remove the node and free
	 * it later.
	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
	 */
	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
		struct inet_peer __rcu ***stackptr, ***delp;
		if (lookup(p->v4daddr, stack, base) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty_rcu) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p, base);
			BUG_ON(rcu_dereference_protected(*stackptr[-1],
					lockdep_is_held(&base->lock)) != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			RCU_INIT_POINTER(*delp[0], t);
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr, base);
		base->total--;
		do_free = 1;
	}
	spin_unlock_bh(&base->lock);

	if (do_free)
		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup_once -> unlink_from_unused
		 *   -> unlink_from_pool -> inet_putpeer -> link to unused
		 *   -> cleanup_once (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon.
		 */
		inet_putpeer(p);
}

static struct inet_peer_base *peer_to_base(struct inet_peer *p)
{
	return &v4_peers;
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&unused_peers.lock);
	if (!list_empty(&unused_peers.list)) {
		__u32 delta;

		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&unused_peers.lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&unused_peers.lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p, peer_to_base(p));
	return 0;
}

static struct inet_peer_base *family_to_base(int family)
{
	return &v4_peers;
}

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(AF_INET);
	struct inet_peer *p;

	/* Look up the address quickly, locklessly.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock_bh();
	p = lookup_rcu_bh(daddr, base);
	rcu_read_unlock_bh();

	if (p) {
		/* The existing node has been found.
		 * Remove the entry from the unused list if it was there.
		 */
		unlink_from_unused(p);
		return p;
	}

	/* Retry an exact lookup, this time taking the lock first.
	 * At least, the nodes should be hot in our cache.
	 */
	spin_lock_bh(&base->lock);
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		spin_unlock_bh(&base->lock);
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->v4daddr = daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
		p->tcp_ts_stamp = 0;
		INIT_LIST_HEAD(&p->unused);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	spin_unlock_bh(&base->lock);

	if (base->total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0);

	return p;
}
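
/* Hedged usage sketch: a caller that wants an unpredictable IP ID follows
 * roughly this pattern (in the real tree the peer is typically cached in
 * the route entry by net/ipv4/route.c rather than looked up per packet;
 * "iph" and "more" are hypothetical locals here):
 *
 *	struct inet_peer *peer = inet_getpeer(iph->daddr, 1);
 *	if (peer) {
 *		iph->id = htons(inet_getid(peer, more));
 *		inet_putpeer(peer);
 *	}
 */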

static int compute_total(void)
{
	return v4_peers.total;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl, total;

	total = compute_total();
	if (total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					total / inet_peer_threshold * HZ;
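
	/* Worked example (informal, default tunables with no small-RAM
	 * scaling): with minttl = 120*HZ, maxttl = 600*HZ and a half-full
	 * pool (total = 32832, threshold = 65664), the expression above
	 * evaluates left to right as
	 *	600*HZ - ((480 * 32832) / 65664) * HZ = 600*HZ - 240*HZ
	 * i.e. a 360 second TTL, interpolated linearly between the two
	 * bounds as the pool fills.
	 */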
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (the more entries,
	 * the shorter the interval). */
	total = compute_total();
	if (total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

void inet_putpeer(struct inet_peer *p)
{
	local_bh_disable();

	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
		list_add_tail(&p->unused, &unused_peers.list);
		p->dtime = (__u32)jiffies;
		spin_unlock(&unused_peers.lock);
	}

	local_bh_enable();
}