/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address.  Each node contains long-lived
 * information about the peer which doesn't depend on routes.
 * At this moment this information consists only of the ID field for the next
 * outgoing IP packet.  This field is incremented with each packet as encoded
 * in the inet_getid() function (include/net/inetpeer.h).
 * At the time of writing, this code generates an unpredictable identifier
 * only for packets subjected (actually or potentially) to defragmentation;
 * i.e. DF packets smaller than the PMTU use a constant ID and do not use
 * this code (see ip_select_ident() in include/net/ip.h).
 *
 * Route cache entries hold references to our nodes.
 * New cache entries get references via lookup by destination IP address in
 * the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 * when we try to output an IP packet which needs an unpredictable ID (see
 * __ip_select_ident() in net/ipv4/route.c).
 * Nodes are removed only when their reference counter goes to 0.
 * When that happens, the node may be removed once a sufficient amount of
 * time has passed since its last use.  The least-recently-used entry can
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an AVL tree.
 * Such an implementation has been chosen not just for fun.  It's a way to
 * prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 * amount of long-living nodes in a single hash slot would significantly delay
 * lookups performed with disabled BHs.
 *
 * Serialisation issues.
 * 1.  Nodes may appear in the tree only with the pool lock held.
 * 2.  Nodes may disappear from the tree only with the pool lock held
 *     AND the reference count being 0.
 * 3.  Nodes appear on and disappear from the unused node list only under
 *     "unused_peers.lock".
 * 4.  A base's total counter is modified under that pool's lock.
 * 5.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_height: pool lock
 *	unused: unused node list lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	dtime: unused node list lock
 *	daddr: unchangeable
 *	ip_id_count: atomic value (no lock needed)
 */

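/*
 * Illustrative sketch only (not part of the original file): the typical
 * caller pattern for drawing an unpredictable IP ID out of this pool.
 * It assumes inet_getid() as declared in include/net/inetpeer.h; the
 * helper name is made up for the example.
 */
static inline __u16 example_next_ip_id(struct inetpeer_addr *daddr)
{
	struct inet_peer *peer = inet_getpeer(daddr, 1);  /* create if absent */
	__u16 id = 0;

	if (peer) {
		id = inet_getid(peer, 0);  /* atomically advances ip_id_count */
		inet_putpeer(peer);	   /* drop the reference we took */
	}
	return id;
}
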
static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

struct inet_peer_base {
	struct inet_peer __rcu *root;
	spinlock_t	lock;
	int		total;
};

static struct inet_peer_base v4_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SPIN_LOCK_UNLOCKED(v4_peers.lock),
	.total		= 0,
};

static struct inet_peer_base v6_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SPIN_LOCK_UNLOCKED(v6_peers.lock),
	.total		= 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

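/* These knobs are exposed through /proc/sys/net/ipv4/ (registered in
 * sysctl_net_ipv4.c); presumably, e.g.:
 *
 *	echo 131072 > /proc/sys/net/ipv4/inet_peer_threshold
 *
 * The TTL and GC values are held in jiffies here but are read and
 * written in seconds through proc_dointvec_jiffies.
 */
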
static struct {
	struct list_head	list;
	spinlock_t		lock;
} unused_peers = {
	.list		= LIST_HEAD_INIT(unused_peers.list),
	.lock		= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
};

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	/* All timers started at system startup tend to synchronize.
	 * Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	if (!list_empty(&p->unused)) {
		spin_lock_bh(&unused_peers.lock);
		list_del_init(&p->unused);
		spin_unlock_bh(&unused_peers.lock);
	}
}

static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->a6[i] == b->a6[i])
			continue;
		if (a->a6[i] < b->a6[i])
			return -1;
		return 1;
	}

	return 0;
}

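/* Note: for AF_INET only the first 32-bit word is compared (in struct
 * inetpeer_addr, a4 shares storage with a6[0]), while AF_INET6 compares
 * all four words.  The words are raw big-endian values, so the resulting
 * order is arbitrary but total and consistent - all the AVL tree needs.
 */
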
/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_dereference_protected(_base->root,		\
			lockdep_is_held(&_base->lock));		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_dereference_protected(*v,		\
			lockdep_is_held(&_base->lock));		\
	}							\
	u;							\
})

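/* Usage sketch (for illustration; it mirrors the callers below): the
 * macro relies on a 'stackptr' cursor declared in the caller's scope,
 * and the pool lock must be held:
 *
 *	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
 *	struct inet_peer *p;
 *
 *	spin_lock_bh(&base->lock);
 *	p = lookup(&daddr, stack, base);  (peer_avl_empty if not found)
 *	...
 *	spin_unlock_bh(&base->lock);
 */
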
/*
 * Called with rcu_read_lock_bh().
 * Because we hold no lock against a writer, it's quite possible that we
 * fall into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
				       struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference_bh(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted: unlink_from_pool() sets refcnt=-1 to make
			 * the distinction between an unused entry (refcnt=0)
			 * and a freed one.
			 */
			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference_bh(u->avl_left);
		else
			u = rcu_dereference_bh(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_dereference_protected(*v,			\
			lockdep_is_held(&base->lock));		\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_dereference_protected(*v,		\
			lockdep_is_held(&base->lock));		\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_dereference_protected(*nodep,
				lockdep_is_held(&base->lock));
		l = rcu_dereference_protected(node->avl_left,
				lockdep_is_held(&base->lock));
		r = rcu_dereference_protected(node->avl_right,
				lockdep_is_held(&base->lock));
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_dereference_protected(l->avl_left,
					lockdep_is_held(&base->lock));
			lr = rcu_dereference_protected(l->avl_right,
					lockdep_is_held(&base->lock));
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_dereference_protected(lr->avl_left,
					lockdep_is_held(&base->lock));	/* lrl: RH or RH-1 */
				lrr = rcu_dereference_protected(lr->avl_right,
					lockdep_is_held(&base->lock));	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_dereference_protected(r->avl_right,
					lockdep_is_held(&base->lock));
			rl = rcu_dereference_protected(r->avl_left,
					lockdep_is_held(&base->lock));
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_dereference_protected(rl->avl_right,
					lockdep_is_held(&base->lock));	/* rlr: LH or LH-1 */
				rll = rcu_dereference_protected(rl->avl_left,
					lockdep_is_held(&base->lock));	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1; /* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else { /* heights differ by at most 1: already balanced */
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
{
	int do_free;

	do_free = 0;

	spin_lock_bh(&base->lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent a sudden disappearance.  If we can
	 * atomically (because of lockless readers) take this last reference,
	 * it's safe to remove the node and free it later.
	 * We use refcnt=-1 to alert lockless readers that this entry is deleted.
	 */
	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
		struct inet_peer __rcu ***stackptr, ***delp;
		if (lookup(&p->daddr, stack, base) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty_rcu) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p, base);
			BUG_ON(rcu_dereference_protected(*stackptr[-1],
					lockdep_is_held(&base->lock)) != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->daddr > x->daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			RCU_INIT_POINTER(*delp[0], t);
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr, base);
		base->total--;
		do_free = 1;
	}
	spin_unlock_bh(&base->lock);

	if (do_free)
		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon.
		 */
		inet_putpeer(p);
}

static struct inet_peer_base *family_to_base(int family)
{
	return (family == AF_INET ? &v4_peers : &v6_peers);
}

static struct inet_peer_base *peer_to_base(struct inet_peer *p)
{
	return family_to_base(p->daddr.family);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&unused_peers.lock);
	if (!list_empty(&unused_peers.list)) {
		__u32 delta;

		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&unused_peers.lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node disappearing
		 * before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&unused_peers.lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown above inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p, peer_to_base(p));
	return 0;
}

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(daddr->family);
	struct inet_peer *p;

	/* Look up the address quickly, locklessly.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock_bh();
	p = lookup_rcu_bh(daddr, base);
	rcu_read_unlock_bh();

	if (p) {
		/* The existing node has been found.
		 * Remove the entry from the unused list if it was there.
		 */
		unlink_from_unused(p);
		return p;
	}

	/* Retry an exact lookup, this time taking the lock.
	 * At least, the nodes should be hot in our cache.
	 */
	spin_lock_bh(&base->lock);
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		spin_unlock_bh(&base->lock);
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
		p->tcp_ts_stamp = 0;
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		p->rate_last = 0;
		INIT_LIST_HEAD(&p->unused);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	spin_unlock_bh(&base->lock);

	if (base->total >= inet_peer_threshold)
		/* Remove one least-recently-used entry. */
		cleanup_once(0);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

static int compute_total(void)
{
	return v4_peers.total + v6_peers.total;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl, total;

	total = compute_total();
	if (total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after an inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (the more entries,
	 * the shorter the interval). */
	total = compute_total();
	if (total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

void inet_putpeer(struct inet_peer *p)
{
	local_bh_disable();

	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
		list_add_tail(&p->unused, &unused_peers.list);
		p->dtime = (__u32)jiffies;
		spin_unlock(&unused_peers.lock);
	}

	local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but those work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
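
/*
 * Illustrative sketch only (not part of the original file): how an ICMP
 * sender might consult the limiter above.  The helper name and the
 * one-token-per-second policy are assumptions made for the example.
 */
static inline bool example_icmp_rate_ok(struct inet_peer *peer)
{
	/* Allow bursts of up to XRLIM_BURST_FACTOR messages, then refill
	 * at one token per 'timeout' jiffies (here HZ, i.e. one per second).
	 */
	return inet_peer_xrlim_allow(peer, HZ);
}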