/*
 * xfrm_state.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      YOSHIFUJI Hideaki @USAGI
 *              Split up af-specific functions
 *      Derek Atkins <derek@ihtfp.com>
 *              Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/cache.h>
#include <asm/uaccess.h>

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to three tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
   3. Hash table by saddr to find SAs by source and destination
      address, for lookups that do not key on the SPI.
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;

static inline unsigned int __xfrm4_dst_hash(xfrm_address_t *addr, unsigned int hmask)
{
        unsigned int h;
        h = ntohl(addr->a4);
        h = (h ^ (h>>16)) & hmask;
        return h;
}

static inline unsigned int __xfrm6_dst_hash(xfrm_address_t *addr, unsigned int hmask)
{
        unsigned int h;
        h = ntohl(addr->a6[2]^addr->a6[3]);
        h = (h ^ (h>>16)) & hmask;
        return h;
}

static inline unsigned int __xfrm4_src_hash(xfrm_address_t *addr, unsigned int hmask)
{
        return __xfrm4_dst_hash(addr, hmask);
}

static inline unsigned int __xfrm6_src_hash(xfrm_address_t *addr, unsigned int hmask)
{
        return __xfrm6_dst_hash(addr, hmask);
}

static inline unsigned __xfrm_src_hash(xfrm_address_t *addr, unsigned short family, unsigned int hmask)
{
        switch (family) {
        case AF_INET:
                return __xfrm4_src_hash(addr, hmask);
        case AF_INET6:
                return __xfrm6_src_hash(addr, hmask);
        }
        return 0;
}

static inline unsigned xfrm_src_hash(xfrm_address_t *addr, unsigned short family)
{
        return __xfrm_src_hash(addr, family, xfrm_state_hmask);
}

static inline unsigned int __xfrm_dst_hash(xfrm_address_t *addr, unsigned short family, unsigned int hmask)
{
        switch (family) {
        case AF_INET:
                return __xfrm4_dst_hash(addr, hmask);
        case AF_INET6:
                return __xfrm6_dst_hash(addr, hmask);
        }
        return 0;
}

static inline unsigned int xfrm_dst_hash(xfrm_address_t *addr, unsigned short family)
{
        return __xfrm_dst_hash(addr, family, xfrm_state_hmask);
}

static inline unsigned int __xfrm4_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto,
                                            unsigned int hmask)
{
        unsigned int h;
        h = ntohl(addr->a4^spi^proto);
        h = (h ^ (h>>10) ^ (h>>20)) & hmask;
        return h;
}

static inline unsigned int __xfrm6_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto,
                                            unsigned int hmask)
{
        unsigned int h;
        h = ntohl(addr->a6[2]^addr->a6[3]^spi^proto);
        h = (h ^ (h>>10) ^ (h>>20)) & hmask;
        return h;
}

static inline
unsigned __xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family,
                         unsigned int hmask)
{
        switch (family) {
        case AF_INET:
                return __xfrm4_spi_hash(addr, spi, proto, hmask);
        case AF_INET6:
                return __xfrm6_spi_hash(addr, spi, proto, hmask);
        }
        return 0; /*XXX*/
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family)
{
        return __xfrm_spi_hash(addr, spi, proto, family, xfrm_state_hmask);
}

static struct hlist_head *xfrm_state_hash_alloc(unsigned int sz)
{
        struct hlist_head *n;

        if (sz <= PAGE_SIZE)
                n = kmalloc(sz, GFP_KERNEL);
        else if (hashdist)
                n = __vmalloc(sz, GFP_KERNEL, PAGE_KERNEL);
        else
                n = (struct hlist_head *)
                        __get_free_pages(GFP_KERNEL, get_order(sz));

        if (n)
                memset(n, 0, sz);

        return n;
}

static void xfrm_state_hash_free(struct hlist_head *n, unsigned int sz)
{
        if (sz <= PAGE_SIZE)
                kfree(n);
        else if (hashdist)
                vfree(n);
        else
                free_pages((unsigned long)n, get_order(sz));
}

static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_state *x;

        hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
                unsigned int h;

                h = __xfrm_dst_hash(&x->id.daddr, x->props.family, nhashmask);
                hlist_add_head(&x->bydst, ndsttable+h);

                h = __xfrm_src_hash(&x->props.saddr, x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bysrc, nsrctable+h);

                h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
                                    x->props.family, nhashmask);
                hlist_add_head(&x->byspi, nspitable+h);
        }
}

static unsigned long xfrm_hash_new_size(void)
{
        return ((xfrm_state_hmask + 1) << 1) *
                sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);

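/* Double the size of the three state hash tables and rehash every
 * entry into them.  Runs from a workqueue so the GFP_KERNEL
 * allocations may sleep; hash_resize_mutex serialises resizers and
 * xfrm_state_lock is held only while the chains are transferred.
 */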
static void xfrm_hash_resize(void *__unused)
{
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
        unsigned int nhashmask, ohashmask;
        int i;

        mutex_lock(&hash_resize_mutex);

        nsize = xfrm_hash_new_size();
        ndst = xfrm_state_hash_alloc(nsize);
        if (!ndst)
                goto out_unlock;
        nsrc = xfrm_state_hash_alloc(nsize);
        if (!nsrc) {
                xfrm_state_hash_free(ndst, nsize);
                goto out_unlock;
        }
        nspi = xfrm_state_hash_alloc(nsize);
        if (!nspi) {
                xfrm_state_hash_free(ndst, nsize);
                xfrm_state_hash_free(nsrc, nsize);
                goto out_unlock;
        }

        spin_lock_bh(&xfrm_state_lock);

        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        for (i = xfrm_state_hmask; i >= 0; i--)
                xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
                                   nhashmask);

        odst = xfrm_state_bydst;
        osrc = xfrm_state_bysrc;
        ospi = xfrm_state_byspi;
        ohashmask = xfrm_state_hmask;

        xfrm_state_bydst = ndst;
        xfrm_state_bysrc = nsrc;
        xfrm_state_byspi = nspi;
        xfrm_state_hmask = nhashmask;

        spin_unlock_bh(&xfrm_state_lock);

        osize = (ohashmask + 1) * sizeof(struct hlist_head);
        xfrm_state_hash_free(odst, osize);
        xfrm_state_hash_free(osrc, osize);
        xfrm_state_hash_free(ospi, osize);

out_unlock:
        mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

static int xfrm_state_gc_flush_bundles;

int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        if (del_timer(&x->timer))
                BUG();
        if (del_timer(&x->rtimer))
                BUG();
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
        kfree(x->encap);
        kfree(x->coaddr);
        if (x->mode)
                xfrm_put_mode(x->mode);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        security_xfrm_state_free(x);
        kfree(x);
}

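/* Work function that frees states queued for destruction.  The pending
 * list is detached under xfrm_state_gc_lock so new entries can keep
 * arriving while the detached ones are destroyed.
 */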
static void xfrm_state_gc_task(void *data)
{
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
        struct hlist_head gc_list;

        if (xfrm_state_gc_flush_bundles) {
                xfrm_state_gc_flush_bundles = 0;
                xfrm_flush_bundles();
        }

        spin_lock_bh(&xfrm_state_gc_lock);
        gc_list.first = xfrm_state_gc_list.first;
        INIT_HLIST_HEAD(&xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
                xfrm_state_gc_destroy(x);

        wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}

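/* Per-state lifetime timer.  Checks the hard and soft add/use expiry
 * times, notifies key managers of soft expiry, and either deletes the
 * state or re-arms the timer.  The handler takes x->lock itself.
 */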
static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        x->km.dying = warn;
        if (warn)
                km_state_expired(x, 0, 0);
resched:
        if (next != LONG_MAX &&
            !mod_timer(&x->timer, jiffies + make_jiffies(next)))
                xfrm_state_hold(x);
        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }
        if (!__xfrm_state_delete(x) && x->id.spi)
                km_state_expired(x, 1, 0);

out:
        spin_unlock(&x->lock);
        xfrm_state_put(x);
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
                init_timer(&x->timer);
                x->timer.function = xfrm_timer_handler;
                x->timer.data = (unsigned long)x;
                init_timer(&x->rtimer);
                x->rtimer.function = xfrm_replay_timer_handler;
                x->rtimer.data = (unsigned long)x;
                x->curlft.add_time = (unsigned long)xtime.tv_sec;
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->replay_maxage = 0;
                x->replay_maxdiff = 0;
                spin_lock_init(&x->lock);
        }
        return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        hlist_add_head(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
        int err = -ESRCH;

        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                hlist_del(&x->bydst);
                __xfrm_state_put(x);
                hlist_del(&x->bysrc);
                __xfrm_state_put(x);
                if (x->id.spi) {
                        hlist_del(&x->byspi);
                        __xfrm_state_put(x);
                }
                xfrm_state_num--;
                spin_unlock(&xfrm_state_lock);
                if (del_timer(&x->timer))
                        __xfrm_state_put(x);
                if (del_timer(&x->rtimer))
                        __xfrm_state_put(x);

                /* The number two in this test is the reference
                 * mentioned in the comment below plus the reference
                 * our caller holds.  A larger value means that
                 * there are DSTs attached to this xfrm_state.
                 */
                if (atomic_read(&x->refcnt) > 2) {
                        xfrm_state_gc_flush_bundles = 1;
                        schedule_work(&xfrm_state_gc_work);
                }

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                __xfrm_state_put(x);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
        int err;

        spin_lock_bh(&x->lock);
        err = __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

void xfrm_state_flush(u8 proto)
{
        int i;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;
restart:
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            xfrm_id_proto_match(x->id.proto, proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                xfrm_state_delete(x);
                                xfrm_state_put(x);

                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
                if (x->props.family != family ||
                    x->id.spi != spi ||
                    x->id.proto != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6))
                                continue;
                        break;
                };

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_src_hash(saddr, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
                if (x->props.family != family ||
                    x->id.proto != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6) ||
                            !ipv6_addr_equal((struct in6_addr *)saddr,
                                             (struct in6_addr *)
                                             x->props.saddr.a6))
                                continue;
                        break;
                };

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
        if (use_spi)
                return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
                                           x->id.proto, family);
        else
                return __xfrm_state_lookup_byaddr(&x->id.daddr,
                                                  &x->props.saddr,
                                                  x->id.proto, family);
}

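/* Find a usable SA for the given flow and template by walking the
 * by-destination hash chain.  If nothing matches and no acquire is
 * already pending, allocate a larval XFRM_STATE_ACQ entry and ask the
 * registered key managers to negotiate a real SA via km_query().
 */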
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned int h = xfrm_dst_hash(daddr, family);
        struct hlist_node *entry;
        struct xfrm_state *x, *x0;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;

        spin_lock_bh(&xfrm_state_lock);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
                        /* Resolution logic:
                           1. There is a valid state with matching selector.
                              Done.
                           2. Valid state with inappropriate selector. Skip.

                           Entering area of "sysdeps".

                           3. If state is not valid, selector is temporary,
                              it selects only session which triggered
                              previous resolution. Key manager will do
                              something to install a state with proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, family) ||
                                    !security_xfrm_state_pol_flow_match(x, pol, fl))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, family) &&
                                    security_xfrm_state_pol_flow_match(x, pol, fl))
                                        error = -ESRCH;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
                                              tmpl->id.proto, family)) != NULL) {
                        xfrm_state_put(x0);
                        error = -EEXIST;
                        goto out;
                }
                x = xfrm_state_alloc();
                if (x == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
                if (error) {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        goto out;
                }

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                        xfrm_state_hold(x);
                        h = xfrm_src_hash(saddr, family);
                        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                        xfrm_state_hold(x);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                                xfrm_state_hold(x);
                        }
                        x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                        xfrm_state_hold(x);
                        x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                        add_timer(&x->timer);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = -ESRCH;
                }
        }
out:
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN : error;
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}

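/* Link a state into the bydst, bysrc and (for SPI-bearing protocols)
 * byspi hash tables, arm its timers, and schedule a hash table grow
 * when the tables are getting crowded.  Caller holds xfrm_state_lock.
 */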
static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned int h = xfrm_dst_hash(&x->id.daddr, x->props.family);

        hlist_add_head(&x->bydst, xfrm_state_bydst+h);
        xfrm_state_hold(x);

        h = xfrm_src_hash(&x->props.saddr, x->props.family);

        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
        xfrm_state_hold(x);

        if (xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY)) {
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
                                  x->props.family);

                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                xfrm_state_hold(x);
        }

        if (!mod_timer(&x->timer, jiffies + HZ))
                xfrm_state_hold(x);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
                xfrm_state_hold(x);

        wake_up(&km_waitq);

        xfrm_state_num++;

        if (x->bydst.next != NULL &&
            (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
            xfrm_state_num > xfrm_state_hmask)
                schedule_work(&xfrm_hash_work);
}

void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);

        xfrm_flush_all_bundles();
}
EXPORT_SYMBOL(xfrm_state_insert);

/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
        unsigned int h = xfrm_dst_hash(daddr, family);
        struct hlist_node *entry;
        struct xfrm_state *x;

        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.reqid  != reqid ||
                    x->props.mode   != mode ||
                    x->props.family != family ||
                    x->km.state     != XFRM_STATE_ACQ ||
                    x->id.spi       != 0)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4    != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
                                             (struct in6_addr *)daddr) ||
                            !ipv6_addr_equal((struct in6_addr *)
                                             x->props.saddr.a6,
                                             (struct in6_addr *)saddr))
                                continue;
                        break;
                };

                xfrm_state_hold(x);
                return x;
        }

        if (!create)
                return NULL;

        x = xfrm_state_alloc();
        if (likely(x)) {
                switch (family) {
                case AF_INET:
                        x->sel.daddr.a4 = daddr->a4;
                        x->sel.saddr.a4 = saddr->a4;
                        x->sel.prefixlen_d = 32;
                        x->sel.prefixlen_s = 32;
                        x->props.saddr.a4 = saddr->a4;
                        x->id.daddr.a4 = daddr->a4;
                        break;

                case AF_INET6:
                        ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
                                       (struct in6_addr *)daddr);
                        ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
                                       (struct in6_addr *)saddr);
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
                        ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
                                       (struct in6_addr *)saddr);
                        ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
                                       (struct in6_addr *)daddr);
                        break;
                };

                x->km.state = XFRM_STATE_ACQ;
                x->id.proto = proto;
                x->props.family = family;
                x->props.mode = mode;
                x->props.reqid = reqid;
                x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                xfrm_state_hold(x);
                x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                add_timer(&x->timer);
                xfrm_state_hold(x);
                hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                h = xfrm_src_hash(saddr, family);
                xfrm_state_hold(x);
                hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                wake_up(&km_waitq);
        }

        return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

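/* Insert a fully specified state.  Fails with -EEXIST if an identical
 * state is already installed; otherwise any matching larval ACQ entry
 * (found by km.seq or by address) is located and deleted afterwards.
 */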
int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int family;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        family = x->props.family;

        spin_lock_bh(&xfrm_state_lock);

        x1 = __xfrm_state_locate(x, use_spi, family);
        if (x1) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (use_spi && x->km.seq) {
                x1 = __xfrm_find_acq_byseq(x->km.seq);
                if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (use_spi && !x1)
                x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
                                     x->id.proto,
                                     &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (!err)
                xfrm_flush_all_bundles();

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);

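/* Update an installed state in place: encapsulation, care-of address,
 * selector and lifetimes are copied from the new state into the
 * existing one.  If only a larval ACQ entry exists the new state is
 * inserted instead.  Returns -ESRCH when no matching state is found.
 */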
int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        spin_lock_bh(&xfrm_state_lock);
        x1 = __xfrm_state_locate(x, use_spi, x->props.family);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                if (x->coaddr && x1->coaddr) {
                        memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
                }
                if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
                        memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                if (!mod_timer(&x1->timer, jiffies + HZ))
                        xfrm_state_hold(x1);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
EXPORT_SYMBOL(xfrm_state_update);

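/* Byte/packet lifetime check done on the data path.  Marks the state
 * EXPIRED and fires its timer when a hard limit is crossed, and sends
 * a soft-expire notification the first time a soft limit is crossed.
 */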
int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                x->km.state = XFRM_STATE_EXPIRED;
                if (!mod_timer(&x->timer, jiffies))
                        xfrm_state_hold(x);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit)) {
                x->km.dying = 1;
                km_state_expired(x, 0, 0);
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
        int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
                - skb_headroom(skb);

        if (nhead > 0)
                return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

        /* Check tail too... */
        return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = xfrm_state_check_expire(x);
        if (err < 0)
                goto err;
        err = xfrm_state_check_space(x, skb);
err:
        return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup(daddr, spi, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
               unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->tmpl_sort)
                err = afinfo->tmpl_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
                unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->state_sort)
                err = afinfo->state_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
        int i;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;

                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq &&
                            x->km.state == XFRM_STATE_ACQ) {
                                xfrm_state_hold(x);
                                return x;
                        }
                }
        }
        return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_find_acq_byseq(seq);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static DEFINE_SPINLOCK(acqseq_lock);

        spin_lock_bh(&acqseq_lock);
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

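/* Assign an SPI to a state that does not have one yet.  With
 * minspi == maxspi the fixed value is used if free; otherwise random
 * SPIs in [minspi, maxspi] are probed against the lookup table.  On
 * success the state is hashed into the byspi table.
 */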
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
        unsigned int h;
        struct xfrm_state *x0;

        if (x->id.spi)
                return;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        return;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                minspi = ntohl(minspi);
                maxspi = ntohl(maxspi);
                for (h = 0; h < maxspi-minspi+1; h++) {
                        spi = minspi + net_random()%(maxspi-minspi+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                x->id.spi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                xfrm_state_hold(x);
                spin_unlock_bh(&xfrm_state_lock);
                wake_up(&km_waitq);
        }
}
EXPORT_SYMBOL(xfrm_alloc_spi);

int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x;
        struct hlist_node *entry;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto))
                                count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }

        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_id_proto_match(x->id.proto, proto))
                                continue;
                        err = func(x, --count, data);
                        if (err)
                                goto out;
                }
        }
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

void xfrm_replay_notify(struct xfrm_state *x, int event)
{
        struct km_event c;
        /* we send notify messages in case
         *  1. we updated one of the sequence numbers, and the seqno difference
         *     is at least x->replay_maxdiff, in this case we also update the
         *     timeout of our timer function
         *  2. if x->replay_maxage has elapsed since last update,
         *     and there were changes
         *
         *  The state structure must be locked!
         */

        switch (event) {
        case XFRM_REPLAY_UPDATE:
                if (x->replay_maxdiff &&
                    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
                    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
                                return;
                }

                break;

        case XFRM_REPLAY_TIMEOUT:
                if ((x->replay.seq == x->preplay.seq) &&
                    (x->replay.bitmap == x->preplay.bitmap) &&
                    (x->replay.oseq == x->preplay.oseq)) {
                        x->xflags |= XFRM_TIME_DEFER;
                        return;
                }

                break;
        }

        memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
        c.event = XFRM_MSG_NEWAE;
        c.data.aevent = event;
        km_state_notify(x, &c);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) {
                xfrm_state_hold(x);
                x->xflags &= ~XFRM_TIME_DEFER;
        }
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;

        spin_lock(&x->lock);

        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on())
                        xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }

        spin_unlock(&x->lock);
        xfrm_state_put(x);
}

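/* Anti-replay window helpers: xfrm_replay_check() rejects sequence
 * numbers that are zero, fall outside the configured replay_window or
 * are already marked in the bitmap; xfrm_replay_advance() records a
 * freshly accepted sequence number and shifts the window forward.
 */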
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }

        if (xfrm_aevent_is_on())
                xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);

static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(xp, dir, c);
        read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify)
                        km->notify(x, c);
        read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);

        if (hard)
                wake_up(&km_waitq);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL, acqret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!acqret)
                        err = acqret;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->report) {
                        ret = km->report(proto, sel, addr);
                        if (!ret)
                                err = ret;
                }
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_report);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else
                xfrm_state_afinfo[afinfo->family] = afinfo;
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        xfrm_state_afinfo[afinfo->family] = NULL;
        }
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (unlikely(!afinfo))
                read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        read_unlock(&xfrm_state_afinfo_lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

/*
 * This function is NOT optimal.  For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal.  However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
        int res = mtu;

        res -= x->props.header_len;

        for (;;) {
                int m = res;

                if (m < 68)
                        return 68;

                spin_lock_bh(&x->lock);
                if (x->km.state == XFRM_STATE_VALID &&
                    x->type && x->type->get_max_size)
                        m = x->type->get_max_size(x, m);
                else
                        m += x->props.header_len;
                spin_unlock_bh(&x->lock);

                if (m <= mtu)
                        break;
                res -= (m - mtu);
        }

        return res;
}

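/* Finish constructing a state from the supplied parameters: run the
 * per-family init_flags hook, bind the xfrm type and mode matching
 * x->id.proto and x->props.mode, and mark the state VALID on success.
 */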
int xfrm_init_state(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        int family = x->props.family;
        int err;

        err = -EAFNOSUPPORT;
        afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                goto error;

        err = 0;
        if (afinfo->init_flags)
                err = afinfo->init_flags(x);

        xfrm_state_put_afinfo(afinfo);

        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->type = xfrm_get_type(x->id.proto, family);
        if (x->type == NULL)
                goto error;

        err = x->type->init_state(x);
        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->mode = xfrm_get_mode(x->props.mode, family);
        if (x->mode == NULL)
                goto error;

        err = 0;
        x->km.state = XFRM_STATE_VALID;

error:
        return err;
}

EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
        unsigned int sz;

        sz = sizeof(struct hlist_head) * 8;

        xfrm_state_bydst = xfrm_state_hash_alloc(sz);
        xfrm_state_bysrc = xfrm_state_hash_alloc(sz);
        xfrm_state_byspi = xfrm_state_hash_alloc(sz);
        if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
                panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
        xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

        INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}