/*
 * xfrm_state.c
 *
 * Changes:
 *        Mitsuru KANDA @USAGI
 *        Kazunori MIYAZAWA @USAGI
 *        Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *                IPv6 support
 *        YOSHIFUJI Hideaki @USAGI
 *                Split up af-specific functions
 *        Derek Atkins <derek@ihtfp.com>
 *                Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>

#include "xfrm_hash.h"

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;

static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
                                         xfrm_address_t *saddr,
                                         u32 reqid,
                                         unsigned short family)
{
        return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *addr,
                                         unsigned short family)
{
        return __xfrm_src_hash(addr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family)
{
        return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}

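/*
 * Illustrative sketch (not part of the original code): the wrappers above
 * simply mask a precomputed hash into the current table size.  Assuming a
 * table of 8 buckets, xfrm_state_hmask is 7, so for example:
 *
 *        unsigned int h = __xfrm_src_hash(addr, family, 7);   // 0 <= h <= 7
 *        struct hlist_head *chain = xfrm_state_bysrc + h;
 *
 * After a resize the mask changes, which is why xfrm_hash_transfer() below
 * has to rehash every entry into the new tables.
 */
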
static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_state *x;

        hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
                unsigned int h;

                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.reqid, x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bydst, ndsttable+h);

                h = __xfrm_src_hash(&x->props.saddr, x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bysrc, nsrctable+h);

                h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
                                    x->props.family, nhashmask);
                hlist_add_head(&x->byspi, nspitable+h);
        }
}

static unsigned long xfrm_hash_new_size(void)
{
        return ((xfrm_state_hmask + 1) << 1) *
                sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(void *__unused)
{
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
        unsigned int nhashmask, ohashmask;
        int i;

        mutex_lock(&hash_resize_mutex);

        nsize = xfrm_hash_new_size();
        ndst = xfrm_hash_alloc(nsize);
        if (!ndst)
                goto out_unlock;
        nsrc = xfrm_hash_alloc(nsize);
        if (!nsrc) {
                xfrm_hash_free(ndst, nsize);
                goto out_unlock;
        }
        nspi = xfrm_hash_alloc(nsize);
        if (!nspi) {
                xfrm_hash_free(ndst, nsize);
                xfrm_hash_free(nsrc, nsize);
                goto out_unlock;
        }

        spin_lock_bh(&xfrm_state_lock);

        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        for (i = xfrm_state_hmask; i >= 0; i--)
                xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
                                   nhashmask);

        odst = xfrm_state_bydst;
        osrc = xfrm_state_bysrc;
        ospi = xfrm_state_byspi;
        ohashmask = xfrm_state_hmask;

        xfrm_state_bydst = ndst;
        xfrm_state_bysrc = nsrc;
        xfrm_state_byspi = nspi;
        xfrm_state_hmask = nhashmask;

        spin_unlock_bh(&xfrm_state_lock);

        osize = (ohashmask + 1) * sizeof(struct hlist_head);
        xfrm_hash_free(odst, osize);
        xfrm_hash_free(osrc, osize);
        xfrm_hash_free(ospi, osize);

out_unlock:
        mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        del_timer_sync(&x->timer);
        del_timer_sync(&x->rtimer);
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
        kfree(x->encap);
        kfree(x->coaddr);
        if (x->mode)
                xfrm_put_mode(x->mode);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        security_xfrm_state_free(x);
        kfree(x);
}

static void xfrm_state_gc_task(void *data)
{
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
        struct hlist_head gc_list;

        spin_lock_bh(&xfrm_state_gc_lock);
        gc_list.first = xfrm_state_gc_list.first;
        INIT_HLIST_HEAD(&xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
                xfrm_state_gc_destroy(x);

        wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}

static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        x->km.dying = warn;
        if (warn)
                km_state_expired(x, 0, 0);
resched:
        if (next != LONG_MAX)
                mod_timer(&x->timer, jiffies + make_jiffies(next));

        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }
        if (!__xfrm_state_delete(x) && x->id.spi)
                km_state_expired(x, 1, 0);

out:
        spin_unlock(&x->lock);
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
                init_timer(&x->timer);
                x->timer.function = xfrm_timer_handler;
                x->timer.data = (unsigned long)x;
                init_timer(&x->rtimer);
                x->rtimer.function = xfrm_replay_timer_handler;
                x->rtimer.data = (unsigned long)x;
                x->curlft.add_time = (unsigned long)xtime.tv_sec;
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->replay_maxage = 0;
                x->replay_maxdiff = 0;
                spin_lock_init(&x->lock);
        }
        return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

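/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller allocates a state, fills in its identity and properties, and then
 * calls xfrm_state_add(); if insertion fails, the caller drops its reference.
 * The specific field values below are made-up placeholders, and the mode
 * constant is assumed from the uapi headers rather than from this file.
 *
 *        struct xfrm_state *x = xfrm_state_alloc();
 *        if (!x)
 *                return -ENOMEM;
 *        x->id.proto = IPPROTO_ESP;
 *        x->props.family = AF_INET;
 *        x->props.mode = XFRM_MODE_TUNNEL;     (assumed mode constant)
 *        err = xfrm_state_add(x);
 *        if (err)
 *                xfrm_state_put(x);
 */
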
void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        hlist_add_head(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
        int err = -ESRCH;

        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                hlist_del(&x->bydst);
                hlist_del(&x->bysrc);
                if (x->id.spi)
                        hlist_del(&x->byspi);
                xfrm_state_num--;
                spin_unlock(&xfrm_state_lock);

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                __xfrm_state_put(x);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
        int err;

        spin_lock_bh(&x->lock);
        err = __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

void xfrm_state_flush(u8 proto)
{
        int i;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;
restart:
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            xfrm_id_proto_match(x->id.proto, proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                xfrm_state_delete(x);
                                xfrm_state_put(x);

                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
                if (x->props.family != family ||
                    x->id.spi != spi ||
                    x->id.proto != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6))
                                continue;
                        break;
                };

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_src_hash(saddr, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
                if (x->props.family != family ||
                    x->id.proto != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6) ||
                            !ipv6_addr_equal((struct in6_addr *)saddr,
                                             (struct in6_addr *)
                                             x->props.saddr.a6))
                                continue;
                        break;
                };

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
        if (use_spi)
                return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
                                           x->id.proto, family);
        else
                return __xfrm_state_lookup_byaddr(&x->id.daddr,
                                                  &x->props.saddr,
                                                  x->id.proto, family);
}

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
        struct hlist_node *entry;
        struct xfrm_state *x, *x0;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;

        spin_lock_bh(&xfrm_state_lock);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
                        /* Resolution logic:
                           1. There is a valid state with matching selector.
                              Done.
                           2. Valid state with inappropriate selector. Skip.

                           Entering area of "sysdeps".

                           3. If state is not valid, selector is temporary,
                              it selects only session which triggered
                              previous resolution. Key manager will do
                              something to install a state with proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, family) ||
                                    !security_xfrm_state_pol_flow_match(x, pol, fl))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, family) &&
                                    security_xfrm_state_pol_flow_match(x, pol, fl))
                                        error = -ESRCH;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
                                              tmpl->id.proto, family)) != NULL) {
                        xfrm_state_put(x0);
                        error = -EEXIST;
                        goto out;
                }
                x = xfrm_state_alloc();
                if (x == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
                if (error) {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        goto out;
                }

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                        h = xfrm_src_hash(saddr, family);
                        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                        }
                        x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                        x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                        add_timer(&x->timer);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = -ESRCH;
                }
        }
out:
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN : error;
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}

static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned int h;

        x->genid = ++xfrm_state_genid;

        h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                          x->props.reqid, x->props.family);
        hlist_add_head(&x->bydst, xfrm_state_bydst+h);

        h = xfrm_src_hash(&x->props.saddr, x->props.family);
        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

        if (xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY)) {
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
                                  x->props.family);

                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
        }

        mod_timer(&x->timer, jiffies + HZ);
        if (x->replay_maxage)
                mod_timer(&x->rtimer, jiffies + x->replay_maxage);

        wake_up(&km_waitq);

        xfrm_state_num++;

        if (x->bydst.next != NULL &&
            (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
            xfrm_state_num > xfrm_state_hmask)
                schedule_work(&xfrm_hash_work);
}

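/*
 * Note on the resize trigger above: the work item is only scheduled when the
 * chain the new entry landed on already held another element
 * (x->bydst.next != NULL), the table can still grow (its current size is
 * below xfrm_state_hashmax), and the number of states is at least the number
 * of buckets, i.e. the average load factor has reached 1.  xfrm_hash_resize()
 * then doubles the tables, see xfrm_hash_new_size() above.
 */
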
/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
        unsigned short family = xnew->props.family;
        u32 reqid = xnew->props.reqid;
        struct xfrm_state *x;
        struct hlist_node *entry;
        unsigned int h;

        h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
                    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
                        x->genid = xfrm_state_genid;
        }
}

void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
        unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
        struct hlist_node *entry;
        struct xfrm_state *x;

        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.reqid != reqid ||
                    x->props.mode != mode ||
                    x->props.family != family ||
                    x->km.state != XFRM_STATE_ACQ ||
                    x->id.spi != 0)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
                                             (struct in6_addr *)daddr) ||
                            !ipv6_addr_equal((struct in6_addr *)
                                             x->props.saddr.a6,
                                             (struct in6_addr *)saddr))
                                continue;
                        break;
                };

                xfrm_state_hold(x);
                return x;
        }

        if (!create)
                return NULL;

        x = xfrm_state_alloc();
        if (likely(x)) {
                switch (family) {
                case AF_INET:
                        x->sel.daddr.a4 = daddr->a4;
                        x->sel.saddr.a4 = saddr->a4;
                        x->sel.prefixlen_d = 32;
                        x->sel.prefixlen_s = 32;
                        x->props.saddr.a4 = saddr->a4;
                        x->id.daddr.a4 = daddr->a4;
                        break;

                case AF_INET6:
                        ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
                                       (struct in6_addr *)daddr);
                        ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
                                       (struct in6_addr *)saddr);
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
                        ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
                                       (struct in6_addr *)saddr);
                        ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
                                       (struct in6_addr *)daddr);
                        break;
                };

                x->km.state = XFRM_STATE_ACQ;
                x->id.proto = proto;
                x->props.family = family;
                x->props.mode = mode;
                x->props.reqid = reqid;
                x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                xfrm_state_hold(x);
                x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                add_timer(&x->timer);
                hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                h = xfrm_src_hash(saddr, family);
                hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                wake_up(&km_waitq);
        }

        return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int family;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        family = x->props.family;

        spin_lock_bh(&xfrm_state_lock);

        x1 = __xfrm_state_locate(x, use_spi, family);
        if (x1) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (use_spi && x->km.seq) {
                x1 = __xfrm_find_acq_byseq(x->km.seq);
                if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (use_spi && !x1)
                x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
                                     x->id.proto,
                                     &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);

int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        spin_lock_bh(&xfrm_state_lock);
        x1 = __xfrm_state_locate(x, use_spi, x->props.family);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                if (x->coaddr && x1->coaddr) {
                        memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
                }
                if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
                        memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                mod_timer(&x1->timer, jiffies + HZ);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                x->km.state = XFRM_STATE_EXPIRED;
                mod_timer(&x->timer, jiffies);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit)) {
                x->km.dying = 1;
                km_state_expired(x, 0, 0);
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
        int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
                - skb_headroom(skb);

        if (nhead > 0)
                return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

        /* Check tail too... */
        return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = xfrm_state_check_expire(x);
        if (err < 0)
                goto err;
        err = xfrm_state_check_space(x, skb);
err:
        return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup(daddr, spi, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
               unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->tmpl_sort)
                err = afinfo->tmpl_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
                unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->state_sort)
                err = afinfo->state_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
        int i;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;

                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq &&
                            x->km.state == XFRM_STATE_ACQ) {
                                xfrm_state_hold(x);
                                return x;
                        }
                }
        }
        return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_find_acq_byseq(seq);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static DEFINE_SPINLOCK(acqseq_lock);

        spin_lock_bh(&acqseq_lock);
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
        unsigned int h;
        struct xfrm_state *x0;

        if (x->id.spi)
                return;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        return;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                minspi = ntohl(minspi);
                maxspi = ntohl(maxspi);
                for (h = 0; h < maxspi-minspi+1; h++) {
                        spi = minspi + net_random()%(maxspi-minspi+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                x->id.spi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                spin_unlock_bh(&xfrm_state_lock);
                wake_up(&km_waitq);
        }
}
EXPORT_SYMBOL(xfrm_alloc_spi);

int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x;
        struct hlist_node *entry;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto))
                                count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }

        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_id_proto_match(x->id.proto, proto))
                                continue;
                        err = func(x, --count, data);
                        if (err)
                                goto out;
                }
        }
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

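/*
 * Usage sketch for xfrm_state_walk() (illustrative, not part of the original
 * code): the callback runs once per matching state with a decreasing index
 * and is invoked under xfrm_state_lock, so it must not sleep or call back
 * into state APIs that take the same lock.  The function and variable names
 * below are hypothetical.
 *
 *        static int dump_one(struct xfrm_state *x, int count, void *ptr)
 *        {
 *                // count reaches 0 for the last matching state
 *                return 0;
 *        }
 *
 *        err = xfrm_state_walk(IPSEC_PROTO_ANY, dump_one, NULL);
 */
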

void xfrm_replay_notify(struct xfrm_state *x, int event)
{
        struct km_event c;
        /* we send notify messages in case
         *  1. we updated one of the sequence numbers, and the seqno difference
         *     is at least x->replay_maxdiff, in this case we also update the
         *     timeout of our timer function
         *  2. if x->replay_maxage has elapsed since last update,
         *     and there were changes
         *
         *  The state structure must be locked!
         */

        switch (event) {
        case XFRM_REPLAY_UPDATE:
                if (x->replay_maxdiff &&
                    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
                    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
                                return;
                }

                break;

        case XFRM_REPLAY_TIMEOUT:
                if ((x->replay.seq == x->preplay.seq) &&
                    (x->replay.bitmap == x->preplay.bitmap) &&
                    (x->replay.oseq == x->preplay.oseq)) {
                        x->xflags |= XFRM_TIME_DEFER;
                        return;
                }

                break;
        }

        memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
        c.event = XFRM_MSG_NEWAE;
        c.data.aevent = event;
        km_state_notify(x, &c);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
                x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;

        spin_lock(&x->lock);

        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on())
                        xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }

        spin_unlock(&x->lock);
}

int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }

        if (xfrm_aevent_is_on())
                xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);

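/*
 * Worked example for the replay window handling above (illustrative only).
 * Assume props.replay_window == 32 and replay.seq == 100:
 *
 *   - xfrm_replay_check(x, htonl(101)) returns 0, since the sequence number
 *     is ahead of the current window.
 *   - xfrm_replay_advance(x, htonl(101)) shifts the bitmap left by one,
 *     sets bit 0 and moves replay.seq to 101.
 *   - xfrm_replay_check(x, htonl(95)) now only succeeds if bit
 *     (101 - 95) = 6 is still clear; otherwise it counts a replay and
 *     returns -EINVAL.
 */
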
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(xp, dir, c);
        read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify)
                        km->notify(x, c);
        read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);

        if (hard)
                wake_up(&km_waitq);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL, acqret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!acqret)
                        err = acqret;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->report) {
                        ret = km->report(proto, sel, addr);
                        if (!ret)
                                err = ret;
                }
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_report);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

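/*
 * Registration sketch (illustrative, not from the original sources): a key
 * manager such as af_key or xfrm_user fills in a struct xfrm_mgr with the
 * callbacks this file invokes (notify, acquire, compile_policy,
 * notify_policy, new_mapping, report) and registers it at module init.
 * The structure and function names below are hypothetical; acquire and
 * compile_policy are called unconditionally above, so a real manager must
 * provide at least those two.
 *
 *        static struct xfrm_mgr example_mgr = {
 *                .notify         = example_notify,
 *                .acquire        = example_acquire,
 *                .compile_policy = example_compile_policy,
 *        };
 *
 *        xfrm_register_km(&example_mgr);
 *        ...
 *        xfrm_unregister_km(&example_mgr);
 */
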
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else
                xfrm_state_afinfo[afinfo->family] = afinfo;
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        xfrm_state_afinfo[afinfo->family] = NULL;
        }
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (unlikely(!afinfo))
                read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        read_unlock(&xfrm_state_afinfo_lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

/*
 * This function is NOT optimal. For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal. However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
        int res = mtu;

        res -= x->props.header_len;

        for (;;) {
                int m = res;

                if (m < 68)
                        return 68;

                spin_lock_bh(&x->lock);
                if (x->km.state == XFRM_STATE_VALID &&
                    x->type && x->type->get_max_size)
                        m = x->type->get_max_size(x, m);
                else
                        m += x->props.header_len;
                spin_unlock_bh(&x->lock);

                if (m <= mtu)
                        break;
                res -= (m - mtu);
        }

        return res;
}

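/*
 * Numerical sketch of the loop above (illustrative; the overhead figure is
 * made up and the real value depends on the transform's get_max_size()).
 * With mtu = 1500 and props.header_len = 32, res starts at 1468.  Each pass
 * asks the type how large a payload of res could become on the wire; while
 * that estimate still exceeds 1500, the excess is subtracted from res, so
 * the result converges on the largest payload whose transformed size fits
 * the real MTU, and it is never allowed below the IPv4 minimum of 68.
 */
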
int xfrm_init_state(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        int family = x->props.family;
        int err;

        err = -EAFNOSUPPORT;
        afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                goto error;

        err = 0;
        if (afinfo->init_flags)
                err = afinfo->init_flags(x);

        xfrm_state_put_afinfo(afinfo);

        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->type = xfrm_get_type(x->id.proto, family);
        if (x->type == NULL)
                goto error;

        err = x->type->init_state(x);
        if (err)
                goto error;

        x->mode = xfrm_get_mode(x->props.mode, family);
        if (x->mode == NULL) {
                /* Without this, a missing mode would fall through with
                 * err == 0 and report success for an unusable state. */
                err = -EPROTONOSUPPORT;
                goto error;
        }

        x->km.state = XFRM_STATE_VALID;

error:
        return err;
}

EXPORT_SYMBOL(xfrm_init_state);

1544void __init xfrm_state_init(void)
1545{
David S. Millerf034b5d2006-08-24 03:08:07 -07001546 unsigned int sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
David S. Millerf034b5d2006-08-24 03:08:07 -07001548 sz = sizeof(struct hlist_head) * 8;
1549
David S. Miller44e36b42006-08-24 04:50:50 -07001550 xfrm_state_bydst = xfrm_hash_alloc(sz);
1551 xfrm_state_bysrc = xfrm_hash_alloc(sz);
1552 xfrm_state_byspi = xfrm_hash_alloc(sz);
David S. Millerf034b5d2006-08-24 03:08:07 -07001553 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
1554 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1555 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
1556
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
1558}
1559