blob: bcc19fa4ed1e07ab4277f7c02c933235cd0dc261 [file] [log] [blame]
/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/rcupdate.h>
15#include <linux/slab.h>
16#include <linux/skbuff.h>
17#include <net/netfilter/nf_conntrack_extend.h>
18
/* Registered extension types, indexed by enum nf_ct_ext_id.  Writers
 * hold nf_ct_ext_type_mutex and publish with rcu_assign_pointer();
 * readers access the slots under rcu_read_lock().
 */
static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);
21
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070022void __nf_ct_ext_destroy(struct nf_conn *ct)
23{
24 unsigned int i;
25 struct nf_ct_ext_type *t;
26
27 for (i = 0; i < NF_CT_EXT_NUM; i++) {
28 if (!nf_ct_ext_exist(ct, i))
29 continue;
30
31 rcu_read_lock();
32 t = rcu_dereference(nf_ct_ext_types[i]);
33
34 /* Here the nf_ct_ext_type might have been unregisterd.
35 * I.e., it has responsible to cleanup private
36 * area in all conntracks when it is unregisterd.
37 */
38 if (t && t->destroy)
39 t->destroy(ct);
40 rcu_read_unlock();
41 }
42}
43EXPORT_SYMBOL(__nf_ct_ext_destroy);
44
45static void *
46nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
47{
Pekka Enberg019f6922008-03-10 16:43:41 -070048 unsigned int off, len;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070049 struct nf_ct_ext_type *t;
50
51 rcu_read_lock();
52 t = rcu_dereference(nf_ct_ext_types[id]);
53 BUG_ON(t == NULL);
54 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
55 len = off + t->len;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070056 rcu_read_unlock();
57
Pekka Enberg019f6922008-03-10 16:43:41 -070058 *ext = kzalloc(t->alloc_size, gfp);
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070059 if (!*ext)
60 return NULL;
61
62 (*ext)->offset[id] = off;
63 (*ext)->len = len;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070064
65 return (void *)(*ext) + off;
66}
67
/* Add extension @id to conntrack @ct, growing (reallocating) the
 * extension area if needed.  Returns a pointer to the zeroed private
 * area for @id, or NULL if the allocation failed or the extension is
 * already present.  Must only be used on unconfirmed conntracks.
 */
void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	if (!ct->ext)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	if (nf_ct_ext_exist(ct, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	/* New extension is appended after the currently used length,
	 * aligned for this type.
	 */
	newoff = ALIGN(ct->ext->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	/* ksize() reports the real size of the existing allocation; only
	 * reallocate when the appended extension does not fit in the
	 * slack the allocator already gave us.
	 */
	if (newlen >= ksize(ct->ext)) {
		new = kmalloc(newlen, gfp);
		if (!new)
			return NULL;

		/* Copy header, offsets and all existing private areas;
		 * the tail past newoff is zeroed below.
		 */
		memcpy(new, ct->ext, ct->ext->len);

		/* Give every present extension a chance to fix up
		 * pointers into its private area, since the data just
		 * moved.  Offsets are unchanged by the copy.
		 */
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!nf_ct_ext_exist(ct, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)ct->ext + ct->ext->offset[i]);
			rcu_read_unlock();
		}
		kfree(ct->ext);
		ct->ext = new;
	}

	ct->ext->offset[id] = newoff;
	ct->ext->len = newlen;
	/* Hand the caller a zeroed private area, whether it came from a
	 * fresh allocation or from reused slack space.
	 */
	memset((void *)ct->ext + newoff, 0, newlen - newoff);
	return (void *)ct->ext + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);
119
120static void update_alloc_size(struct nf_ct_ext_type *type)
121{
122 int i, j;
123 struct nf_ct_ext_type *t1, *t2;
124 enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;
125
126 /* unnecessary to update all types */
127 if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
128 min = type->id;
129 max = type->id;
130 }
131
132 /* This assumes that extended areas in conntrack for the types
133 whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
134 for (i = min; i <= max; i++) {
135 t1 = nf_ct_ext_types[i];
136 if (!t1)
137 continue;
138
139 t1->alloc_size = sizeof(struct nf_ct_ext)
140 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
141 + t1->len;
142 for (j = 0; j < NF_CT_EXT_NUM; j++) {
143 t2 = nf_ct_ext_types[j];
144 if (t2 == NULL || t2 == t1 ||
145 (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
146 continue;
147
148 t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
149 + t2->len;
150 }
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -0700151 }
152}
153
154/* This MUST be called in process context. */
155int nf_ct_extend_register(struct nf_ct_ext_type *type)
156{
157 int ret = 0;
158
159 mutex_lock(&nf_ct_ext_type_mutex);
160 if (nf_ct_ext_types[type->id]) {
161 ret = -EBUSY;
162 goto out;
163 }
164
165 /* This ensures that nf_ct_ext_create() can allocate enough area
166 before updating alloc_size */
167 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
168 + type->len;
169 rcu_assign_pointer(nf_ct_ext_types[type->id], type);
170 update_alloc_size(type);
171out:
172 mutex_unlock(&nf_ct_ext_type_mutex);
173 return ret;
174}
175EXPORT_SYMBOL_GPL(nf_ct_extend_register);
176
/* Unregister a conntrack extension type.  This MUST be called in
 * process context: synchronize_rcu() sleeps.
 */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	/* Publish the removal first so new conntracks stop reserving
	 * room for this type, then recompute the preallocation sizes.
	 */
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	/* Wait for all RCU readers that may still hold the old pointer
	 * (e.g. __nf_ct_ext_destroy()) before the caller may free @type.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);