blob: ba1c4915e9eba571a0349ebee872d1a81edfd563 [file] [log] [blame]
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -07001/* Structure dynamic extension infrastructure
2 * Copyright (C) 2004 Rusty Russell IBM Corporation
3 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
4 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/rcupdate.h>
15#include <linux/slab.h>
16#include <linux/skbuff.h>
17#include <net/netfilter/nf_conntrack_extend.h>
18
/* Registered extension types, indexed by enum nf_ct_ext_id; entries are
 * published with rcu_assign_pointer() and read under rcu_read_lock(). */
static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
/* Serializes extension type (un)registration and alloc_size updates. */
static DEFINE_MUTEX(nf_ct_ext_type_mutex);
21
/* Invoke the ->destroy handler of every extension carried by @ct.
 * Called when the conntrack entry itself is being freed.
 */
void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!nf_ct_ext_exist(ct, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);

		/* Here the nf_ct_ext_type might have been unregistered.
		 * I.e., it is responsible for cleaning up the private
		 * area in all conntracks when it is unregistered.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);
44
45static void *
46nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
47{
Pekka Enberg019f6922008-03-10 16:43:41 -070048 unsigned int off, len;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070049 struct nf_ct_ext_type *t;
50
51 rcu_read_lock();
52 t = rcu_dereference(nf_ct_ext_types[id]);
53 BUG_ON(t == NULL);
54 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
55 len = off + t->len;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070056 rcu_read_unlock();
57
Pekka Enberg019f6922008-03-10 16:43:41 -070058 *ext = kzalloc(t->alloc_size, gfp);
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070059 if (!*ext)
60 return NULL;
61
62 (*ext)->offset[id] = off;
63 (*ext)->len = len;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070064
65 return (void *)(*ext) + off;
66}
67
68void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
69{
70 struct nf_ct_ext *new;
71 int i, newlen, newoff;
72 struct nf_ct_ext_type *t;
73
Patrick McHardy55871d02008-04-14 11:15:51 +020074 /* Conntrack must not be confirmed to avoid races on reallocation. */
75 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
76
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070077 if (!ct->ext)
78 return nf_ct_ext_create(&ct->ext, id, gfp);
79
80 if (nf_ct_ext_exist(ct, id))
81 return NULL;
82
83 rcu_read_lock();
84 t = rcu_dereference(nf_ct_ext_types[id]);
85 BUG_ON(t == NULL);
86
87 newoff = ALIGN(ct->ext->len, t->align);
88 newlen = newoff + t->len;
89 rcu_read_unlock();
90
Pekka Enberg31d85192008-06-09 15:58:39 -070091 new = krealloc(ct->ext, newlen, gfp);
92 if (!new)
93 return NULL;
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070094
Pekka Enberg31d85192008-06-09 15:58:39 -070095 if (new != ct->ext) {
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -070096 for (i = 0; i < NF_CT_EXT_NUM; i++) {
97 if (!nf_ct_ext_exist(ct, i))
98 continue;
99
100 rcu_read_lock();
101 t = rcu_dereference(nf_ct_ext_types[i]);
102 if (t && t->move)
Patrick McHardy86577c62008-02-07 17:56:34 -0800103 t->move((void *)new + new->offset[i],
104 (void *)ct->ext + ct->ext->offset[i]);
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -0700105 rcu_read_unlock();
106 }
107 kfree(ct->ext);
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -0700108 ct->ext = new;
109 }
110
111 ct->ext->offset[id] = newoff;
112 ct->ext->len = newlen;
113 memset((void *)ct->ext + newoff, 0, newlen - newoff);
114 return (void *)ct->ext + newoff;
115}
116EXPORT_SYMBOL(__nf_ct_ext_add);
117
118static void update_alloc_size(struct nf_ct_ext_type *type)
119{
120 int i, j;
121 struct nf_ct_ext_type *t1, *t2;
122 enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;
123
124 /* unnecessary to update all types */
125 if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
126 min = type->id;
127 max = type->id;
128 }
129
130 /* This assumes that extended areas in conntrack for the types
131 whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
132 for (i = min; i <= max; i++) {
133 t1 = nf_ct_ext_types[i];
134 if (!t1)
135 continue;
136
137 t1->alloc_size = sizeof(struct nf_ct_ext)
138 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
139 + t1->len;
140 for (j = 0; j < NF_CT_EXT_NUM; j++) {
141 t2 = nf_ct_ext_types[j];
142 if (t2 == NULL || t2 == t1 ||
143 (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
144 continue;
145
146 t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
147 + t2->len;
148 }
Yasuyuki Kozakaiecfab2c2007-07-07 22:23:21 -0700149 }
150}
151
152/* This MUST be called in process context. */
153int nf_ct_extend_register(struct nf_ct_ext_type *type)
154{
155 int ret = 0;
156
157 mutex_lock(&nf_ct_ext_type_mutex);
158 if (nf_ct_ext_types[type->id]) {
159 ret = -EBUSY;
160 goto out;
161 }
162
163 /* This ensures that nf_ct_ext_create() can allocate enough area
164 before updating alloc_size */
165 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
166 + type->len;
167 rcu_assign_pointer(nf_ct_ext_types[type->id], type);
168 update_alloc_size(type);
169out:
170 mutex_unlock(&nf_ct_ext_type_mutex);
171 return ret;
172}
173EXPORT_SYMBOL_GPL(nf_ct_extend_register);
174
/* Unregister an extension type.  This MUST be called in process
 * context.
 *
 * Clears the type's slot so new conntracks stop allocating it, then
 * waits with synchronize_rcu() for in-flight RCU readers (see
 * __nf_ct_ext_destroy / __nf_ct_ext_add) to finish before the caller
 * may free @type.  The statement order here is load-bearing.
 */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);