/*
 *		inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/inet_frag.h>

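/*
 * Timer callback: periodically reseed the hash secret (f->rnd) and
 * relink every queue whose hash changed, so an attacker cannot keep
 * feeding fragments that all collide on one hash chain.
 */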
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages >> 7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
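
/*
 * Usage sketch (illustrative only: "my_frags", "struct my_queue" and
 * the my_* callbacks are hypothetical, modelled on the ipv4 reassembly
 * user). The owning protocol fills in the descriptor once and
 * registers it at init time:
 *
 *	static struct inet_frags my_frags;
 *
 *	my_frags.hashfn = my_hashfn;
 *	my_frags.constructor = my_constructor;
 *	my_frags.destructor = my_destructor;
 *	my_frags.skb_free = NULL;
 *	my_frags.qsize = sizeof(struct my_queue);
 *	my_frags.match = my_match;
 *	my_frags.frag_expire = my_expire;
 *	my_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&my_frags);
 */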

void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}

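/*
 * Take a queue out of circulation: stop its expire timer, unhash it
 * and mark it complete. Each successful del_timer()/fq_unlink() drops
 * the reference that the timer and the hash table held on the queue.
 */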
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

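/*
 * Final teardown, called once the last reference is gone: free every
 * queued fragment, return the accounted memory to the namespace and,
 * if @work is non-NULL, credit the freed bytes to the evictor's budget.
 */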
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
		       int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

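/*
 * Walk the per-namespace LRU list, killing and destroying the oldest
 * queues until the memory accounted in nf->mem drops back below
 * nf->low_thresh. Returns the number of queues evicted.
 */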
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

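/*
 * Insert a freshly allocated queue into the hash under the write lock.
 * The unconditional refcnt increment accounts for the hash table's
 * reference; the conditional one accounts for the pending expire timer.
 * If an equal queue was interned by another CPU in the meantime, that
 * one wins and qp_in is dropped.
 */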
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we were without the lock, another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because an equal
	 * entry could have been created on another CPU while we were
	 * upgrading from the read lock to the write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	q->net = nf;

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

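/*
 * Look up (or create) the queue for @key. The caller must hold f->lock
 * for reading and have computed @hash under that lock; the lock is
 * released here on all paths, as the __releases() annotation states.
 */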
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);
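
/*
 * Caller-side sketch (illustrative; the my_* names are hypothetical,
 * after the pattern used by ipv4/ip_fragment.c). The read lock taken
 * here pairs with the read_unlock() done inside inet_frag_find():
 *
 *	read_lock(&my_frags.lock);
 *	hash = my_key_hash(&key);
 *	q = inet_frag_find(nf, &my_frags, &key, hash);
 *	if (q == NULL)
 *		goto out_nomem;
 */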