/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/inet_frag.h>

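/*
 * Timer handler: pick a fresh random seed and move every queue to the
 * hash bucket its new hash value selects, so bucket placement does not
 * stay predictable over time.  Re-arms itself every secret_interval.
 */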
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->ctl->secret_interval);
}

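/*
 * Initialise the hash table, LRU list, lock and memory counter of an
 * inet_frags instance and start its secret rebuild timer.  Callers are
 * expected to have filled in f->ctl beforehand, since the secret timer
 * interval is read from it.
 */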
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	INIT_LIST_HEAD(&f->lru_list);
	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			   (jiffies ^ (jiffies >> 6)));

	f->nqueues = 0;
	atomic_set(&f->mem, 0);

	init_timer(&f->secret_timer);
	f->secret_timer.function = inet_frag_secret_rebuild;
	f->secret_timer.data = (unsigned long)f;
	f->secret_timer.expires = jiffies + f->ctl->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

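/* Tear down what inet_frags_init() set up: stop the secret rebuild timer. */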
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	f->nqueues--;
	write_unlock(&f->lock);
}

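/*
 * Mark a queue as complete and drop the references held by its timer
 * and by the hash/LRU lists.  The queue itself is freed later, once the
 * last reference is put (see inet_frag_destroy()).
 */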
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

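/*
 * Free one fragment skb, give the protocol a chance to clean up via
 * skb_free(), and account the released truesize against f->mem (and
 * against *work when the evictor is driving the release).
 */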
static inline void frag_kfree_skb(struct inet_frags *f, struct sk_buff *skb,
					int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &f->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

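/*
 * Final teardown of a dead (COMPLETE) queue: free every queued fragment,
 * subtract the queue's own size from the memory accounting, let the
 * protocol destructor run and free the queue structure.
 */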
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(q->last_in & COMPLETE);
	BUG_TRAP(del_timer(&q->timer) == 0);

	/* Release all fragment data. */
	fp = q->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &f->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

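/*
 * Shrink fragment memory usage down to the low threshold by killing the
 * oldest queues on the LRU list.  Returns the number of queues evicted.
 */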
int inet_frag_evictor(struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&f->mem) - f->ctl->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&f->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&f->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

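/*
 * Insert a freshly allocated queue into the hash table.  Under SMP the
 * lookup is redone with the write lock held: if another CPU has already
 * added an equivalent queue, that one wins and qp_in is dropped.
 */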
static struct inet_frag_queue *inet_frag_intern(struct inet_frag_queue *qp_in,
		struct inet_frags *f, unsigned int hash, void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&f->lock);
#ifdef CONFIG_SMP
	/* With SMP races we have to recheck the hash table, because
	 * such an entry could have been created on another CPU while
	 * we promoted the read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + f->ctl->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &f->lru_list);
	f->nqueues++;
	write_unlock(&f->lock);
	return qp;
}

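/*
 * Allocate and initialise a new queue: zeroed protocol-sized allocation,
 * protocol constructor, expiry timer and an initial reference.
 */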
static struct inet_frag_queue *inet_frag_alloc(struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &f->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

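/* Allocate a new queue for @arg and hash it (or reuse a racing one). */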
static struct inet_frag_queue *inet_frag_create(struct inet_frags *f,
		void *arg, unsigned int hash)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(q, f, hash, arg);
}

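/*
 * Look up the queue matching @key in hash bucket @hash and take a
 * reference on it; if none exists, allocate and insert a new one.
 */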
struct inet_frag_queue *inet_frag_find(struct inet_frags *f, void *key,
		unsigned int hash)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	read_lock(&f->lock);
	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(f, key, hash);
}
EXPORT_SYMBOL(inet_frag_find);
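
/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * reassembler such as ipv4/ip_fragment.c is expected to fill in the
 * callbacks this file invokes and then register itself.  The my_*()
 * names below are placeholders; only the inet_frags fields actually
 * referenced above (ctl, hashfn, match, constructor, destructor,
 * skb_free, qsize, frag_expire) are assumed to exist.
 *
 *	static struct inet_frags my_frags;
 *
 *	void __init my_proto_frag_init(void)
 *	{
 *		my_frags.ctl = &my_frag_ctl;		// low_thresh, timeout, secret_interval
 *		my_frags.hashfn = my_hashfn;		// bucket for a queue, used on rehash
 *		my_frags.match = my_match;		// does a queue match a lookup key?
 *		my_frags.constructor = my_constructor;	// init protocol part of a new queue
 *		my_frags.destructor = my_destructor;	// release protocol-private data
 *		my_frags.skb_free = NULL;		// optional per-skb cleanup hook
 *		my_frags.qsize = sizeof(struct my_frag_queue);
 *		my_frags.frag_expire = my_expire;	// per-queue timer handler
 *		inet_frags_init(&my_frags);
 *	}
 */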