/*
 *		inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/inet_frag.h>

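/*
 * Timer callback: pick a fresh hash secret and relink every queue onto
 * the chain that matches its new hash value, then re-arm the timer.
 * Periodic re-keying keeps the hash distribution unpredictable.
 */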
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

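/*
 * Set up the protocol-wide state: empty hash chains, the hash/LRU lock,
 * an initial hash seed and the periodic secret-rebuild timer.
 */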
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

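/* Reset the per-namespace queue count, memory counter and LRU list. */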
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);

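/* Protocol-wide teardown: stop the secret-rebuild timer. */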
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

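/*
 * Per-namespace teardown: with low_thresh forced to zero, a forced
 * eviction pass flushes every remaining queue. BHs are disabled so the
 * eviction cannot race with fragment processing in softirq context.
 */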
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);

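/* Remove a queue from its hash chain and the LRU list. */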
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}

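/*
 * Mark a queue dead: stop its timer, unlink it and drop the references
 * held by the timer and the hash table. The caller's own reference
 * still pins the queue until inet_frag_put().
 */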
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

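/*
 * Free one fragment skb, returning its truesize to the namespace memory
 * counter and, if @work is given, to the caller's reclaim budget.
 */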
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

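/*
 * Final teardown once the last reference is gone: free every queued
 * fragment, return the queue's own qsize to the memory accounting and
 * invoke the protocol destructor before freeing the queue itself.
 */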
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

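/*
 * Walk the namespace LRU list oldest-first, killing queues until memory
 * use drops below low_thresh. With @force the high_thresh check is
 * skipped, which lets namespace teardown flush everything. Returns the
 * number of queues evicted.
 */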
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (atomic_read(&nf->mem) <= nf->high_thresh)
			return 0;
	}

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

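/*
 * Publish a freshly allocated queue in the hash and LRU. On SMP the
 * chain is re-checked under the write lock: another CPU may have
 * inserted an equal queue already, in which case the newcomer is
 * dropped in favour of the existing one.
 */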
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we stayed without the lock, another CPU could have
	 * updated the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* With an SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another CPU while we
	 * promoted the read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}

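/*
 * Allocate a queue and initialise it: protocol constructor, memory
 * accounting, expiry timer, spinlock and an initial reference.
 */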
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

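/* Allocate a queue for @arg and publish it via inet_frag_intern(). */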
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

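/*
 * Find the queue matching @key on chain @hash and take a reference, or
 * create one if the lookup fails. Entered with f->lock read-locked; the
 * lock is released on every path, as the __releases() annotation says.
 */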
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);