/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>

#include <net/sock.h>
#include <net/snmp.h>
#include <net/inet_frag.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>

#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
#define NF_CT_FRAG6_LOW_THRESH	196608 /* == 192*1024 */
#define NF_CT_FRAG6_TIMEOUT	IPV6_FRAG_TIMEOUT

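/* Per-fragment reassembly state, kept in skb->cb.  'offset' is this
 * fragment's offset within the original datagram; 'orig' points back to
 * the unmodified skb that this (cloned) fragment was taken from.
 */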
struct nf_ct_frag6_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
	struct sk_buff		*orig;
};

#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb*)((skb)->cb))

struct nf_ct_frag6_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	unsigned int		csum;
	__u16			nhoffset;
};

struct inet_frags_ctl nf_frags_ctl __read_mostly = {
	.high_thresh	 = 256 * 1024,
	.low_thresh	 = 192 * 1024,
	.timeout	 = IPV6_FRAG_TIMEOUT,
	.secret_interval = 10 * 60 * HZ,
};

static struct inet_frags nf_frags;

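/* Remove a queue from the hash table and the LRU list.  __fq_unlink()
 * assumes nf_frags.lock is already held; fq_unlink() takes it itself.
 */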
static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
{
	hlist_del(&fq->q.list);
	list_del(&fq->q.lru_list);
	nf_frags.nqueues--;
}

static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq)
{
	write_lock(&nf_frags.lock);
	__fq_unlink(fq);
	write_unlock(&nf_frags.lock);
}

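/* Hash a fragment queue key (fragment id + source/destination address)
 * into a bucket index, mixing in the random seed nf_frags.rnd so that
 * bucket placement is not predictable to remote senders.
 */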
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += nf_frags.rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}

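/* Timer callback: pick a fresh random seed, rehash every queue into its
 * new bucket, and re-arm the timer to fire again one secret_interval
 * from now.
 */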
static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&nf_frags.lock);
	get_random_bytes(&nf_frags.rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct nf_ct_frag6_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &nf_frags.hash[i], q.list) {
			unsigned int hval = ip6qhashfn(q->id,
						       &q->saddr,
						       &q->daddr);
			if (hval != i) {
				hlist_del(&q->q.list);
				/* Relink to new hash chain. */
				hlist_add_head(&q->q.list,
					       &nf_frags.hash[hval]);
			}
		}
	}
	write_unlock(&nf_frags.lock);

	mod_timer(&nf_frags.secret_timer, now + nf_frags_ctl.secret_interval);
}

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf_frags.mem);
	if (NFCT_FRAG6_CB(skb)->orig)
		kfree_skb(NFCT_FRAG6_CB(skb)->orig);

	kfree_skb(skb);
}

static inline void frag_free_queue(struct nf_ct_frag6_queue *fq,
				   unsigned int *work)
{
	if (work)
		*work -= sizeof(struct nf_ct_frag6_queue);
	atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem);
	kfree(fq);
}

static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
{
	struct nf_ct_frag6_queue *fq = kmalloc(sizeof(struct nf_ct_frag6_queue), GFP_ATOMIC);

	if (!fq)
		return NULL;
	atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem);
	return fq;
}

/* Destruction primitives. */

/* Complete destruction of fq. */
static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq,
				unsigned int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(fq->q.last_in & COMPLETE);
	BUG_TRAP(del_timer(&fq->q.timer) == 0);

	/* Release all fragment data. */
	fp = fq->q.fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	frag_free_queue(fq, work);
}

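/* Drop one reference; the queue is destroyed once the last reference is
 * gone.  If 'work' is non-NULL, the freed bytes are subtracted from it,
 * which lets the evictor track its progress toward the low threshold.
 */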
static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work)
{
	if (atomic_dec_and_test(&fq->q.refcnt))
		nf_ct_frag6_destroy(fq, work);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
{
	if (del_timer(&fq->q.timer))
		atomic_dec(&fq->q.refcnt);

	if (!(fq->q.last_in & COMPLETE)) {
		fq_unlink(fq);
		atomic_dec(&fq->q.refcnt);
		fq->q.last_in |= COMPLETE;
	}
}

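/* Evict queues in LRU order until memory use drops back below the low
 * threshold.  'work' starts as the number of bytes above low_thresh and
 * is decremented by fq_put() as queues and their fragments are freed.
 */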
static void nf_ct_frag6_evictor(void)
{
	struct nf_ct_frag6_queue *fq;
	struct list_head *tmp;
	unsigned int work;

	work = atomic_read(&nf_frags.mem);
	if (work <= nf_frags_ctl.low_thresh)
		return;

	work -= nf_frags_ctl.low_thresh;
	while (work > 0) {
		read_lock(&nf_frags.lock);
		if (list_empty(&nf_frags.lru_list)) {
			read_unlock(&nf_frags.lock);
			return;
		}
		tmp = nf_frags.lru_list.next;
		BUG_ON(tmp == NULL);
		fq = list_entry(tmp, struct nf_ct_frag6_queue, q.lru_list);
		atomic_inc(&fq->q.refcnt);
		read_unlock(&nf_frags.lock);

		spin_lock(&fq->q.lock);
		if (!(fq->q.last_in & COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->q.lock);

		fq_put(fq, &work);
	}
}

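/* Timer callback: the reassembly timeout for this queue has expired, so
 * kill it and drop the timer's reference.
 */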
static void nf_ct_frag6_expire(unsigned long data)
{
	struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data;

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & COMPLETE)
		goto out;

	fq_kill(fq);

out:
	spin_unlock(&fq->q.lock);
	fq_put(fq, NULL);
}

/* Creation primitives. */

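/* Insert a freshly allocated queue into the hash table.  On SMP another
 * CPU may have created a queue for the same key in the meantime; if so,
 * drop fq_in and return the existing queue instead.
 */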
static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
					  struct nf_ct_frag6_queue *fq_in)
{
	struct nf_ct_frag6_queue *fq;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&nf_frags.lock);
#ifdef CONFIG_SMP
	hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) {
		if (fq->id == fq_in->id &&
		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->q.refcnt);
			write_unlock(&nf_frags.lock);
			fq_in->q.last_in |= COMPLETE;
			fq_put(fq_in, NULL);
			return fq;
		}
	}
#endif
	fq = fq_in;

	if (!mod_timer(&fq->q.timer, jiffies + nf_frags_ctl.timeout))
		atomic_inc(&fq->q.refcnt);

	atomic_inc(&fq->q.refcnt);
	hlist_add_head(&fq->q.list, &nf_frags.hash[hash]);
	INIT_LIST_HEAD(&fq->q.lru_list);
	list_add_tail(&fq->q.lru_list, &nf_frags.lru_list);
	nf_frags.nqueues++;
	write_unlock(&nf_frags.lock);
	return fq;
}

static struct nf_ct_frag6_queue *
nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct nf_ct_frag6_queue *fq;

	if ((fq = frag_alloc_queue()) == NULL) {
		pr_debug("Can't alloc new queue\n");
		goto oom;
	}

	memset(fq, 0, sizeof(struct nf_ct_frag6_queue));

	fq->id = id;
	ipv6_addr_copy(&fq->saddr, src);
	ipv6_addr_copy(&fq->daddr, dst);

	setup_timer(&fq->q.timer, nf_ct_frag6_expire, (unsigned long)fq);
	spin_lock_init(&fq->q.lock);
	atomic_set(&fq->q.refcnt, 1);

	return nf_ct_frag6_intern(hash, fq);

oom:
	return NULL;
}

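/* Look up the reassembly queue for (id, src, dst), taking a reference
 * on it; if none exists yet, allocate and intern a new one.
 */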
static __inline__ struct nf_ct_frag6_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct nf_ct_frag6_queue *fq;
	struct hlist_node *n;
	unsigned int hash = ip6qhashfn(id, src, dst);

	read_lock(&nf_frags.lock);
	hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) {
		if (fq->id == id &&
		    ipv6_addr_equal(src, &fq->saddr) &&
		    ipv6_addr_equal(dst, &fq->daddr)) {
			atomic_inc(&fq->q.refcnt);
			read_unlock(&nf_frags.lock);
			return fq;
		}
	}
	read_unlock(&nf_frags.lock);

	return nf_ct_frag6_create(hash, id, src, dst);
}

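/* Queue one fragment: validate its offset and length against what the
 * queue has seen so far, trim any overlap with neighbouring fragments,
 * and link it into the queue's offset-sorted fragment list.
 */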
static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
			     struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	int offset, end;

	if (fq->q.last_in & COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -1;
	}
381
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700382 if (skb->ip_summed == CHECKSUM_COMPLETE) {
383 const unsigned char *nh = skb_network_header(skb);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900384 skb->csum = csum_sub(skb->csum,
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700385 csum_partial(nh, (u8 *)(fhdr + 1) - nh,
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800386 0));
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700387 }
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800388
389 /* Is this the final fragment? */
390 if (!(fhdr->frag_off & htons(IP6_MF))) {
391 /* If we already have some bits beyond end
392 * or have different end, the segment is corrupted.
393 */
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700394 if (end < fq->q.len ||
395 ((fq->q.last_in & LAST_IN) && end != fq->q.len)) {
Patrick McHardy0d537782007-07-07 22:39:38 -0700396 pr_debug("already received last fragment\n");
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800397 goto err;
398 }
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700399 fq->q.last_in |= LAST_IN;
400 fq->q.len = end;
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800401 } else {
402 /* Check if the fragment is rounded to 8 bytes.
403 * Required by the RFC.
404 */
405 if (end & 0x7) {
406 /* RFC2460 says always send parameter problem in
407 * this case. -DaveM
408 */
Patrick McHardy0d537782007-07-07 22:39:38 -0700409 pr_debug("end of fragment not rounded to 8 bytes.\n");
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800410 return -1;
411 }
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700412 if (end > fq->q.len) {
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800413 /* Some bits beyond end -> corruption. */
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700414 if (fq->q.last_in & LAST_IN) {
Patrick McHardy0d537782007-07-07 22:39:38 -0700415 pr_debug("last packet already reached.\n");
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800416 goto err;
417 }
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700418 fq->q.len = end;
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800419 }
420 }
421
422 if (end == offset)
423 goto err;
424
425 /* Point into the IP datagram 'data' part. */
426 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
Patrick McHardy0d537782007-07-07 22:39:38 -0700427 pr_debug("queue: message is too short.\n");
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800428 goto err;
429 }
Herbert Xub38dfee2006-06-09 16:13:01 -0700430 if (pskb_trim_rcsum(skb, end - offset)) {
Patrick McHardy0d537782007-07-07 22:39:38 -0700431 pr_debug("Can't trim\n");
Herbert Xub38dfee2006-06-09 16:13:01 -0700432 goto err;
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800433 }
434
	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (NFCT_FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset) {
				pr_debug("overlap\n");
				goto err;
			}
			if (!pskb_pull(skb, i)) {
				pr_debug("Can't pull\n");
				goto err;
			}
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && NFCT_FRAG6_CB(next)->offset < end) {
		/* overlap is 'i' bytes */
		int i = end - NFCT_FRAG6_CB(next)->offset;

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop.  The next ones cannot overlap.
			 */
			pr_debug("Eat head of the overlapped parts: %d", i);
			if (!pskb_pull(next, i))
				goto err;

			/* next fragment */
			NFCT_FRAG6_CB(next)->offset += i;
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

507 NFCT_FRAG6_CB(skb)->offset = offset;
508
509 /* Insert this fragment in the chain of fragments. */
510 skb->next = next;
511 if (prev)
512 prev->next = skb;
513 else
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700514 fq->q.fragments = skb;
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800515
516 skb->dev = NULL;
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700517 fq->q.stamp = skb->tstamp;
518 fq->q.meat += skb->len;
Pavel Emelyanov7eb95152007-10-15 02:31:52 -0700519 atomic_add(skb->truesize, &nf_frags.mem);
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800520
521 /* The first fragment.
522 * nhoffset is obtained from the first fragment, of course.
523 */
524 if (offset == 0) {
525 fq->nhoffset = nhoff;
Pavel Emelyanov5ab11c92007-10-15 02:24:19 -0700526 fq->q.last_in |= FIRST_IN;
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800527 }
Pavel Emelyanov7eb95152007-10-15 02:31:52 -0700528 write_lock(&nf_frags.lock);
529 list_move_tail(&fq->q.lru_list, &nf_frags.lru_list);
530 write_unlock(&nf_frags.lock);
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -0800531 return 0;
532
533err:
534 return -1;
535}
536
/*
 * Check if this packet is complete.
 * Returns NULL on failure for any reason, otherwise a pointer to the
 * current nexthdr field in the reassembled frame.
 *
 * It is called with the fq locked, and the caller must have checked that
 * the queue is eligible for reassembly, i.e. it is not COMPLETE, the
 * last and the first frames have arrived, and all the bits are here.
 */
static struct sk_buff *
nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
{
	struct sk_buff *fp, *op, *head = fq->q.fragments;
	int payload_len;

	fq_kill(fq);

	BUG_TRAP(head != NULL);
	BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		pr_debug("payload len is too large.\n");
		goto out_oversize;
	}

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
		pr_debug("skb is cloned but can't expand head");
		goto out_oom;
	}

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
			pr_debug("Can't alloc skb\n");
			goto out_oom;
		}
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;

		NFCT_FRAG6_CB(clone)->orig = NULL;
		atomic_add(clone->truesize, &nf_frags.mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &nf_frags.mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &nf_frags.mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	fq->q.fragments = NULL;

	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
	fp = skb_shinfo(head)->frag_list;
	if (NFCT_FRAG6_CB(fp)->orig == NULL)
		/* in the code above, the head skb was divided into two skbs */
		fp = fp->next;

	op = NFCT_FRAG6_CB(head)->orig;
	for (; fp; fp = fp->next) {
		struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;

		op->next = orig;
		op = orig;
		NFCT_FRAG6_CB(fp)->orig = NULL;
	}

	return head;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
out_fail:
	return NULL;
}

/*
 * find the header just before Fragment Header.
 *
 * on success, returns 0 and sets ...
 * (*prevhdrp): the value of the "Next Header" field in the header
 *		just before Fragment Header.
 * (*prevhoff): the offset of the "Next Header" field in the header
 *		just before Fragment Header.
 * (*fhoff)  : the offset of Fragment Header.
 *
 * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 *
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hdr.hdrlen+2)<<2;
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

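/* Main entry point for defragmentation.  The skb is cloned (the clone
 * remembers the original via its cb) and run through the queueing
 * machinery.  Returns the original skb when no reassembly is needed or
 * an error occurs, the reassembled skb once the final fragment arrives,
 * and NULL while fragments are still outstanding.
 */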
struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
{
	struct sk_buff *clone;
	struct net_device *dev = skb->dev;
	struct frag_hdr *fhdr;
	struct nf_ct_frag6_queue *fq;
	struct ipv6hdr *hdr;
	int fhoff, nhoff;
	u8 prevhdr;
	struct sk_buff *ret_skb = NULL;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return skb;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return skb;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (clone == NULL) {
		pr_debug("Can't clone skb\n");
		return skb;
	}

	NFCT_FRAG6_CB(clone)->orig = skb;

	if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
		pr_debug("message is too short.\n");
		goto ret_orig;
	}

	skb_set_transport_header(clone, fhoff);
	hdr = ipv6_hdr(clone);
	fhdr = (struct frag_hdr *)skb_transport_header(clone);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		pr_debug("Invalid fragment offset\n");
		/* It is not a fragmented frame */
		goto ret_orig;
	}

	if (atomic_read(&nf_frags.mem) > nf_frags_ctl.high_thresh)
		nf_ct_frag6_evictor();

	fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		goto ret_orig;
	}

	spin_lock(&fq->q.lock);

	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
		spin_unlock(&fq->q.lock);
		pr_debug("Can't insert skb to queue\n");
		fq_put(fq, NULL);
		goto ret_orig;
	}

	if (fq->q.last_in == (FIRST_IN|LAST_IN) && fq->q.meat == fq->q.len) {
		ret_skb = nf_ct_frag6_reasm(fq, dev);
		if (ret_skb == NULL)
			pr_debug("Can't reassemble fragmented packets\n");
	}
	spin_unlock(&fq->q.lock);

	fq_put(fq, NULL);
	return ret_skb;

ret_orig:
	kfree_skb(clone);
	return skb;
}

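/* Re-inject the original (pre-defrag) skbs once the reassembled packet
 * has passed the defrag hook: each original gets a reference to the
 * reassembled skb in nfct_reasm and is sent individually through the
 * remaining netfilter hooks.
 */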
void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
			struct net_device *in, struct net_device *out,
			int (*okfn)(struct sk_buff *))
{
	struct sk_buff *s, *s2;

	for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
		nf_conntrack_put_reasm(s->nfct_reasm);
		nf_conntrack_get_reasm(skb);
		s->nfct_reasm = skb;

		s2 = s->next;
		s->next = NULL;

		NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
			       NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
		s = s2;
	}
	nf_conntrack_put_reasm(skb);
}

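/* Free a reassembled skb together with the chain of original fragment
 * skbs hanging off its control block.
 */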
int nf_ct_frag6_kfree_frags(struct sk_buff *skb)
{
	struct sk_buff *s, *s2;

	for (s = NFCT_FRAG6_CB(skb)->orig; s; s = s2) {
		s2 = s->next;
		kfree_skb(s);
	}

	kfree_skb(skb);

	return 0;
}

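/* Module init/exit: arm the secret rehash timer and hand our control
 * parameters to the shared inet_frag infrastructure; on cleanup, tear
 * the timer down and let the evictor drain every remaining queue by
 * dropping low_thresh to 0.
 */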
int nf_ct_frag6_init(void)
{
	setup_timer(&nf_frags.secret_timer, nf_ct_frag6_secret_rebuild, 0);
	nf_frags.secret_timer.expires = jiffies + nf_frags_ctl.secret_interval;
	add_timer(&nf_frags.secret_timer);

	nf_frags.ctl = &nf_frags_ctl;
	inet_frags_init(&nf_frags);

	return 0;
}

void nf_ct_frag6_cleanup(void)
{
	inet_frags_fini(&nf_frags);

	del_timer(&nf_frags.secret_timer);
	nf_frags_ctl.low_thresh = 0;
	nf_ct_frag6_evictor();
}
858}