/* 6LoWPAN fragment reassembly
 *
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "reassembly.h"

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);

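/* Hash incoming fragments into the lowpan_frags table by datagram tag,
 * datagram size and the link-layer source/destination addresses, so that
 * fragments of the same 802.15.4 datagram land in the same queue.
 */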
static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	u32 c;

	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	c = jhash_3words(ieee802154_addr_hash(saddr),
			 ieee802154_addr_hash(daddr),
			 (__force u32)(tag + (d_size << 16)),
			 lowpan_frags.rnd);

	return c & (INETFRAGS_HASHSZ - 1);
}

static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
{
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

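/* Match callback for the generic fragment code: an existing queue matches a
 * fragment if the datagram tag, datagram size and both link-layer addresses
 * are identical.
 */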
bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
{
	struct lowpan_frag_queue *fq;
	struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return fq->tag == arg->tag && fq->d_size == arg->d_size &&
	       ieee802154_addr_addr_equal(&fq->saddr, arg->src) &&
	       ieee802154_addr_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(lowpan_frag_match);

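/* Constructor callback: initialize a freshly allocated queue with the tag,
 * datagram size and addresses that identify the datagram being reassembled.
 */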
void lowpan_frag_init(struct inet_frag_queue *q, void *a)
{
	struct lowpan_frag_queue *fq;
	struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(lowpan_frag_init);

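/* Kill a queue whose reassembly timer fired before all fragments arrived,
 * then drop the timer's reference on it.
 */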
void lowpan_expire_frag_queue(struct frag_queue *fq, struct inet_frags *frags)
{
	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, frags);
}
EXPORT_SYMBOL(lowpan_expire_frag_queue);

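/* Timer callback installed as lowpan_frags.frag_expire; runs when a
 * partially reassembled datagram times out.
 */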
static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	lowpan_expire_frag_queue(fq, &lowpan_frags);
}

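/* Look up (or create) the reassembly queue for a fragment, keyed by the
 * datagram tag, datagram size and link-layer addresses. Returns NULL if the
 * fragment table is over its limit or the queue cannot be allocated.
 */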
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct ieee802154_frag_info *frag_info,
	const struct ieee802154_addr *src, const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	read_lock(&lowpan_frags.lock);
	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&net->ieee802154_lowpan.frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}

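/* Insert one fragment into its reassembly queue, keeping the fragment list
 * sorted by datagram offset. Reassembly is triggered once the first and last
 * fragments have arrived and the accumulated length matches the datagram
 * size; in that case the result of lowpan_frag_reasm() is returned, -1
 * otherwise. Called with fq->q.lock held.
 */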
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = mac_cb(skb)->frag_info.d_offset << 3;
	end = mac_cb(skb)->frag_info.d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || mac_cb(prev)->frag_info.d_offset <
		     mac_cb(skb)->frag_info.d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (mac_cb(next)->frag_info.d_offset >=
		    mac_cb(skb)->frag_info.d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	inet_frag_lru_move(&fq->q);
	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/* Check if this packet is complete.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first fragments arrived and all the bits are here.
 *
 * Returns 1 once the fragments have been merged into a single skb and -1 on
 * failure (for example, out of memory).
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

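/* Parse the RFC 4944 fragmentation header: an 11-bit datagram size (the low
 * three bits of the dispatch byte plus the following byte), a 16-bit datagram
 * tag and, for FRAGN fragments only, an 8-bit datagram offset in units of
 * eight octets. Returns 0 on success or -EIO if the header is truncated.
 */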
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
				struct ieee802154_frag_info *frag_info)
{
	bool fail;
	u8 pattern = 0, low = 0;

	fail = lowpan_fetch_skb(skb, &pattern, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	frag_info->d_size = (pattern & 7) << 8 | low;
	fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		frag_info->d_offset = 0;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

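/* Entry point for received FRAG1/FRAGN frames: parse the fragment header,
 * find or create the matching reassembly queue and enqueue the fragment.
 * Returns 1 when this frame completed the datagram, -1 otherwise; the skb
 * is consumed in every case.
 */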
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct ieee802154_frag_info *frag_info = &mac_cb(skb)->frag_info;
	int err;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
		goto err;

	inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);

	fq = fq_find(net, frag_info, &mac_cb(skb)->sa, &mac_cb(skb)->da);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);

#ifdef CONFIG_SYSCTL
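/* Per-namespace sysctls exported under /proc/sys/net/ieee802154/6lowpan/,
 * controlling the reassembly memory thresholds, the reassembly timeout and
 * the largest datagram size we are willing to reassemble.
 */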
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "6lowpanfrag_max_datagram_size",
		.data		= &init_net.ieee802154_lowpan.max_dsize,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ }
};

static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
		table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
		table[2].data = &net->ieee802154_lowpan.frags.timeout;
		table[3].data = &net->ieee802154_lowpan.max_dsize;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

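/* Per-namespace setup: reuse the IPv6 reassembly defaults for the memory
 * thresholds and timeout, allow datagrams up to 0xFFFF bytes and register
 * the per-namespace sysctls.
 */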
static int __net_init lowpan_frags_init_net(struct net *net)
{
	net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
	net->ieee802154_lowpan.max_dsize = 0xFFFF;

	inet_frags_init_net(&net->ieee802154_lowpan.frags);

	return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

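/* Set up and tear down the global reassembly state: sysctls, pernet
 * operations and the inet_frags callbacks (hashing, matching, construction
 * and expiry) used by the generic fragment infrastructure.
 */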
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&lowpan_frags);

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}

void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}