/*
 * Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/*
 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define DEBUG

#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/ipv6.h>

#include "6lowpan.h"

/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};

static LIST_HEAD(lowpan_devices);

/*
 * Uncompression of linklocal:
 *   0 -> 16 bytes from packet
 *   1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 2 bytes from prefix - zeroes + 2 from packet
 *   3 -> 2 bytes from prefix - infer 8 bytes from lladdr
 *
 * NOTE: the uncompress function maps a 0xf nibble to 16 bytes
 * NOTE: 0x00 => no-autoconfig => unspecified
 */
static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};

/*
 * Uncompression of ctx-based:
 *   0 -> 0 bits from packet [unspecified / reserved]
 *   1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 8 bytes from prefix - zeroes + 2 from packet
 *   3 -> 8 bytes from prefix - infer 8 bytes from lladdr
 */
static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};

/*
 * Uncompression of multicast addresses (non-context-based):
 *   0 -> 16 bytes from packet
 *   1 -> 2 bytes from prefix - zeroes + 5 from packet
 *   2 -> 2 bytes from prefix - zeroes + 3 from packet
 *   3 -> 2 bytes from prefix - zeroes + 1 from packet
 */
static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
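
/*
 * Each table entry above packs the argument for lowpan_uncompress_addr():
 * the high nibble is the number of prefix bytes to copy, the low nibble the
 * number of trailing bytes taken from the packet (0xf stands for 16), and
 * the middle of the address is zero-filled.  For example, 0x28 means: copy
 * the 2-byte prefix, zero bytes 2..7 and read the final 8 bytes inline.
 */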

/* Link local prefix */
static const u8 lowpan_llprefix[] = {0xfe, 0x80};

/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
};

struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	spinlock_t		lock;		/* concurrency lock */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

static unsigned short fragment_tag;
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);

static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline void lowpan_address_flip(u8 *src, u8 *dest)
{
	int i;
	for (i = 0; i < IEEE802154_ADDR_LEN; i++)
		(dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
}

/* list of all 6lowpan devices, used for packet delivery */
/* print data in line */
static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
#ifdef DEBUG
	if (msg)
		pr_debug("(%s) %s: ", caller, msg);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
			16, 1, buf, len, false);
#endif /* DEBUG */
}

/*
 * print data in a table format:
 *
 * addr: xx xx xx xx xx xx
 * addr: xx xx xx xx xx xx
 * ...
 */
static inline void lowpan_raw_dump_table(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
#ifdef DEBUG
	if (msg)
		pr_debug("(%s) %s:\n", caller, msg);
	print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
			16, 1, buf, len, false);
#endif /* DEBUG */
}

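/*
 * Compress the IID of a link-local address: write any inline bytes to
 * *hc06_ptr (advancing it) and return the resulting 2-bit SAM/DAM code
 * rotated into the bit position given by 'shift'.
 */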
static u8
lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
		 const unsigned char *lladdr)
{
	u8 val = 0;

	if (is_addr_mac_addr_based(ipaddr, lladdr))
		val = 3; /* 0-bits */
	else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
		/* compress IID to 16 bits xxxx::XXXX */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
		*hc06_ptr += 2;
		val = 2; /* 16-bits */
	} else {
		/* do not compress IID => xxxx::IID */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
		*hc06_ptr += 8;
		val = 1; /* 64-bits */
	}

	return rol8(val, shift);
}

static void
lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
{
	memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
	/* second bit-flip (Universal/Local) is done according to RFC 2464 */
	ipaddr->s6_addr[8] ^= 0x02;
}

/*
 * Uncompress addresses based on a prefix and a postfix with zeroes in
 * between. If the postfix is zero in length it will use the link address
 * to configure the IP address (autoconf style).
 * pref_post_count takes a byte where the first nibble specifies the prefix
 * count and the second the postfix count (NOTE: 15/0xf => 16 bytes copy).
 */
static int
lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
	u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
{
	u8 prefcount = pref_post_count >> 4;
	u8 postcount = pref_post_count & 0x0f;

	/* full nibble 15 => 16 */
	prefcount = (prefcount == 15 ? 16 : prefcount);
	postcount = (postcount == 15 ? 16 : postcount);

	if (lladdr)
		lowpan_raw_dump_inline(__func__, "linklocal address",
						lladdr, IEEE802154_ALEN);
	if (prefcount > 0)
		memcpy(ipaddr, prefix, prefcount);

	if (prefcount + postcount < 16)
		memset(&ipaddr->s6_addr[prefcount], 0,
			16 - (prefcount + postcount));

	if (postcount > 0) {
		memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
		skb_pull(skb, postcount);
	} else if (prefcount > 0) {
		if (lladdr == NULL)
			return -EINVAL;

		/* no postfix bytes: derive the IID from the link-layer address */
		lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
	}

	pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
								postcount);
	lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);

	return 0;
}

static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
{
	u8 ret;

	ret = skb->data[0];
	skb_pull(skb, 1);

	return ret;
}

static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
{
	u16 ret;

	BUG_ON(!pskb_may_pull(skb, 2));

	ret = skb->data[0] | (skb->data[1] << 8);
	skb_pull(skb, 2);
	return ret;
}

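/*
 * Replace the IPv6 header of an outgoing skb with its LOWPAN_IPHC
 * compressed form (built in a temporary buffer first), then ask the
 * underlying WPAN device to prepend its 802.15.4 MAC header.
 */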
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned len)
{
	u8 tmp, iphc0, iphc1, *hc06_ptr;
	struct ipv6hdr *hdr;
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	u8 *head;
	struct ieee802154_addr sa, da;

	if (type != ETH_P_IPV6)
		return 0;
	/* TODO:
	 * if this packet isn't an IPv6 one, where should it be routed?
	 */
	head = kzalloc(100, GFP_KERNEL);
	if (head == NULL)
		return -ENOMEM;

	hdr = ipv6_hdr(skb);
	hc06_ptr = head + 2;

	pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
		 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
		hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
		hdr->hop_limit);

	lowpan_raw_dump_table(__func__, "raw skb network header dump",
		skb_network_header(skb), sizeof(struct ipv6hdr));

	if (!saddr)
		saddr = dev->dev_addr;

	lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);

	/*
	 * As we copy some bit-length fields, in the IPHC encoding bytes,
	 * we sometimes use |=
	 * If the field is 0, and the current bit value in memory is 1,
	 * this does not work. We therefore reset the IPHC encoding here
	 */
	iphc0 = LOWPAN_DISPATCH_IPHC;
	iphc1 = 0;

	/* TODO: context lookup */

	lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	/*
	 * Traffic class, flow label
	 * If flow label is 0, compress it. If traffic class is 0, compress it
	 * We have to process both at the same time as the offset of traffic
	 * class depends on the presence of version and flow label
	 */

	/* hc06 format of TC is ECN | DSCP, original one is DSCP | ECN */
	tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
	tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
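	/* e.g. a TC byte of 0x2e (DSCP 0x0b, ECN 2) becomes 0x8b here */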

	if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
	     (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
		/* flow label can be compressed */
		iphc0 |= LOWPAN_IPHC_FL_C;
		if ((hdr->priority == 0) &&
		   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
			/* compress (elide) all */
			iphc0 |= LOWPAN_IPHC_TC_C;
		} else {
			/* compress only the flow label */
			*hc06_ptr = tmp;
			hc06_ptr += 1;
		}
	} else {
		/* Flow label cannot be compressed */
		if ((hdr->priority == 0) &&
		   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
			/* compress only traffic class */
			iphc0 |= LOWPAN_IPHC_TC_C;
			*hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
			memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
			hc06_ptr += 3;
		} else {
			/* compress nothing: copy version/TC/flow label inline */
			memcpy(hc06_ptr, hdr, 4);
			/* replace the top byte with new ECN | DSCP format */
			*hc06_ptr = tmp;
			hc06_ptr += 4;
		}
	}

	/* NOTE: payload length is always compressed */

	/* Next Header is compressed if UDP */
	if (hdr->nexthdr == UIP_PROTO_UDP)
		iphc0 |= LOWPAN_IPHC_NH_C;

/* TODO: next header compression */

	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
		*hc06_ptr = hdr->nexthdr;
		hc06_ptr += 1;
	}

	/*
	 * Hop limit
	 * if 1:   compress, encoding is 01
	 * if 64:  compress, encoding is 10
	 * if 255: compress, encoding is 11
	 * else do not compress
	 */
	switch (hdr->hop_limit) {
	case 1:
		iphc0 |= LOWPAN_IPHC_TTL_1;
		break;
	case 64:
		iphc0 |= LOWPAN_IPHC_TTL_64;
		break;
	case 255:
		iphc0 |= LOWPAN_IPHC_TTL_255;
		break;
	default:
		*hc06_ptr = hdr->hop_limit;
		break;
	}

	/* source address compression */
	if (is_addr_unspecified(&hdr->saddr)) {
		pr_debug("(%s): source address is unspecified, setting SAC\n",
			__func__);
		iphc1 |= LOWPAN_IPHC_SAC;
	/* TODO: context lookup */
	} else if (is_addr_link_local(&hdr->saddr)) {
		pr_debug("(%s): source address is link-local\n", __func__);
		iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
				LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
	} else {
		pr_debug("(%s): send the full source address\n", __func__);
		memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
		hc06_ptr += 16;
	}

	/* destination address compression */
	if (is_addr_mcast(&hdr->daddr)) {
		pr_debug("(%s): destination address is multicast", __func__);
		iphc1 |= LOWPAN_IPHC_M;
		if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
			pr_debug("compressed to 1 octet\n");
			iphc1 |= LOWPAN_IPHC_DAM_11;
			/* use last byte */
			*hc06_ptr = hdr->daddr.s6_addr[15];
			hc06_ptr += 1;
		} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
			pr_debug("compressed to 4 octets\n");
			iphc1 |= LOWPAN_IPHC_DAM_10;
			/* second byte + the last three */
			*hc06_ptr = hdr->daddr.s6_addr[1];
			memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
			hc06_ptr += 4;
		} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
			pr_debug("compressed to 6 octets\n");
			iphc1 |= LOWPAN_IPHC_DAM_01;
			/* second byte + the last five */
			*hc06_ptr = hdr->daddr.s6_addr[1];
			memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
			hc06_ptr += 6;
		} else {
			pr_debug("using full address\n");
			iphc1 |= LOWPAN_IPHC_DAM_00;
			memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
			hc06_ptr += 16;
		}
	} else {
		pr_debug("(%s): destination address is unicast: ", __func__);
		/* TODO: context lookup */
		if (is_addr_link_local(&hdr->daddr)) {
			pr_debug("destination address is link-local\n");
			iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
				LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
		} else {
			pr_debug("using full address\n");
			memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
			hc06_ptr += 16;
		}
	}

	/* TODO: UDP header compression */
	/* TODO: Next Header compression */

	head[0] = iphc0;
	head[1] = iphc1;

	skb_pull(skb, sizeof(struct ipv6hdr));
	memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);

	kfree(head);

	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
				skb->len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	{
		/* prepare wpan address data */
		sa.addr_type = IEEE802154_ADDR_LONG;
		sa.pan_id = 0xff;

		da.addr_type = IEEE802154_ADDR_LONG;
		da.pan_id = 0xff;

		memcpy(&(da.hwaddr), daddr, 8);
		memcpy(&(sa.hwaddr), saddr, 8);

		mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;

		return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
				type, (void *)&da, (void *)&sa, skb->len);
	}
}

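/*
 * Push the reconstructed IPv6 header in front of the decompressed payload
 * and hand a copy of the packet to every registered 6lowpan interface that
 * sits on top of the WPAN device this skb arrived on.
 */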
static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
{
	struct sk_buff *new;
	struct lowpan_dev_record *entry;
	int stat = NET_RX_SUCCESS;

	new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
								GFP_ATOMIC);
	kfree_skb(skb);

	if (!new)
		return -ENOMEM;

	skb_push(new, sizeof(struct ipv6hdr));
	skb_reset_network_header(new);
	skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));

	new->protocol = htons(ETH_P_IPV6);
	new->pkt_type = PACKET_HOST;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
			skb = skb_copy(new, GFP_ATOMIC);
			if (!skb) {
				stat = -ENOMEM;
				break;
			}

			skb->dev = entry->ldev;
			stat = netif_rx(skb);
		}
	rcu_read_unlock();

	kfree_skb(new);

	return stat;
}

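/* reassembly timed out: discard the partly assembled frame */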
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("%s: timer expired for frame with tag %d\n", __func__,
								entry->tag);

	spin_lock(&flist_lock);
	list_del(&entry->list);
	spin_unlock(&flist_lock);

	dev_kfree_skb(entry->skb);
	kfree(entry);
}

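/*
 * Receive path: reassemble FRAG1/FRAGN fragments if needed, then expand
 * the LOWPAN_IPHC header back into a full IPv6 header and deliver the
 * result through lowpan_skb_deliver().
 */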
static int
lowpan_process_data(struct sk_buff *skb)
{
	struct ipv6hdr hdr;
	u8 tmp, iphc0, iphc1, num_context = 0;
	u8 *_saddr, *_daddr;
	int err;

	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
				skb->len);
	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;
	iphc0 = lowpan_fetch_skb_u8(skb);

	/* fragments assembling */
	switch (iphc0 & LOWPAN_DISPATCH_MASK) {
	case LOWPAN_DISPATCH_FRAG1:
	case LOWPAN_DISPATCH_FRAGN:
	{
		struct lowpan_fragment *frame;
		u8 len, offset;
		u16 tag;
		bool found = false;

		len = lowpan_fetch_skb_u8(skb); /* frame length */
		tag = lowpan_fetch_skb_u16(skb);

		/*
		 * check if frame assembling with the same tag is
		 * already in progress
		 */
		spin_lock(&flist_lock);

		list_for_each_entry(frame, &lowpan_fragments, list)
			if (frame->tag == tag) {
				found = true;
				break;
			}

		/* alloc new frame structure */
		if (!found) {
			frame = kzalloc(sizeof(struct lowpan_fragment),
							GFP_ATOMIC);
			if (!frame)
				goto unlock_and_drop;

			INIT_LIST_HEAD(&frame->list);

			frame->length = (iphc0 & 7) | (len << 3);
			frame->tag = tag;

			/* allocate buffer for frame assembling */
			frame->skb = alloc_skb(frame->length +
					sizeof(struct ipv6hdr), GFP_ATOMIC);

			if (!frame->skb) {
				kfree(frame);
				goto unlock_and_drop;
			}

			frame->skb->priority = skb->priority;
			frame->skb->dev = skb->dev;

			/* reserve headroom for uncompressed ipv6 header */
			skb_reserve(frame->skb, sizeof(struct ipv6hdr));
			skb_put(frame->skb, frame->length);

			init_timer(&frame->timer);
			/* timeout is the same as for ipv6 - 60 sec */
			frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
			frame->timer.data = (unsigned long)frame;
			frame->timer.function = lowpan_fragment_timer_expired;

			add_timer(&frame->timer);

			list_add_tail(&frame->list, &lowpan_fragments);
		}

		if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
			goto unlock_and_drop;

		offset = lowpan_fetch_skb_u8(skb); /* fetch offset */

		/* if payload fits buffer, copy it */
		if (likely((offset * 8 + skb->len) <= frame->length))
			skb_copy_to_linear_data_offset(frame->skb, offset * 8,
							skb->data, skb->len);
		else
			goto unlock_and_drop;

		frame->bytes_rcv += skb->len;

		/* frame assembling complete */
		if ((frame->bytes_rcv == frame->length) &&
		     frame->timer.expires > jiffies) {
			/* if the timer hasn't expired yet, delete it first */
			del_timer(&frame->timer);
			list_del(&frame->list);
			spin_unlock(&flist_lock);

			dev_kfree_skb(skb);
			skb = frame->skb;
			kfree(frame);
			iphc0 = lowpan_fetch_skb_u8(skb);
			break;
		}
		spin_unlock(&flist_lock);

		return kfree_skb(skb), 0;
	}
	default:
		break;
	}

	iphc1 = lowpan_fetch_skb_u8(skb);

	_saddr = mac_cb(skb)->sa.hwaddr;
	_daddr = mac_cb(skb)->da.hwaddr;

	pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);

	/* an additional context byte follows if the CID flag is set */
	if (iphc1 & LOWPAN_IPHC_CID) {
		pr_debug("(%s): CID flag is set, header is one byte longer\n",
			__func__);
		if (!skb->len)
			goto drop;
		num_context = lowpan_fetch_skb_u8(skb);
	}

	hdr.version = 6;

	/* Traffic Class and Flow Label */
	switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
	/*
	 * Traffic Class and Flow Label carried in-line
	 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
	 */
	case 0: /* 00b */
		if (!skb->len)
			goto drop;
		tmp = lowpan_fetch_skb_u8(skb);
		memcpy(&hdr.flow_lbl, &skb->data[0], 3);
		skb_pull(skb, 3);
		hdr.priority = ((tmp >> 2) & 0x0f);
		hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
					(hdr.flow_lbl[0] & 0x0f);
		break;
	/*
	 * Traffic class carried in-line
	 * ECN + DSCP (1 byte), Flow Label is elided
	 */
	case 2: /* 10b */
		if (!skb->len)
			goto drop;
		tmp = lowpan_fetch_skb_u8(skb);
		hdr.priority = ((tmp >> 2) & 0x0f);
		hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
		hdr.flow_lbl[1] = 0;
		hdr.flow_lbl[2] = 0;
		break;
	/*
	 * Flow Label carried in-line
	 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
	 */
	case 1: /* 01b */
		if (!skb->len)
			goto drop;
		tmp = lowpan_fetch_skb_u8(skb);
		hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
		skb_pull(skb, 2);
		break;
	/* Traffic Class and Flow Label are elided */
	case 3: /* 11b */
		hdr.priority = 0;
		hdr.flow_lbl[0] = 0;
		hdr.flow_lbl[1] = 0;
		hdr.flow_lbl[2] = 0;
		break;
	default:
		break;
	}

	/* Next Header */
	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
		/* Next header is carried inline */
		if (!skb->len)
			goto drop;
		hdr.nexthdr = lowpan_fetch_skb_u8(skb);
		pr_debug("(%s): NH flag is set, next header is carried "
			 "inline: %02x\n", __func__, hdr.nexthdr);
	}

	/* Hop Limit */
	if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
		hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
	else {
		if (!skb->len)
			goto drop;
		hdr.hop_limit = lowpan_fetch_skb_u8(skb);
	}

	/* Extract SAM to the tmp variable */
	tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;

	/* Source address uncompression */
	pr_debug("(%s): source address stateless compression\n", __func__);
	err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
				lowpan_unc_llconf[tmp], skb->data);
	if (err)
		goto drop;

	/* Extract DAM to the tmp variable */
	tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;

	/* check for Multicast Compression */
	if (iphc1 & LOWPAN_IPHC_M) {
		if (iphc1 & LOWPAN_IPHC_DAC) {
			pr_debug("(%s): destination address context-based "
				 "multicast compression\n", __func__);
			/* TODO: implement this */
		} else {
			u8 prefix[] = {0xff, 0x02};

			pr_debug("(%s): destination address non-context-based"
				 " multicast compression\n", __func__);
			if (0 < tmp && tmp < 3) {
				if (!skb->len)
					goto drop;
				else
					prefix[1] = lowpan_fetch_skb_u8(skb);
			}

			err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
					lowpan_unc_mxconf[tmp], NULL);
			if (err)
				goto drop;
		}
	} else {
		pr_debug("(%s): destination address stateless compression\n",
			__func__);
		err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
					lowpan_unc_llconf[tmp], skb->data);
		if (err)
			goto drop;
	}

	/* TODO: UDP header parse */

	/* not a fragmented packet */
	hdr.payload_len = htons(skb->len);

	pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
			skb_headroom(skb), skb->len);

	pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
		 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
		 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);

	lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
							sizeof(hdr));

	return lowpan_skb_deliver(skb, &hdr);

unlock_and_drop:
	spin_unlock(&flist_lock);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static int lowpan_set_address(struct net_device *dev, void *p)
{
	struct sockaddr *sa = p;

	if (netif_running(dev))
		return -EBUSY;

	/* TODO: validate addr */
	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	return 0;
}

static int lowpan_get_mac_header_length(struct sk_buff *skb)
{
	/*
	 * Currently long addressing mode is supported only, so the overall
	 * header size is 21:
	 * FC SeqNum DPAN DA  SA  Sec
	 * 2  +  1  +  2  + 8 + 8 + 0  = 21
	 */
	return 21;
}

static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
			int mlen, int plen, int offset)
{
	struct sk_buff *frag;
	int hlen, ret;

	/* a zero payload length means this is the first fragment */
	hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE);

	lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;
	frag->dev = skb->dev;

	/* copy the MAC header, the fragment header and the payload */
	memcpy(skb_put(frag, mlen), skb->data, mlen);
	memcpy(skb_put(frag, hlen), head, hlen);

	if (plen)
		skb_copy_from_linear_data_offset(skb, offset + mlen,
					skb_put(frag, plen), plen);

	lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
								frag->len);

	ret = dev_queue_xmit(frag);

	if (ret < 0)
		dev_kfree_skb(frag);

	return ret;
}

static int
lowpan_skb_fragmentation(struct sk_buff *skb)
{
	int err, header_length, payload_length, tag, offset = 0;
	u8 head[5];
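	/*
	 * head[] is filled below with the 6lowpan fragmentation header:
	 * head[0] carries the FRAG1/FRAGN dispatch bits plus part of the
	 * datagram size, head[1] the rest of the size, head[2]/head[3] the
	 * datagram tag, and head[4] (FRAGN only) the offset in 8-byte units.
	 */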

	header_length = lowpan_get_mac_header_length(skb);
	payload_length = skb->len - header_length;
	tag = fragment_tag++;

	/* first fragment header */
	head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
	head[1] = (payload_length >> 3) & 0xff;
	head[2] = tag & 0xff;
	head[3] = tag >> 8;

	err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);

	/* next fragment header */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while ((payload_length - offset > 0) && (err >= 0)) {
		int len = LOWPAN_FRAG_SIZE;

		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
							len, offset);
		offset += len;
	}

	return err;
}

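/*
 * Transmit path: frames that fit into the 802.15.4 MTU are queued on the
 * real WPAN device directly, larger ones go through 6lowpan fragmentation.
 */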
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("(%s): packet xmit\n", __func__);

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
		goto error;
	}

	if (skb->len <= IEEE802154_MTU) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("(%s): frame is too big, fragmentation is needed\n",
								__func__);
	err = lowpan_skb_fragmentation(skb);
error:
	dev_kfree_skb(skb);
out:
	if (err < 0)
		pr_debug("(%s): ERROR: xmit failed\n", __func__);

	return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}

static void lowpan_dev_free(struct net_device *dev)
{
	dev_put(lowpan_dev_info(dev)->real_dev);
	free_netdev(dev);
}

static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};

static void lowpan_setup(struct net_device *dev)
{
	pr_debug("(%s)\n", __func__);

	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	dev->features		= NETIF_F_NO_CSUM;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_NOARP | IFF_BROADCAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->destructor		= lowpan_dev_free;
}

static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	pr_debug("(%s)\n", __func__);

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
			return -EINVAL;
	}
	return 0;
}

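/*
 * Packet-type receive hook for ETH_P_IEEE802154 frames: only IPHC and
 * FRAG1/FRAGN dispatch values are handed on to lowpan_process_data().
 */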
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	switch (skb->data[0] & 0xe0) {
	case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
	case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
	case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
		lowpan_process_data(skb);
		break;
	default:
		break;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int lowpan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *real_dev;
	struct lowpan_dev_record *entry;

	pr_debug("(%s)\n", __func__);

	if (!tb[IFLA_LINK])
		return -EINVAL;
	/* find and hold real wpan device */
	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	lowpan_dev_info(dev)->real_dev = real_dev;
	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);

	entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
	if (!entry) {
		dev_put(real_dev);
		lowpan_dev_info(dev)->real_dev = NULL;
		return -ENOMEM;
	}

	entry->ldev = dev;

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	INIT_LIST_HEAD(&entry->list);
	list_add_tail(&entry->list, &lowpan_devices);
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	register_netdevice(dev);

	return 0;
}

static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry;
	struct lowpan_dev_record *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}

static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
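
/*
 * A 6lowpan interface is stacked on an existing IEEE 802.15.4 device via
 * rtnetlink, e.g. (assuming a WPAN device named "wpan0" is present):
 *
 *	ip link add link wpan0 name lowpan0 type lowpan
 *	ip link set lowpan0 up
 *
 * IFLA_LINK is mandatory, see lowpan_newlink() above.
 */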

static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

static inline void __init lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}

static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};

static int __init lowpan_init_module(void)
{
	int err = 0;

	pr_debug("(%s)\n", __func__);

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);
out:
	return err;
}

static void __exit lowpan_cleanup_module(void)
{
	pr_debug("(%s)\n", __func__);

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);
}

module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");