/*
 * Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/*
 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/ipv6.h>

#include "6lowpan.h"

static LIST_HEAD(lowpan_devices);

/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
	unsigned short		fragment_tag;
};

struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

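/* State for one in-flight reassembly, keyed by the fragment tag. For
 * orientation, a sketch of the RFC 4944 fragment headers this code
 * parses and builds (field widths in bits; not normative here):
 *
 *	FRAG1:	| 1 1 0 0 0 | datagram_size (11) | datagram_tag (16) |
 *	FRAGN:	| 1 1 1 0 0 | datagram_size (11) | datagram_tag (16) |
 *		| datagram_offset (8, in units of 8 octets)          |
 */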
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);

static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline void lowpan_address_flip(u8 *src, u8 *dest)
{
	int i;

	for (i = 0; i < IEEE802154_ADDR_LEN; i++)
		(dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
}

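/* header_ops .create hook, reached via dev_hard_header() when the IPv6
 * stack outputs onto the lowpan interface. It compresses the IPv6
 * header in place (IPHC) and has the real WPAN device prepend its
 * 802.15.4 MAC header. Rough call chain, for orientation only:
 *
 *	ip6_finish_output2()
 *	  -> dev_hard_header(skb, lowpan0, ETH_P_IPV6, ...)
 *	    -> lowpan_header_create()
 *	      -> lowpan_header_compress()
 *	      -> dev_hard_header() on the real wpan device
 */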
static int lowpan_header_create(struct sk_buff *skb,
				struct net_device *dev,
				unsigned short type, const void *_daddr,
				const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct ieee802154_addr sa, da;

	/* TODO:
	 * if this packet isn't an ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	hdr = ipv6_hdr(skb);

	if (!saddr)
		saddr = dev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	lowpan_header_compress(skb, dev, type, daddr, saddr, len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
	mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

	/* prepare wpan address data */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	memcpy(&(sa.hwaddr), saddr, 8);
	/* intra-PAN communications */
	da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	/*
	 * if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast(daddr)) {
		da.addr_type = IEEE802154_ADDR_SHORT;
		da.short_addr = IEEE802154_ADDR_BROADCAST;
	} else {
		da.addr_type = IEEE802154_ADDR_LONG;
		memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

		/* request acknowledgment */
		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			type, (void *)&da, (void *)&sa, skb->len);
}

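/* Deliver an incoming frame to every lowpan interface stacked on the
 * receiving WPAN device. Each matching interface gets its own copy of
 * the skb, so the caller keeps ownership of (and must free) the
 * original.
 */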
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct lowpan_dev_record *entry;
	struct sk_buff *skb_cp;
	int stat = NET_RX_SUCCESS;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
			skb_cp = skb_copy(skb, GFP_ATOMIC);
			if (!skb_cp) {
				stat = -ENOMEM;
				break;
			}

			skb_cp->dev = entry->ldev;
			stat = netif_rx(skb_cp);
		}
	rcu_read_unlock();

	return stat;
}

static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}

static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
	struct lowpan_fragment *frame;

	frame = kzalloc(sizeof(struct lowpan_fragment),
			GFP_ATOMIC);
	if (!frame)
		goto frame_err;

	INIT_LIST_HEAD(&frame->list);

	frame->length = len;
	frame->tag = tag;

	/* allocate buffer for frame assembling */
	frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
					       sizeof(struct ipv6hdr));

	if (!frame->skb)
		goto skb_err;

	frame->skb->priority = skb->priority;

	/* reserve headroom for uncompressed ipv6 header */
	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
	skb_put(frame->skb, frame->length);

	/* copy the first control block to keep a
	 * trace of the link-layer addresses in case
	 * of a link-local compressed address
	 */
	memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

	init_timer(&frame->timer);
	/* time out is the same as for ipv6 - 60 sec */
	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
	frame->timer.data = (unsigned long)frame;
	frame->timer.function = lowpan_fragment_timer_expired;

	add_timer(&frame->timer);

	list_add_tail(&frame->list, &lowpan_fragments);

	return frame;

skb_err:
	kfree(frame);
frame_err:
	return NULL;
}

static int process_data(struct sk_buff *skb)
{
	u8 iphc0, iphc1;
	const struct ieee802154_addr *_saddr, *_daddr;

	raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	/* fragments assembling */
	switch (iphc0 & LOWPAN_DISPATCH_MASK) {
	case LOWPAN_DISPATCH_FRAG1:
	case LOWPAN_DISPATCH_FRAGN:
	{
		struct lowpan_fragment *frame;
		/* slen stores the rightmost 8 bits of the 11-bit length */
		u8 slen, offset = 0;
		u16 len, tag;
		bool found = false;

		if (lowpan_fetch_skb_u8(skb, &slen) ||	/* frame length */
		    lowpan_fetch_skb_u16(skb, &tag))	/* fragment tag */
			goto drop;

		/* adds the 3 MSB to the 8 LSB to retrieve the 11-bit length */
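		/* e.g. a full 1280-byte datagram arrives as dispatch low
		 * bits 0x5 with slen 0x00: (0x5 << 8) | 0x00 == 1280
		 */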
		len = ((iphc0 & 7) << 8) | slen;

		if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
			pr_debug("%s received a FRAG1 packet (tag: %d, "
				 "size of the entire IP packet: %d)",
				 __func__, tag, len);
		} else { /* FRAGN */
			if (lowpan_fetch_skb_u8(skb, &offset))
				goto drop;
			pr_debug("%s received a FRAGN packet (tag: %d, "
				 "size of the entire IP packet: %d, "
				 "offset: %d)", __func__, tag, len, offset * 8);
		}

		/*
		 * check if frame assembling with the same tag is
		 * already in progress
		 */
		spin_lock_bh(&flist_lock);

		list_for_each_entry(frame, &lowpan_fragments, list)
			if (frame->tag == tag) {
				found = true;
				break;
			}

		/* alloc new frame structure */
		if (!found) {
			pr_debug("%s first fragment received for tag %d, "
				 "begin packet reassembly", __func__, tag);
			frame = lowpan_alloc_new_frame(skb, len, tag);
			if (!frame)
				goto unlock_and_drop;
		}

		/* if payload fits buffer, copy it */
		if (likely((offset * 8 + skb->len) <= frame->length))
			skb_copy_to_linear_data_offset(frame->skb, offset * 8,
						       skb->data, skb->len);
		else
			goto unlock_and_drop;

		frame->bytes_rcv += skb->len;

		/* frame assembling complete */
		if ((frame->bytes_rcv == frame->length) &&
		    frame->timer.expires > jiffies) {
			/* if the timer hasn't expired - first of all delete it */
			del_timer_sync(&frame->timer);
			list_del(&frame->list);
			spin_unlock_bh(&flist_lock);

			pr_debug("%s successfully reassembled fragment "
				 "(tag %d)", __func__, tag);

			dev_kfree_skb(skb);
			skb = frame->skb;
			kfree(frame);

			if (lowpan_fetch_skb_u8(skb, &iphc0))
				goto drop;

			break;
		}
		spin_unlock_bh(&flist_lock);

		kfree_skb(skb);
		return 0;
	}
	default:
		break;
	}

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	_saddr = &mac_cb(skb)->sa;
	_daddr = &mac_cb(skb)->da;

	return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
				   _saddr->addr_type, IEEE802154_ADDR_LEN,
				   (u8 *)_daddr->hwaddr, _daddr->addr_type,
				   IEEE802154_ADDR_LEN, iphc0, iphc1,
				   lowpan_give_skb_to_devices);

unlock_and_drop:
	spin_unlock_bh(&flist_lock);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static int lowpan_set_address(struct net_device *dev, void *p)
{
	struct sockaddr *sa = p;

	if (netif_running(dev))
		return -EBUSY;

	/* TODO: validate addr */
	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	return 0;
}

static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
		     int mlen, int plen, int offset, int type)
{
	struct sk_buff *frag;
	int hlen;

	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
		LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;

	raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	frag = netdev_alloc_skb(skb->dev,
				hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;

	/* copy header, MFR and payload */
	skb_put(frag, mlen);
	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);

	skb_put(frag, hlen);
	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);

	skb_put(frag, plen);
	skb_copy_to_linear_data_offset(frag, mlen + hlen,
				       skb_network_header(skb) + offset, plen);

	raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}

static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = skb->mac_len;
	payload_length = skb->len - header_length;
	tag = lowpan_dev_info(dev)->fragment_tag++;

	/* first fragment header */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
	head[1] = payload_length & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;
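	/* worked example (values illustrative): payload_length == 1280
	 * and tag == 1 give head[] = { 0xc5, 0x00, 0x00, 0x01 }, since
	 * LOWPAN_DISPATCH_FRAG1 is 0xc0 and 1280 == 0x500
	 */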

	err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
				   0, LOWPAN_DISPATCH_FRAG1);

	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = LOWPAN_FRAG_SIZE;

	/* next fragment header */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while (payload_length - offset > 0) {
		int len = LOWPAN_FRAG_SIZE;

		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d)", __func__, tag, offset);
			goto exit;
		}

		offset += len;
	}

exit:
	return err;
}

static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("packet xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the 2 checksum bytes. */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	return (err < 0) ? NET_XMIT_DROP : err;
}

static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
}

static u16 lowpan_get_pan_id(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
}

static u16 lowpan_get_short_addr(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
}

static u8 lowpan_get_dsn(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
}

static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};

static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};

static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= free_netdev;
}

static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
			return -EINVAL;
	}
	return 0;
}

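/* packet_type handler for ETH_P_IEEE802154 frames. Demultiplexes on the
 * first 6lowpan dispatch byte: LOWPAN_DISPATCH_IPV6 means an uncompressed
 * IPv6 header follows, while the IPHC/FRAG1/FRAGN patterns (matched on
 * the top three bits below) go through process_data() for decompression
 * and, if needed, reassembly.
 */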
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sk_buff *local_skb;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		/* Pull off the 1-byte 6lowpan header. */
		skb_pull(local_skb, 1);

		lowpan_give_skb_to_devices(local_skb, NULL);

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;
			process_data(local_skb);

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

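/* rtnl newlink handler. From userspace the interface pair is typically
 * set up with something like (illustrative, device names are examples):
 *
 *	ip link add link wpan0 name lowpan0 type lowpan
 *	ip link set lowpan0 up
 *
 * IFLA_LINK must refer to an existing ARPHRD_IEEE802154 device.
 */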
static int lowpan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *real_dev;
	struct lowpan_dev_record *entry;

	pr_debug("adding new link\n");

	if (!tb[IFLA_LINK])
		return -EINVAL;
	/* find and hold real wpan device */
	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_IEEE802154) {
		dev_put(real_dev);
		return -EINVAL;
	}

	lowpan_dev_info(dev)->real_dev = real_dev;
	lowpan_dev_info(dev)->fragment_tag = 0;
	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);

	entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
	if (!entry) {
		dev_put(real_dev);
		lowpan_dev_info(dev)->real_dev = NULL;
		return -ENOMEM;
	}

	entry->ldev = dev;

	/* Set the lowpan hardware address to the wpan hardware address. */
	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	INIT_LIST_HEAD(&entry->list);
	list_add_tail(&entry->list, &lowpan_devices);
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	register_netdevice(dev);

	return 0;
}

static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}

static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};

static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}

static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(del_list);
	struct lowpan_dev_record *entry, *tmp;

	if (dev->type != ARPHRD_IEEE802154)
		goto out;

	if (event == NETDEV_UNREGISTER) {
		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
				lowpan_dellink(entry->ldev, &del_list);
		}

		unregister_netdevice_many(&del_list);
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};

static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};

static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0) {
		dev_remove_pack(&lowpan_packet_type);
		lowpan_netlink_fini();
	}
out:
	return err;
}

static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now that the 6lowpan packet_type is removed, no new fragments
	 * are expected on RX, so this is the time to clean up incomplete
	 * fragments.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}

module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");