/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/
13
14#include <linux/version.h>
15#include <linux/if_arp.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18
19#include <net/ipv6.h>
20#include <net/ip6_route.h>
21#include <net/addrconf.h>
22
23#include <net/af_ieee802154.h> /* to get the address type */
24
25#include <net/bluetooth/bluetooth.h>
26#include <net/bluetooth/hci_core.h>
27#include <net/bluetooth/l2cap.h>
28
Johan Hedbergd0746f32013-12-12 09:53:20 +020029#include "6lowpan.h"
30
Jukka Rissanen18722c22013-12-11 17:05:37 +020031#include "../ieee802154/6lowpan.h" /* for the compression support */
32
33#define IFACE_NAME_TEMPLATE "bt%d"
34#define EUI64_ADDR_LEN 8
35
36struct skb_cb {
37 struct in6_addr addr;
38 struct l2cap_conn *conn;
39};
40#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
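/* Note: struct skb_cb lives in skb->cb, the 48-byte control buffer
 * owned by the layer currently handling the skb; an in6_addr plus a
 * pointer fits in it with room to spare.
 */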

/* The devices list contains those devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There is one virtual
 * device per BT 6LoWPAN network (= hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);
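/* devices_lock protects both bt_6lowpan_devices and the per-device
 * peers lists. The _irqsave variants are used throughout because the
 * lock can be taken from atomic context (e.g. on the transmit path).
 */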

struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

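/* peer_lookup_ba() and peer_lookup_conn() walk a per-device peers
 * list and expect the caller to hold devices_lock (at least for
 * reading); lookup_peer() and lookup_dev() take the read lock
 * themselves.
 */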
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}

/* print data on a single line */
static inline void raw_dump_inline(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
	if (msg)
		pr_debug("%s():%s: ", caller, msg);

	print_hex_dump_debug("", DUMP_PREFIX_NONE,
			     16, 1, buf, len, false);
}

/* print data in a table format:
 *
 * addr: xx xx xx xx xx xx
 * addr: xx xx xx xx xx xx
 * ...
 */
static inline void raw_dump_table(const char *caller, char *msg,
				  unsigned char *buf, int len)
{
	if (msg)
		pr_debug("%s():%s:\n", caller, msg);

	print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET,
			     16, 1, buf, len, false);
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}

static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
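		/* The IPHC dispatch pattern is 011xxxxx (RFC 6282), so
		 * masking with 0xe0 keeps the top three bits and lets
		 * the LOWPAN_DISPATCH_IPHC case match any IPHC-encoded
		 * datagram; everything else is silently ignored.
		 */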
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}

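/* The first skb built by create_pdu() carries the L2CAP header plus
 * as much payload as fits in one connection MTU; whatever is left is
 * chained onto skb_shinfo(skb)->frag_list here as bare continuation
 * fragments, which the HCI core transmits as ACL_CONT packets.
 */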
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg += count;
	len -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		/* bt_skb_alloc() returns NULL on failure, so check for
		 * that rather than for an ERR_PTR value.
		 */
		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}

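/* Build a single L2CAP PDU: a basic L2CAP header (payload length plus
 * the 6LoWPAN fixed channel ID) followed by the payload, fragmented
 * to the connection MTU by skbuff_copy() above.
 */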
static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This MTU check should not be needed and is currently
	 * only here for testing purposes.
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}

static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}

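/* Recover the BT address and its (public/random) type from a peer's
 * link-local IPv6 address: undo set_addr() below by reversing the
 * EUI-64 bytes, dropping the 0xFFFE filler, re-inverting the
 * universal/local bit and reading the type marker out of bit 0.
 */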
static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
			    bdaddr_t *addr, u8 *addr_type)
{
	u8 *eui64;

	eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];

	/* Invert the universal/local bit back, RFC 4291 */
	addr->b[5] ^= 2;

	/* Bit 0 carries the address type (see set_addr()); clear it */
	if (addr->b[5] & 1) {
		addr->b[5] &= ~1;
		*addr_type = BDADDR_LE_PUBLIC;
	} else {
		*addr_type = BDADDR_LE_RANDOM;
	}
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d", &addr, addr_type);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}

static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry, *ptmp;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				continue;

			send_pkt(pentry->conn, netdev->dev_addr,
				 pentry->eui64_addr, local_skb, netdev);

			kfree_skb(local_skb);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
		       &addr, &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}
	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit = bt_xmit,
};

static struct header_ops header_ops = {
	.create = header_create,
};

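/* netdev_setup() below configures the virtual interface: EUI-64
 * hardware addresses, point-to-point flags, and IPV6_MIN_MTU (1280),
 * the minimum MTU every IPv6 link must support.
 */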
static void netdev_setup(struct net_device *dev)
{
	dev->addr_len		= EUI64_ADDR_LEN;
	dev->type		= ARPHRD_6LOWPAN;

	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->mtu		= IPV6_MIN_MTU;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name = "bluetooth",
};

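/* Build a modified EUI-64 from a BT address (bdaddr_t is stored
 * little-endian): reverse the bytes, insert 0xFFFE in the middle and
 * invert the universal/local bit (RFC 4291). For a hypothetical
 * public address 00:1B:DC:AA:BB:CC this gives 02:1B:DC:FF:FE:AA:BB:CC;
 * the low bit of the first byte is then (ab)used as a public/random
 * marker that get_dest_bdaddr() reads back, giving 03:... here.
 */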
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Invert the universal/local bit, RFC 4291 */
	eui[0] ^= 2;

	/* Record the address type in the lowest bit so that
	 * get_dest_bdaddr() can recover it later.
	 */
	if (addr_type == BDADDR_LE_PUBLIC)
		eui[0] |= 1;
	else
		eui[0] &= ~1;
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
	/* flip the universal/local bit back for the local address */
	netdev->dev_addr[0] ^= 2;
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}

static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);
	peer->eui64_addr[0] ^= 2; /* second (universal/local) bit flip,
				   * done according to RFC 2464
				   */

	raw_dump_inline(__func__, "peer IPv6 address",
			(unsigned char *)&peer->peer_addr, 16);
	raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}

/* This gets called when a BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE
 * device and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}

int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with the hci dev lock
		 * held, which means the netdevice must be deleted from a
		 * worker thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}