/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/version.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "../ieee802154/6lowpan.h" /* for the compression support */

#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

struct skb_cb {
	struct in6_addr addr;
	struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices that we are acting
 * as a proxy for. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There exists one virtual
 * device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);

struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

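/* Look up a peer by its Bluetooth device address and address type.
 * Callers must hold devices_lock.
 */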
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}

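/* Look up a peer by its L2CAP connection. Callers must hold
 * devices_lock.
 */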
static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}

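/* Find the peer matching the given connection on any 6LoWPAN device. */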
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}

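/* Find the 6LoWPAN device attached to the same hci device as the
 * given connection.
 */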
static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}

/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
	if (msg)
		pr_debug("%s():%s: ", caller, msg);

	print_hex_dump_debug("", DUMP_PREFIX_NONE,
			     16, 1, buf, len, false);
}

/* print data in a table format:
 *
 * addr: xx xx xx xx xx xx
 * addr: xx xx xx xx xx xx
 * ...
 */
static inline void raw_dump_table(const char *caller, char *msg,
				  unsigned char *buf, int len)
{
	if (msg)
		pr_debug("%s():%s:\n", caller, msg);

	print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET,
			     16, 1, buf, len, false);
}

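/* Hand a copy of the skb to the network stack. A copy is used so
 * that the caller keeps ownership of the original buffer.
 */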
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}

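/* Decompress an IPHC encoded packet from the given peer and feed the
 * resulting IPv6 datagram to the network stack.
 */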
static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

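/* Handle a packet received over the L2CAP channel: an uncompressed
 * IPv6 datagram (LOWPAN_DISPATCH_IPV6) is passed up as is, while an
 * IPHC compressed one is decompressed first.
 */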
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}

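/* Copy the message payload into the skb, splitting anything that does
 * not fit into the connection MTU into continuation fragments chained
 * on frag_list. Returns the number of bytes copied or a negative
 * error.
 */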
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg += count;
	len -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp) /* bt_skb_alloc() returns NULL on failure */
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}

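/* Build an L2CAP PDU for the 6LoWPAN fixed channel around the
 * (compressed) packet data.
 */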
static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This MTU check should not be needed and is currently
	 * only used for testing purposes.
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb) /* bt_skb_alloc() returns NULL on failure */
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}

static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return -EINVAL;

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}

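/* Recover the destination Bluetooth address and address type from the
 * EUI-64 interface identifier of an IPv6 address. This is the inverse
 * of set_addr(): the 0xFFFE filler bytes are skipped, the
 * universal/local bit is restored and the address type is read from
 * the lowest bit.
 */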
static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
			    bdaddr_t *addr, u8 *addr_type)
{
	u8 *eui64;

	eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];

	addr->b[5] ^= 2; /* restore the universal/local bit */

	/* The lowest bit encodes the address type: set means public
	 * (clear it before use), unset means random.
	 */
	if (addr->b[5] & 1) {
		addr->b[5] &= ~1;
		*addr_type = BDADDR_LE_PUBLIC;
	} else {
		*addr_type = BDADDR_LE_RANDOM;
	}
}

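/* header_ops callback. No link-layer header is actually emitted;
 * instead the destination peer is resolved from the IPv6 destination
 * address, stored in the skb control block for bt_xmit(), and the
 * IPv6 header is compressed.
 */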
static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d", &addr, addr_type);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}

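/* Multicast packet: send a copy to every peer connected through this
 * interface.
 */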
static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry, *ptmp;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb) /* skb_clone() can fail under memory pressure */
				break;

			send_pkt(pentry->conn, netdev->dev_addr,
				 pentry->eui64_addr, local_skb, netdev);

			kfree_skb(local_skb);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);
}

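/* Transmit handler: send the packet to the peer resolved earlier in
 * header_create(), or to all peers if the destination is multicast.
 */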
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
		       &addr, &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit = bt_xmit,
};

static struct header_ops header_ops = {
	.create = header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->addr_len = EUI64_ADDR_LEN;
	dev->type = ARPHRD_6LOWPAN;

	dev->hard_header_len = 0;
	dev->needed_tailroom = 0;
	dev->mtu = IPV6_MIN_MTU;
	dev->tx_queue_len = 0;
	dev->flags = IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo = 0;

	dev->netdev_ops = &netdev_ops;
	dev->header_ops = &header_ops;
	dev->destructor = free_netdev;
}

static struct device_type bt_type = {
	.name = "bluetooth",
};

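/* Build an EUI-64 interface identifier from a Bluetooth device
 * address by inserting 0xFFFE in the middle and complementing the
 * universal/local bit (RFC 4291); the lowest bit of the first byte
 * then carries the address type. For example, the (hypothetical)
 * public address 00:1B:DC:11:22:33 maps to 03:1B:DC:FF:FE:11:22:33.
 */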
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];
	/* Complement the universal/local bit (RFC 4291) */
	eui[0] ^= 2;

	/* Encode the address type in the lowest bit:
	 * set for public, clear for random.
	 */
	if (addr_type == BDADDR_LE_PUBLIC)
		eui[0] |= 1;
	else
		eui[0] &= ~1;
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
	netdev->dev_addr[0] ^= 2;
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}

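/* Allocate a peer entry for a new connection and derive its
 * link-local IPv6 address and EUI-64 identifier from the remote
 * Bluetooth address.
 */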
static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);
	peer->eui64_addr[0] ^= 2; /* second (universal/local) bit-flip,
				   * done according to RFC 2464
				   */

	raw_dump_inline(__func__, "peer IPv6 address",
			(unsigned char *)&peer->peer_addr, 16);
	raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}

/* This gets called when a BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE
 * device and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}

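/* This gets called when the BT LE 6LoWPAN connection is dropped. The
 * peer is removed and, if it was the last one, the virtual network
 * device is scheduled for deletion.
 */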
int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with the hci dev lock
		 * held, which means that the netdevice must be deleted
		 * in a worker thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}