/*
   Copyright (c) 2013-2014 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

struct skb_cb {
	struct in6_addr addr;
	struct in6_addr gw;
	struct l2cap_chan *chan;
	int status;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains the devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to
 * the BT device is done via the L2CAP layer. There exists one
 * virtual device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_btle_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We are listening for incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;

struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;
	struct l2cap_chan *chan;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_btle_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static void set_addr(u8 *eui, u8 *addr, u8 addr_type);

static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}

static inline void peer_add(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d",
		       &peer->chan->dst, peer->chan->dst_type);

		if (bacmp(&peer->chan->dst, ba))
			continue;

		if (type == peer->chan->dst_type) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan == chan)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan->conn == conn)
			return peer;
	}

	return NULL;
}

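/* Look up the peer that should receive a routed packet: with a single
 * peer the choice is trivial, otherwise the next-hop address (taken from
 * the skb's cached route, or from the gateway remembered in lowpan_cb())
 * is matched against the link-local address of each peer.
 */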
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct lowpan_peer *peer;
	struct in6_addr *nexthop;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	/* If we have multiple 6lowpan peers, then check where we should
	 * send the packet. If only one peer exists, then we can send the
	 * packet right away.
	 */
	if (count == 1) {
		rcu_read_lock();
		peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
					      list);
		rcu_read_unlock();
		return peer;
	}

	if (!rt) {
		nexthop = &lowpan_cb(skb)->gw;

		if (ipv6_addr_any(nexthop))
			return NULL;
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}

static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx_ni(skb_cp);
}

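/* Undo the 6LoWPAN IPHC compression on a received frame. The source
 * context is the sending peer's EUI-64 address and the destination
 * context is derived from our own channel source address via set_addr().
 */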
static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
			   struct l2cap_chan *chan)
{
	const u8 *saddr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	unsigned char eui64_daddr[EUI64_ADDR_LEN];

	dev = lowpan_btle_dev(netdev);

	rcu_read_lock();
	peer = __peer_lookup_chan(dev, chan);
	rcu_read_unlock();
	if (!peer)
		return -EINVAL;

	saddr = peer->eui64_addr;
	set_addr(&eui64_daddr[0], chan->src.b, chan->src_type);

	return lowpan_header_decompress(skb, netdev, &eui64_daddr, saddr);
}

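/* Deliver one frame received from a peer to the network stack. Frames
 * carrying the uncompressed IPv6 dispatch byte are copied (so the IPv6
 * header ends up properly aligned) and passed up as-is; IPHC-compressed
 * frames are first decompressed via iphc_decompress(). Anything else is
 * dropped.
 */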
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_chan *chan)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, chan);
		if (ret < 0) {
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev)
				!= NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(chan->conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, chan);
	if (err) {
		BT_DBG("recv pkt %d", err);
		err = -EAGAIN;
	}

	return err;
}

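/* Helpers for mapping a destination IPv6 address back to a Bluetooth
 * device address: the EUI-64 in the low 64 bits is converted to a
 * little-endian bdaddr_t and the universal/local bit is toggled back to
 * recover the real address and its (public/random) type.
 */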
static u8 get_addr_type_from_eui64(u8 byte)
{
	/* Is universal(0) or local(1) bit */
	return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
}

static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
	u8 *eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];
}

static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* We need to toggle the U/L bit that we got from the IPv6 address
	 * so that we get the proper address and type of the BD address.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}

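/* Prepare the skb for transmission towards a peer. For a unicast
 * destination the peer is looked up (directly, or via the route for
 * forwarded packets) and remembered in lowpan_cb(skb)->chan; multicast
 * packets leave the channel unset. The IPv6 header is then IPHC
 * compressed. Returns <0 on error, 0 for multicast and 1 for unicast.
 */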
static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct ipv6hdr *hdr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *daddr = any->b;
	int err, status = 0;

	hdr = ipv6_hdr(skb);

	dev = lowpan_btle_dev(netdev);

	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
	} else {
		u8 addr_type;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
		       addr_type, &ipv6_daddr);

		peer = peer_lookup_ba(dev, &addr, addr_type);
		if (!peer) {
			/* The packet might be sent to the 6lowpan interface
			 * because of routing (either via a default route or
			 * a user-set route), so get the peer according to
			 * the destination address.
			 */
			peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
			if (!peer) {
				BT_DBG("no such peer %pMR found", &addr);
				return -ENOENT;
			}
		}

		daddr = peer->eui64_addr;
		*peer_addr = addr;
		*peer_addr_type = addr_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	if (type != ETH_P_IPV6)
		return -EINVAL;

	return 0;
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (!err)
		err = lowpan_cb(skb)->status;

	if (err < 0) {
		if (err == -EAGAIN)
			netdev->stats.tx_dropped++;
		else
			netdev->stats.tx_errors++;
	}

	return err;
}

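/* Multicast delivery is emulated by cloning the packet and sending one
 * copy over every peer's L2CAP channel attached to this interface; the
 * last per-peer error, if any, is returned.
 */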
static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_btle_dev *entry;
	int err = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry;
		struct lowpan_btle_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_btle_dev(entry->netdev);

		list_for_each_entry_rcu(pentry, &dev->peers, list) {
			int ret;

			local_skb = skb_clone(skb, GFP_ATOMIC);

			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name,
			       &pentry->chan->dst, pentry->chan->dst_type,
			       &pentry->peer_addr, pentry->chan);
			ret = send_pkt(pentry->chan, local_skb, netdev);
			if (ret < 0)
				err = ret;

			kfree_skb(local_skb);
		}
	}

	rcu_read_unlock();

	return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 * <0 - error, packet is dropped
	 *  0 - this is a multicast packet
	 *  1 - this is a unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}

static int bt_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_init		= bt_dev_init,
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
				  IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};

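/* Build a modified EUI-64 interface identifier from a little-endian
 * Bluetooth device address: the address bytes are reversed, 0xFFFE is
 * inserted in the middle and the universal/local bit is set or cleared
 * according to the address type (public vs. random).
 */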
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
	if (addr_type == BDADDR_LE_PUBLIC)
		eui[0] &= ~0x02;
	else
		eui[0] |= 0x02;

	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_close(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
						   notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	if (!enable_6lowpan)
		return false;

	return true;
}

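/* Allocate an L2CAP channel preconfigured for 6LoWPAN use: a
 * connection-oriented channel in LE flow-control mode with an input MTU
 * of 1280 bytes, the IPv6 minimum MTU.
 */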
static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->imtu = 1280;

	return chan;
}

static void set_ip_addr_bits(u8 addr_type, u8 *addr)
{
	if (addr_type == BDADDR_LE_PUBLIC)
		*addr |= 0x02;
	else
		*addr &= ~0x02;
}

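/* Record a newly connected peer on the given 6LoWPAN device. The peer's
 * link-local IPv6 address is generated from its Bluetooth address (with
 * the U/L bit restored afterwards), the peer is added to the device's
 * peer list and a delayed neighbour advertisement is scheduled.
 */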
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_btle_dev *dev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
		 chan->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	/* IPv6 address needs to have the U/L bit set properly so toggle
	 * it back here.
	 */
	set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}

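/* Create and register the virtual "bt%d" 6LoWPAN network device for the
 * HCI adapter behind this channel. The netdev hardware address is the
 * byte-swapped local Bluetooth address, and the device is linked into
 * bt_6lowpan_devices before registration so lookups can find it.
 */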
static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
	struct net_device *netdev;
	int err = 0;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	netdev->addr_assign_type = NET_ADDR_PERM;
	baswap((void *)netdev->dev_addr, &chan->src);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_btle_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
	}

	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev);
	ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = pchan->ops;

	BT_DBG("chan %p pchan %p", chan, pchan);

	return chan;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_btle_dev *entry = container_of(work,
						     struct lowpan_btle_dev,
						     delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}

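/* Channel close callback: remove the peer that owned this channel and,
 * if it was the last peer on the device, bring the interface down and
 * (unless the underlying connection is still set) schedule the netdev
 * for deletion from a workqueue.
 */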
static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_btle_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %d", chan,
			       kref_read(&chan->kref));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}

	return;
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}

static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct sk_buff *skb = chan->data;

	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);

	if (!skb)
		return;

	lowpan_cb(skb)->status = -EAGAIN;
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct sk_buff *skb = chan->data;

	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);

	if (!skb)
		return;

	lowpan_cb(skb)->status = 0;
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

static inline __u8 bdaddr_type(__u8 type)
{
	if (type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;
	else
		return BDADDR_LE_RANDOM;
}

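/* Initiate an outgoing LE connection to the given peer on the IPSP PSM.
 * The channel reference is dropped on immediate failure; otherwise the
 * usual channel callbacks take over once the connection completes.
 */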
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %d", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}

static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %d", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();

	kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			lowpan_enable_set, "%llu\n");

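/* debugfs 6lowpan_control write handler. It accepts two commands:
 *   "connect <bdaddr> <addr_type>"    - connect to the given peer
 *   "disconnect <bdaddr> <addr_type>" - close the connection to it
 * where <bdaddr> is a colon-separated Bluetooth address as parsed by
 * get_l2cap_conn().
 */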
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
						    bt_debugfs, NULL,
						    &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");