blob: d9ad68448173da4fe1be88348ed62a4641b41700 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020033#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
/* Registered HCI management channels; additions, removals and lookups
 * are serialized by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Number of open monitor sockets.  Frames are only copied to the
 * monitor channel while this is non-zero (see hci_send_to_monitor()).
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* ----- HCI socket interface ----- */
43
/* Socket info: per-socket protocol state, overlaid on struct sock via
 * the hci_pi() cast (bt/struct bt_sock must therefore stay first).
 */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;		/* common Bluetooth socket state; must be first */
	struct hci_dev *hdev;		/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* RAW channel packet/event/opcode filter */
	__u32 cmsg_mask;		/* HCI_CMSG_* ancillary data to deliver */
	unsigned short channel;		/* HCI_CHANNEL_* this socket bound to */
	unsigned long flags;		/* HCI_SOCK_*/HCI_MGMT_* bits (atomic bitops) */
};
55
Marcel Holtmann6befc642015-03-14 19:27:53 -070056void hci_sock_set_flag(struct sock *sk, int nr)
57{
58 set_bit(nr, &hci_pi(sk)->flags);
59}
60
61void hci_sock_clear_flag(struct sock *sk, int nr)
62{
63 clear_bit(nr, &hci_pi(sk)->flags);
64}
65
Marcel Holtmannc85be542015-03-14 19:28:00 -070066int hci_sock_test_flag(struct sock *sk, int nr)
67{
68 return test_bit(nr, &hci_pi(sk)->flags);
69}
70
Johan Hedbergd0f172b2015-03-17 13:48:46 +020071unsigned short hci_sock_get_channel(struct sock *sk)
72{
73 return hci_pi(sk)->channel;
74}
75
Jiri Slaby93919762015-02-19 15:20:43 +010076static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070077{
Jiri Slaby93919762015-02-19 15:20:43 +010078 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

/* Bitmask sets describing allowed packet types, allowed events and,
 * per command group (OGF), allowed command opcodes (OCF).
 */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed HCI packet types */
	__u32 event_mask[2];			/* allowed HCI events (64 bits) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
89
/* Default security filter table.  NOTE(review): consumed by the HCI
 * send/receive paths outside this chunk — presumably restricting what
 * unprivileged RAW sockets may exchange; verify against the caller.
 * The bit values encode specific packet types, events and opcodes and
 * must not be altered.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
110
/* Global list of all open HCI sockets, guarded by its own rwlock
 * (readers: broadcast paths; writers: socket link/unlink).
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700115static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
116{
117 struct hci_filter *flt;
118 int flt_type, flt_event;
119
120 /* Apply filter */
121 flt = &hci_pi(sk)->filter;
122
123 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
124 flt_type = 0;
125 else
126 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
127
128 if (!test_bit(flt_type, &flt->type_mask))
129 return true;
130
131 /* Extra filter for event packets only */
132 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
133 return false;
134
135 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
136
137 if (!hci_test_bit(flt_event, &flt->event_mask))
138 return true;
139
140 /* Check filter only when opcode is set */
141 if (!flt->opcode)
142 return false;
143
144 if (flt_event == HCI_EV_CMD_COMPLETE &&
145 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
146 return true;
147
148 if (flt_event == HCI_EV_CMD_STATUS &&
149 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
150 return true;
151
152 return false;
153}
154
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	/* Shared copy with the type byte prepended, created lazily on the
	 * first matching socket and cloned for every further receiver.
	 */
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event/ACL/SCO */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		/* Each receiver gets its own clone of the shared copy */
		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference; queued clones keep the data alive */
	kfree_skb(skb_copy);
}
212
Johan Hedberg71290692015-02-20 13:26:23 +0200213/* Send frame to sockets with specific channel */
214void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700215 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100216{
217 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100218
Johan Hedberg71290692015-02-20 13:26:23 +0200219 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100220
221 read_lock(&hci_sk_list.lock);
222
Sasha Levinb67bfe02013-02-27 17:06:00 -0800223 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100224 struct sk_buff *nskb;
225
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700226 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700227 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700228 continue;
229
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100230 /* Skip the original socket */
231 if (sk == skip_sk)
232 continue;
233
234 if (sk->sk_state != BT_BOUND)
235 continue;
236
Johan Hedberg71290692015-02-20 13:26:23 +0200237 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800238 continue;
239
240 nskb = skb_clone(skb, GFP_ATOMIC);
241 if (!nskb)
242 continue;
243
244 if (sock_queue_rcv_skb(sk, nskb))
245 kfree_skb(nskb);
246 }
247
248 read_unlock(&hci_sk_list.lock);
249}
250
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Fast path: no monitor socket is currently open */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map HCI packet type (and direction) to a monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not forwarded */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
301
/* Build a monitor-channel skb for a device register/unregister event.
 * Returns NULL for any other event or on allocation failure; the caller
 * owns (and must free) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* New index events carry the controller's identity */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Delete index events have an empty payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Monitor header: opcode, controller index, payload length */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
345
346static void send_monitor_replay(struct sock *sk)
347{
348 struct hci_dev *hdev;
349
350 read_lock(&hci_dev_list_lock);
351
352 list_for_each_entry(hdev, &hci_dev_list, list) {
353 struct sk_buff *skb;
354
355 skb = create_monitor_event(hdev, HCI_DEV_REG);
356 if (!skb)
357 continue;
358
359 if (sock_queue_rcv_skb(sk, skb))
360 kfree_skb(skb);
361 }
362
363 read_unlock(&hci_dev_list_lock);
364}
365
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Wrap the stack-internal event in a standard HCI event header */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so delivery paths treat it like a received
	 * event packet.
	 */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
392
/* Notify interested sockets about a device state change: monitor
 * sockets get a monitor event, regular sockets get a stack-internal
 * event for up/down transitions, and on HCI_DEV_UNREG every socket
 * bound to the device is detached from it.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				/* Wake the socket with EPIPE and drop the
				 * device reference it was holding.
				 */
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
438
Johan Hedberg801c1e82015-03-06 21:08:50 +0200439static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
440{
441 struct hci_mgmt_chan *c;
442
443 list_for_each_entry(c, &mgmt_chan_list, list) {
444 if (c->channel == channel)
445 return c;
446 }
447
448 return NULL;
449}
450
451static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
452{
453 struct hci_mgmt_chan *c;
454
455 mutex_lock(&mgmt_chan_list_lock);
456 c = __hci_mgmt_chan_find(channel);
457 mutex_unlock(&mgmt_chan_list_lock);
458
459 return c;
460}
461
462int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
463{
464 if (c->channel < HCI_CHANNEL_CONTROL)
465 return -EINVAL;
466
467 mutex_lock(&mgmt_chan_list_lock);
468 if (__hci_mgmt_chan_find(c->channel)) {
469 mutex_unlock(&mgmt_chan_list_lock);
470 return -EALREADY;
471 }
472
473 list_add_tail(&c->list, &mgmt_chan_list);
474
475 mutex_unlock(&mgmt_chan_list_lock);
476
477 return 0;
478}
479EXPORT_SYMBOL(hci_mgmt_chan_register);
480
/* Remove a previously registered management channel from the global
 * list under mgmt_chan_list_lock.
 */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
488
/* Release an HCI socket: drop monitor/device accounting, tear down an
 * exclusive user channel if held, and purge queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Closing a monitor socket ends its promiscuous accounting */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	/* Drop anything still queued in either direction */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
534
Antti Julkub2a66aa2011-06-15 12:01:14 +0300535static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200536{
537 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300538 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200539
540 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
541 return -EFAULT;
542
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300543 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300544
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300545 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300546
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300547 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300548
549 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200550}
551
Antti Julkub2a66aa2011-06-15 12:01:14 +0300552static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200553{
554 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300555 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200556
557 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
558 return -EFAULT;
559
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300560 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300561
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300562 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300563
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300564 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300565
566 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200567}
568
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* A controller claimed for exclusive user channel access cannot
	 * be driven through these ioctls.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	/* These ioctls are only served for BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported; still requires
		 * CAP_NET_ADMIN so unprivileged callers see -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
612
/* Top-level HCI socket ioctl handler.  Device-independent commands are
 * dispatched with the socket lock dropped; anything unhandled falls
 * through to hci_sock_bound_ioctl() with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only the RAW channel serves these ioctls */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below don't touch socket state; release the lock
	 * before dispatching them.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Re-take the lock for the device-bound ioctls */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
685
/* Bind an HCI socket to a channel and, for the RAW and USER channels,
 * to a specific controller.  USER binding takes exclusive access to the
 * controller; MONITOR binding replays existing device events; any other
 * channel number must match a registered management channel.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; missing fields read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds promiscuously to all controllers */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete controller */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the controller is being
		 * initialized, set up, configured or is already in
		 * regular (non auto-off) use.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per controller */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Undo the exclusive claim on failure */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are never bound to one controller */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
863
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300864static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
865 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866{
867 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
868 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700869 struct hci_dev *hdev;
870 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871
872 BT_DBG("sock %p sk %p", sock, sk);
873
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700874 if (peer)
875 return -EOPNOTSUPP;
876
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877 lock_sock(sk);
878
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700879 hdev = hci_pi(sk)->hdev;
880 if (!hdev) {
881 err = -EBADFD;
882 goto done;
883 }
884
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 *addr_len = sizeof(*haddr);
886 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100887 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700888 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700890done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700892 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893}
894
/* Attach ancillary (control-message) data for a received HCI frame.
 *
 * Depending on the per-socket cmsg_mask, this adds to @msg:
 *  - HCI_CMSG_DIR:    the frame direction (bt_cb(skb)->incoming)
 *  - HCI_CMSG_TSTAMP: the skb receive timestamp as a struct timeval
 *
 * For 32-bit userspace on a 64-bit kernel (MSG_CMSG_COMPAT without
 * COMPAT_USE_64BIT_TIME) the timestamp is converted to a
 * struct compat_timeval so the layout matches what the caller expects.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		/* Default to the native timeval layout ... */
		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* ... but switch to the compat layout for 32-bit callers. */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900931
Ying Xue1b784142015-03-02 15:37:48 +0800932static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
933 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934{
935 int noblock = flags & MSG_DONTWAIT;
936 struct sock *sk = sock->sk;
937 struct sk_buff *skb;
938 int copied, err;
939
940 BT_DBG("sock %p, sk %p", sock, sk);
941
942 if (flags & (MSG_OOB))
943 return -EOPNOTSUPP;
944
945 if (sk->sk_state == BT_CLOSED)
946 return 0;
947
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200948 skb = skb_recv_datagram(sk, flags, noblock, &err);
949 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 return err;
951
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 copied = skb->len;
953 if (len < copied) {
954 msg->msg_flags |= MSG_TRUNC;
955 copied = len;
956 }
957
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -0300958 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -0500959 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960
Marcel Holtmann3a208622012-02-20 14:50:34 +0100961 switch (hci_pi(sk)->channel) {
962 case HCI_CHANNEL_RAW:
963 hci_sock_cmsg(sk, msg, skb);
964 break;
Marcel Holtmann23500182013-08-26 21:40:52 -0700965 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100966 case HCI_CHANNEL_MONITOR:
967 sock_recv_timestamp(msg, sk, skb);
968 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +0200969 default:
970 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
971 sock_recv_timestamp(msg, sk, skb);
972 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +0100973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
975 skb_free_datagram(sk, skb);
976
977 return err ? : copied;
978}
979
/* Parse and dispatch one management command from a sendmsg() buffer.
 *
 * The message must start with a struct mgmt_hdr (opcode, index, len).
 * The command is validated in stages, each failure being reported back
 * to the sender via mgmt_cmd_status():
 *  - opcode must be within the channel's handler table and implemented
 *  - untrusted sockets may only invoke HCI_MGMT_UNTRUSTED handlers
 *  - a non-wildcard index must name an existing device that is not in
 *    SETUP/CONFIG/USER_CHANNEL state, and unconfigured devices only
 *    accept HCI_MGMT_UNCONFIGURED handlers
 *  - presence of an hdev must match the handler's HCI_MGMT_NO_HDEV flag
 *  - payload length must match the handler's declared data_len
 *    (exactly, or as a minimum for HCI_MGMT_VAR_LEN handlers)
 *
 * Returns @msglen on success, a negative errno on internal failure, or
 * the mgmt_cmd_status() result when validation fails. Any hdev
 * reference taken here is dropped before returning.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	/* Copy the whole message into kernel memory before parsing. */
	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must account for every byte sent. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets only get explicitly whitelisted commands. */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still being set up or claimed by a user
		 * channel are not addressable through management.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handlers flagged HCI_MGMT_NO_HDEV must be called without an
	 * index, all others require one.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Give the channel a chance to do per-device setup first. */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the full message as consumed. */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1090
/* Send one frame on an HCI socket.
 *
 * Dispatch depends on the bound channel:
 *  - management channels hand the message to hci_mgmt_cmd() under
 *    mgmt_chan_list_lock
 *  - the monitor channel is read-only (-EOPNOTSUPP)
 *  - raw and user channels queue the frame to the bound device
 *
 * For raw-channel commands the opcode is checked against
 * hci_sec_filter unless the caller has CAP_NET_RAW; non-command
 * packets always require CAP_NET_RAW on the raw channel. Vendor
 * commands (OGF 0x3f) bypass the command queue and go straight to the
 * raw queue. Returns the number of bytes consumed or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum of one packet-type byte plus a 3 byte command header;
	 * anything shorter cannot be a valid HCI frame.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Everything else must be a registered mgmt channel. */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte carries the packet type; strip it from the data. */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Non-CAP_NET_RAW callers may only send commands that
		 * pass the static security filter.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) skip the command
		 * queue and are sent out as-is.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO injection needs CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1215
/* Set SOL_HCI socket options (raw channel only).
 *
 * Supports HCI_DATA_DIR and HCI_TIME_STAMP (toggle cmsg delivery) and
 * HCI_FILTER (install a packet-type/event filter). For HCI_FILTER the
 * current filter is loaded into @uf first, so a short copy_from_user()
 * only overwrites the leading fields and the rest keep their current
 * values. Callers without CAP_NET_RAW have their requested filter
 * masked down by hci_sec_filter. Returns 0 or a negative errno.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	/* Only the raw channel exposes these options. */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the current filter so a partial copy
		 * from userspace leaves the untouched fields intact.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged callers cannot widen beyond the
		 * security filter.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1298
/* Get SOL_HCI socket options (raw channel only).
 *
 * Mirrors hci_sock_setsockopt(): HCI_DATA_DIR and HCI_TIME_STAMP
 * report the cmsg flags as 0/1, HCI_FILTER copies out the current
 * filter (truncated to the caller's buffer length). Returns 0 or a
 * negative errno.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	/* Only the raw channel exposes these options. */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero uf first so padding never leaks kernel
			 * stack to userspace.
			 */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1364
/* proto_ops for PF_BLUETOOTH/SOCK_RAW HCI sockets. Connection-oriented
 * operations (listen/connect/accept/...) are stubbed with the
 * sock_no_* helpers since HCI sockets are datagram-style.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1384
/* Protocol descriptor; obj_size makes sk_alloc() allocate the larger
 * hci_pinfo so per-socket HCI state lives alongside struct sock.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1390
Eric Paris3f378b62009-11-05 22:18:14 -08001391static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1392 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393{
1394 struct sock *sk;
1395
1396 BT_DBG("sock %p", sock);
1397
1398 if (sock->type != SOCK_RAW)
1399 return -ESOCKTNOSUPPORT;
1400
1401 sock->ops = &hci_sock_ops;
1402
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001403 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 if (!sk)
1405 return -ENOMEM;
1406
1407 sock_init_data(sock, sk);
1408
1409 sock_reset_flag(sk, SOCK_ZAPPED);
1410
1411 sk->sk_protocol = protocol;
1412
1413 sock->state = SS_UNCONNECTED;
1414 sk->sk_state = BT_OPEN;
1415
1416 bt_sock_link(&hci_sk_list, sk);
1417 return 0;
1418}
1419
/* Address-family glue registered for BTPROTO_HCI. */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1425
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426int __init hci_sock_init(void)
1427{
1428 int err;
1429
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001430 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1431
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 err = proto_register(&hci_sk_proto, 0);
1433 if (err < 0)
1434 return err;
1435
1436 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001437 if (err < 0) {
1438 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001440 }
1441
Al Virob0316612013-04-04 19:14:33 -04001442 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001443 if (err < 0) {
1444 BT_ERR("Failed to create HCI proc file");
1445 bt_sock_unregister(BTPROTO_HCI);
1446 goto error;
1447 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 BT_INFO("HCI socket layer initialized");
1450
1451 return 0;
1452
1453error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 proto_unregister(&hci_sk_proto);
1455 return err;
1456}
1457
/* Unregister the HCI socket layer; exact reverse of hci_sock_init()
 * so each teardown step only runs after its dependents are gone.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}