/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

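/* Helper to test a bit in a __u32 based bitmap such as the event and
 * OCF masks below; unlike test_bit() it does not expect an unsigned
 * long array.
 */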
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

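/* Default filter applied to unprivileged raw sockets: a packet type
 * mask, an event mask and, per OGF, a bitmap of OCFs that may be sent
 * without CAP_NET_RAW (see hci_sock_sendmsg and hci_sock_setsockopt).
 */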
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

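/* Returns true when the socket's filter rejects this packet, i.e. the
 * frame must not be queued to that raw socket.
 */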
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

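/* Build a HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX record for the
 * monitor channel when a controller is registered or unregistered.
 */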
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

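/* Replay a NEW_INDEX event for every registered controller so that a
 * freshly bound monitor socket learns about already present devices.
 */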
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

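/* Propagate a device event to the monitor channel and to raw sockets,
 * and detach all sockets from a controller that goes away.
 */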
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

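/* Management channels (e.g. HCI_CHANNEL_CONTROL) register themselves
 * in mgmt_chan_list; lookups under mgmt_chan_list_lock are used by
 * bind, sendmsg and recvmsg below.
 */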
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

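/* Closing a user channel socket hands the controller back to the
 * management interface and shuts the device down again.
 */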
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

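/* Ioctls that do not require a bound socket are handled here directly;
 * anything else is passed on to hci_sock_bound_ioctl() with the socket
 * lock held.
 */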
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

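/* Binding selects the channel: raw sockets may attach to a single
 * controller, the user channel takes exclusive control of one, the
 * monitor channel requires CAP_NET_RAW, and any other channel number
 * must match a registered management channel.
 */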
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name, etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

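/* Attach direction and timestamp ancillary data to a received frame,
 * depending on the HCI_DATA_DIR and HCI_TIME_STAMP socket options.
 */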
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

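/* Receive one queued frame; raw sockets additionally get the control
 * messages selected via hci_sock_cmsg().
 */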
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

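/* Parse and dispatch one management command: validate the header,
 * opcode, trust level, controller index and parameter length before
 * calling the handler registered by the management channel.
 */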
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

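/* The first byte of every frame sent on a raw or user channel socket
 * is the HCI packet type; management channels are dispatched through
 * hci_mgmt_cmd() instead.
 */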
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}