/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

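/* Test bit 'nr' in a bitmask laid out as an array of __u32 words.
 * Used for the socket filter's event mask and for the security
 * filter's per-OGF command masks, which span multiple words.
 */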
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF	5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

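/* Default limits for unprivileged sockets: type_mask and event_mask
 * bound what a socket without CAP_NET_RAW may request in its receive
 * filter, and ocf_mask lists the commands (per OGF) it may send.
 */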
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

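/* Returns true when the socket's filter rejects this packet, i.e.
 * when the frame should not be delivered to the socket.
 */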
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

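/* Build the HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX frame announcing
 * a controller being registered or unregistered; any other event
 * yields NULL.
 */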
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

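/* Replay HCI_MON_NEW_INDEX events for all registered controllers so
 * that a freshly bound monitor socket learns about devices that were
 * registered before it attached.
 */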
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

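	/* The socket lock was only needed for the channel check; the
	 * device ioctls below take their own locks and may block.
	 */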
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

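	/* The user channel gives one user-space process exclusive raw
	 * access to a controller that is not otherwise in use.
	 */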
	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name, etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

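/* Validate the mgmt header, the sender's trust level and the target
 * controller index, then dispatch the command payload to the handler
 * that the channel registered for this opcode.
 */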
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}