blob: 8c429a179aa4d0e4370d8d3d5dc18d793d79817e [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/module.h>
28
29#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080030#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/errno.h>
32#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/skbuff.h>
38#include <linux/workqueue.h>
39#include <linux/interrupt.h>
Marcel Holtmann767c5eb2007-09-09 08:39:34 +020040#include <linux/compat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/socket.h>
42#include <linux/ioctl.h>
43#include <net/sock.h>
44
45#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020046#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <asm/unaligned.h>
48
49#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010051#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
Rusty Russelleb939922011-12-19 14:08:01 +000053static bool enable_mgmt;
Johan Hedberg03811012010-12-08 00:21:06 +020054
Marcel Holtmanncd82e612012-02-20 20:34:38 +010055static atomic_t monitor_promisc = ATOMIC_INIT(0);
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057/* ----- HCI socket interface ----- */
58
59static inline int hci_test_bit(int nr, void *addr)
60{
61 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
62}
63
64/* Security filter */
65static struct hci_sec_filter hci_sec_filter = {
66 /* Packet types */
67 0x10,
68 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020069 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070070 /* Commands */
71 {
72 { 0x0 },
73 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020074 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070075 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020076 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020078 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070079 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020080 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070081 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020082 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070083 }
84};
85
/* Global list of open HCI sockets, guarded by its embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
89
/* Send frame to RAW socket.
 *
 * Delivers a copy of @skb (with the packet-type byte prepended) to every
 * bound raw-channel socket attached to @hdev that passes its per-socket
 * filter. The originating socket is skipped. Does not consume @skb.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	/* Private copy with the type byte pushed on; created lazily on the
	 * first matching socket and cloned for each further recipient. */
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter: vendor packets map to filter bit 0 */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* When an opcode filter is set, only pass the
			 * Command Complete/Status events that match it.
			 * Offsets 3 and 4 are where the opcode sits in
			 * the respective event payloads. */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference on the private copy (NULL-safe) */
	kfree_skb(skb_copy);
}
160
161/* Send frame to control socket */
162void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
163{
164 struct sock *sk;
165 struct hlist_node *node;
166
167 BT_DBG("len %d", skb->len);
168
169 read_lock(&hci_sk_list.lock);
170
171 sk_for_each(sk, node, &hci_sk_list.head) {
172 struct sk_buff *nskb;
173
174 /* Skip the original socket */
175 if (sk == skip_sk)
176 continue;
177
178 if (sk->sk_state != BT_BOUND)
179 continue;
180
181 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
182 continue;
183
184 nskb = skb_clone(skb, GFP_ATOMIC);
185 if (!nskb)
186 continue;
187
188 if (sock_queue_rcv_skb(sk, nskb))
189 kfree_skb(nskb);
190 }
191
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 read_unlock(&hci_sk_list.lock);
193}
194
/* Send frame to monitor socket.
 *
 * Wraps @skb in a monitor header (opcode derived from packet type and
 * direction, device index, length) and delivers a copy to every bound
 * monitor-channel socket. Cheap early exit when no monitor socket is in
 * promiscuous mode. Does not consume @skb.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	/* Lazily-built private copy carrying the monitor header */
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map HCI packet type (+ direction for data packets) to the
	 * monitor opcode; unknown types are not forwarded. */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference on the private copy (NULL-safe) */
	kfree_skb(skb_copy);
}
269
270static void send_monitor_event(struct sk_buff *skb)
271{
272 struct sock *sk;
273 struct hlist_node *node;
274
275 BT_DBG("len %d", skb->len);
276
277 read_lock(&hci_sk_list.lock);
278
279 sk_for_each(sk, node, &hci_sk_list.head) {
280 struct sk_buff *nskb;
281
282 if (sk->sk_state != BT_BOUND)
283 continue;
284
285 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
286 continue;
287
288 nskb = skb_clone(skb, GFP_ATOMIC);
289 if (!nskb)
290 continue;
291
292 if (sock_queue_rcv_skb(sk, nskb))
293 kfree_skb(nskb);
294 }
295
296 read_unlock(&hci_sk_list.lock);
297}
298
/* Build a monitor-channel event skb for a device lifecycle @event.
 *
 * HCI_DEV_REG produces a New Index record (type, bus, address, name);
 * HCI_DEV_UNREG produces an empty Del Index record. Returns a freshly
 * allocated, timestamped skb with the monitor header pushed on, or NULL
 * on allocation failure / unknown event. Caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte name field; copies exactly 8 bytes of
		 * hdev->name (e.g. "hci0") */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Monitor header precedes the payload; len excludes the header */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
342
343static void send_monitor_replay(struct sock *sk)
344{
345 struct hci_dev *hdev;
346
347 read_lock(&hci_dev_list_lock);
348
349 list_for_each_entry(hdev, &hci_dev_list, list) {
350 struct sk_buff *skb;
351
352 skb = create_monitor_event(hdev, HCI_DEV_REG);
353 if (!skb)
354 continue;
355
356 if (sock_queue_rcv_skb(sk, skb))
357 kfree_skb(skb);
358 }
359
360 read_unlock(&hci_dev_list_lock);
361}
362
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and delivers it to raw sockets via hci_send_to_sock(). @hdev may
 * be NULL (caller passes NULL for device-independent events; delivery
 * then matches sockets whose hdev is also NULL).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so direction-aware consumers treat it like a
	 * received event */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
390
/* Notify sockets of a device lifecycle @event (register/unregister/...).
 *
 * Forwards the event to monitor sockets and as a stack-internal event to
 * raw sockets; on HCI_DEV_UNREG also detaches every socket still bound
 * to @hdev, waking it with EPIPE and dropping its device reference.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				/* Wake anyone sleeping on this socket */
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
434
/* Release an HCI socket: undo promiscuous counters, unlink from the
 * global socket list, drop the device reference and free queued skbs.
 * Always returns 0. */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Monitor sockets bumped monitor_promisc at bind time */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* Balance the promisc/refcount taken at bind time */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
465
Antti Julkub2a66aa2011-06-15 12:01:14 +0300466static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200467{
468 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300469 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200470
471 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
472 return -EFAULT;
473
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300474 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300475
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200476 err = hci_blacklist_add(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300477
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300478 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300479
480 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200481}
482
Antti Julkub2a66aa2011-06-15 12:01:14 +0300483static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200484{
485 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300486 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200487
488 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
489 return -EFAULT;
490
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300491 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300492
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200493 err = hci_blacklist_del(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300494
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300495 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300496
497 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200498}
499
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held (see hci_sock_ioctl). Returns
 * -EBADFD when the socket is not attached to a device, -EACCES/-EPERM
 * on capability failures, otherwise the per-command result.
 */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		/* Quirky devices must stay in raw mode */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Let the driver handle vendor-specific ioctls */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
545
/* Top-level ioctl dispatcher for HCI sockets.
 *
 * Device-independent commands are handled directly (with CAP_NET_ADMIN
 * checks for state-changing ones); anything else is deferred to
 * hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		/* Remaining commands need the socket bound to a device */
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
606
/* Bind an HCI socket to a channel (raw / control / monitor) and,
 * for raw sockets, optionally to a specific device.
 *
 * Raw: takes a device reference and bumps hdev->promisc when a device
 * index is given. Control: requires CAP_NET_ADMIN and enable_mgmt.
 * Monitor: requires CAP_NET_RAW and replays existing device indexes.
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); short addresses are zero-padded */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			/* Reference + promisc dropped in hci_sock_release() */
			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE || !enable_mgmt) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Tell the new listener about already-registered devices */
		send_monitor_replay(sk);

		/* Balanced by atomic_dec in hci_sock_release() */
		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
696
/* Report the bound device for an HCI socket (the 'peer' argument is
 * ignored; HCI sockets have no distinct peer address).
 * Returns -EBADFD when the socket is not attached to a device.
 *
 * NOTE(review): hdev is loaded before lock_sock() is taken — presumably
 * safe against concurrent unbind here, but worth confirming against
 * hci_sock_dev_event()'s detach path.
 */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;

	release_sock(sk);
	return 0;
}
717
/* Attach ancillary data (direction and/or timestamp) to a received
 * message, according to the socket's cmsg_mask set via setsockopt.
 * Handles 32-bit compat callers by converting the timeval layout. */
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit userspace expects a compat_timeval layout */
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900751
/* Receive one queued frame on an HCI socket.
 *
 * Dequeues a single skb, copies up to @len bytes to userspace (setting
 * MSG_TRUNC when the frame is larger), and attaches channel-specific
 * ancillary data. Returns bytes copied, 0 when the socket is closed,
 * or a negative errno. MSG_OOB is not supported.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		/* Frame larger than the user buffer: truncate and flag it */
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Ancillary data depends on the bound channel */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
796
/* Send a frame on an HCI socket.
 *
 * Control-channel messages go to mgmt_control(); monitor sockets are
 * read-only (-EOPNOTSUPP). Raw frames start with a packet-type byte;
 * commands are checked against the security filter (unless the caller
 * has CAP_NET_RAW) and queued to the device's command or raw queue.
 * Returns the number of bytes accepted or a negative errno.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte + smallest HCI header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; strip it from the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue whitelisted opcodes */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
					!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor commands (ogf 0x3f) and raw-mode devices bypass
		 * the command queue */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Non-command packets require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
895
/* Set HCI socket options (raw channel only).
 *
 * HCI_DATA_DIR / HCI_TIME_STAMP toggle ancillary-data flags;
 * HCI_FILTER installs a receive filter, clamped to the security filter
 * for callers without CAP_NET_RAW. Returns 0 or a negative errno.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	/* Options below only make sense on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-seed uf with the current filter so a short
		 * copy_from_user leaves unspecified fields unchanged */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged callers cannot widen beyond the security
		 * filter */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
977
/* Get HCI socket options (raw channel only): mirrors the options set by
 * hci_sock_setsockopt(). Copies at most the user-supplied length for
 * HCI_FILTER. Returns 0 or a negative errno. */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		/* Snapshot the kernel filter into the user-visible layout */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1041
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001042static const struct proto_ops hci_sock_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 .family = PF_BLUETOOTH,
1044 .owner = THIS_MODULE,
1045 .release = hci_sock_release,
1046 .bind = hci_sock_bind,
1047 .getname = hci_sock_getname,
1048 .sendmsg = hci_sock_sendmsg,
1049 .recvmsg = hci_sock_recvmsg,
1050 .ioctl = hci_sock_ioctl,
1051 .poll = datagram_poll,
1052 .listen = sock_no_listen,
1053 .shutdown = sock_no_shutdown,
1054 .setsockopt = hci_sock_setsockopt,
1055 .getsockopt = hci_sock_getsockopt,
1056 .connect = sock_no_connect,
1057 .socketpair = sock_no_socketpair,
1058 .accept = sock_no_accept,
1059 .mmap = sock_no_mmap
1060};
1061
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc() reserve
 * room for the per-socket struct hci_pinfo accessed via hci_pi(sk).
 */
static struct proto hci_sk_proto = {
	.name = "HCI",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hci_pinfo)
};
1067
Eric Paris3f378b62009-11-05 22:18:14 -08001068static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1069 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070{
1071 struct sock *sk;
1072
1073 BT_DBG("sock %p", sock);
1074
1075 if (sock->type != SOCK_RAW)
1076 return -ESOCKTNOSUPPORT;
1077
1078 sock->ops = &hci_sock_ops;
1079
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001080 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 if (!sk)
1082 return -ENOMEM;
1083
1084 sock_init_data(sock, sk);
1085
1086 sock_reset_flag(sk, SOCK_ZAPPED);
1087
1088 sk->sk_protocol = protocol;
1089
1090 sock->state = SS_UNCONNECTED;
1091 sk->sk_state = BT_OPEN;
1092
1093 bt_sock_link(&hci_sk_list, sk);
1094 return 0;
1095}
1096
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00001097static const struct net_proto_family hci_sock_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 .family = PF_BLUETOOTH,
1099 .owner = THIS_MODULE,
1100 .create = hci_sock_create,
1101};
1102
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103int __init hci_sock_init(void)
1104{
1105 int err;
1106
1107 err = proto_register(&hci_sk_proto, 0);
1108 if (err < 0)
1109 return err;
1110
1111 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1112 if (err < 0)
1113 goto error;
1114
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115 BT_INFO("HCI socket layer initialized");
1116
1117 return 0;
1118
1119error:
1120 BT_ERR("HCI socket registration failed");
1121 proto_unregister(&hci_sk_proto);
1122 return err;
1123}
1124
Anand Gadiyarb7440a142011-02-22 12:43:09 +05301125void hci_sock_cleanup(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126{
1127 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1128 BT_ERR("HCI socket unregistration failed");
1129
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 proto_unregister(&hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131}
Johan Hedberg03811012010-12-08 00:21:06 +02001132
/* Boot/runtime toggle for the HCI Management interface; the backing
 * enable_mgmt variable is defined earlier in this file (outside this
 * view — presumably a bool default-off, TODO confirm).  Mode 0644 makes
 * it writable by root via /sys/module parameters.
 */
module_param(enable_mgmt, bool, 0644);
MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");