/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

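/* Test whether bit 'nr' is set in a __u32 bitmap array */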
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

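/* Send a monitor meta event to all bound monitor sockets */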
static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

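/* Build a New Index or Delete Index event for the monitor channel */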
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

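/* Replay New Index events for already registered devices to a new monitor socket */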
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

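/* Notify monitor and stack sockets about HCI device events */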
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

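/* Release an HCI socket and drop its device reference */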
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

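/* HCIBLOCKADDR ioctl helper */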
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

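/* HCIUNBLOCKADDR ioctl helper */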
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

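/* Main ioctl handler; unhandled commands fall through to hci_sock_bound_ioctl() */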
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

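/* Bind the socket to an HCI device and channel */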
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

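/* Report the bound HCI device as the socket name */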
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;

	release_sock(sk);
	return 0;
}

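/* Attach direction and timestamp control messages to received frames */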
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

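/* Receive a single queued frame from the socket */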
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

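/* Transmit a frame from userspace towards the controller or the management interface */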
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

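/* Set data direction, timestamping and HCI filter options (raw channel only) */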
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

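/* Retrieve data direction, timestamping and HCI filter options (raw channel only) */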
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

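/* Create a new HCI socket (raw sockets only) */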
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

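/* Register the HCI socket protocol and family */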
int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

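/* Unregister the HCI socket protocol and family */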
void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	proto_unregister(&hci_sk_proto);
}