/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
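
/*
 * Note (added for clarity, not in the original source): the event mask
 * above and the per-OGF command masks are plain bitmaps consulted through
 * hci_test_bit(), with the HCI event code or command OCF used as the bit
 * index.  For example, HCI_EV_CMD_COMPLETE (0x0e) is bit 14 of the first
 * event-mask word: word 14 >> 5 = 0, bit 14 & 31 = 14, which is set in
 * 0x1000d9fe, so unprivileged raw sockets may receive Command Complete.
 */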

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *) skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
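
/*
 * Note (added for clarity, not in the original source): every frame
 * delivered to an HCI_CHANNEL_MONITOR socket is prefixed with a
 * struct hci_mon_hdr carrying a monitor opcode that encodes packet type
 * and direction (e.g. HCI_MON_ACL_RX_PKT), the controller index and the
 * payload length.  This is the framing that user-space monitor tools
 * such as btmon consume.
 */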

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
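
/*
 * Illustrative user-space sketch (added note, not part of this file):
 * device-level ioctls are issued on any HCI socket, with the controller
 * index passed directly as the ioctl argument, matching hci_dev_open(arg)
 * above.  A privileged (CAP_NET_ADMIN) process bringing up hci0 would do
 * roughly:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 */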

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
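
/*
 * Illustrative user-space sketch (added note, not part of this file):
 * binding a raw HCI socket to the first controller uses the sockaddr_hci
 * layout consumed above, roughly:
 *
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *) &a, sizeof(a));
 *
 * Control and monitor channels bind the same way but require
 * hci_dev == HCI_DEV_NONE and CAP_NET_ADMIN / CAP_NET_RAW respectively.
 */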

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;

	release_sock(sk);
	return 0;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
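
/*
 * Illustrative user-space sketch (added note, not part of this file):
 * a raw-channel client narrows what it receives by installing a filter
 * whose layout matches struct hci_ufilter handled above, e.g. to see
 * only HCI event packets with all event codes enabled:
 *
 *	struct hci_ufilter uf = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { ~0U, ~0U },
 *	};
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf));
 *
 * Without CAP_NET_RAW the requested masks are intersected with
 * hci_sec_filter before taking effect.
 */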

static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = hci_sock_release,
	.bind = hci_sock_bind,
	.getname = hci_sock_getname,
	.sendmsg = hci_sock_sendmsg,
	.recvmsg = hci_sock_recvmsg,
	.ioctl = hci_sock_ioctl,
	.poll = datagram_poll,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = hci_sock_setsockopt,
	.getsockopt = hci_sock_getsockopt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.mmap = sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name = "HCI",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	proto_unregister(&hci_sk_proto);
}