/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

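/* Test bit 'nr' in a bitmap laid out as an array of __u32 words */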
static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct hlist_node *node;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, node, &hci_sk_list.head) {
                struct hci_filter *flt;
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
                        continue;

                /* Apply filter */
                flt = &hci_pi(sk)->filter;

                if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
                              0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
                              &flt->type_mask))
                        continue;

                if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
                        int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

                        if (!hci_test_bit(evt, &flt->event_mask))
                                continue;

                        if (flt->opcode &&
                            ((evt == HCI_EV_CMD_COMPLETE &&
                              flt->opcode !=
                              get_unaligned((__le16 *)(skb->data + 3))) ||
                             (evt == HCI_EV_CMD_STATUS &&
                              flt->opcode !=
                              get_unaligned((__le16 *)(skb->data + 4)))))
                                continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;
        struct hlist_node *node;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, node, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct hlist_node *node;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, node, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                if (!skb_copy) {
                        struct hci_mon_hdr *hdr;

                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
                                               GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put header before the data */
                        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
                        hdr->opcode = opcode;
                        hdr->index = cpu_to_le16(hdev->id);
                        hdr->len = cpu_to_le16(skb->len);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

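/* Queue a copy of a monitor event to every bound monitor socket */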
static void send_monitor_event(struct sk_buff *skb)
{
        struct sock *sk;
        struct hlist_node *node;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, node, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

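/* Build a HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX packet for a device event */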
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

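/* Replay a NEW_INDEX event for every registered device to a freshly
 * bound monitor socket.
 */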
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        skb->dev = (void *) hdev;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

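/* Propagate a device event to monitor and stack-internal sockets; on
 * unregister, detach any sockets still bound to the device.
 */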
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        send_monitor_event(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;
                struct hlist_node *node;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, node, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

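/* Release an HCI socket: drop promiscuous references and purge its queues */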
static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

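/* HCIBLOCKADDR/HCIUNBLOCKADDR helpers: copy a bdaddr from user space and
 * update the device blacklist under the device lock.
 */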
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_add(hdev, &bdaddr, 0);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_del(hdev, &bdaddr, 0);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        return -EPERM;

                if (arg)
                        set_bit(HCI_RAW, &hdev->flags);
                else
                        clear_bit(HCI_RAW, &hdev->flags);

                return 0;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);

        default:
                if (hdev->ioctl)
                        return hdev->ioctl(hdev, cmd, arg);
                return -EINVAL;
        }
}

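/* Ioctls that do not need a bound socket are handled here; anything else
 * falls through to hci_sock_bound_ioctl() with the socket locked.
 */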
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        struct sock *sk = sock->sk;
        void __user *argp = (void __user *) arg;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EACCES;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);

        default:
                lock_sock(sk);
                err = hci_sock_bound_ioctl(sk, cmd, arg);
                release_sock(sk);
                return err;
        }
}

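/* Bind the socket to a channel (raw, control or monitor) and, for raw
 * sockets, optionally to a specific device.
 */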
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!hdev)
                return -EBADFD;

        lock_sock(sk);

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev = hdev->id;

        release_sock(sk);
        return 0;
}

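/* Attach direction and timestamp ancillary data requested via socket options */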
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

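/* Transmit a frame from user space: control-channel messages are handed to
 * the management interface, raw-channel commands are checked against the
 * security filter before being queued to the device.
 */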
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);
        skb->dev = (void *) hdev;

        if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

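/* Socket options are only valid on the raw channel; HCI_FILTER updates are
 * constrained by the security filter for callers without CAP_NET_RAW.
 */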
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EINVAL;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

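/* Report the data-direction, timestamp and filter settings of a raw socket */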
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EINVAL;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

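/* Create a new raw HCI socket and link it into the global socket list */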
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

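/* Register the HCI protocol and the PF_BLUETOOTH/BTPROTO_HCI socket family */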
int __init hci_sock_init(void)
{
        int err;

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0)
                goto error;

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        BT_ERR("HCI socket registration failed");
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        if (bt_sock_unregister(BTPROTO_HCI) < 0)
                BT_ERR("HCI socket unregistration failed");

        proto_unregister(&hci_sk_proto);
}