/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

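/* Check a frame against the per-socket HCI filter; returns true when the
 * frame must NOT be delivered to this socket.
 */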
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		if (is_filtered_packet(sk, skb))
			continue;

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

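/* Deliver a generated monitor event to every bound monitor channel socket */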
static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

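/* Build an HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX packet describing a device
 * register or unregister event.
 */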
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

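/* Replay a NEW_INDEX event for every registered controller to a freshly
 * bound monitor socket.
 */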
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

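/* Propagate a device event to monitor sockets and, as a stack-internal HCI
 * event, to raw sockets; on unregister, detach all sockets bound to the device.
 */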
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

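/* Release a socket: drop promiscuous references, unlink it from the socket
 * list and purge its queues.
 */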
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

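/* Helpers for the HCIBLOCKADDR/HCIUNBLOCKADDR ioctls: copy the address from
 * user space and update the device blacklist under the device lock.
 */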
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

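/* Ioctls that do not need a bound device are handled directly here; anything
 * else falls through to hci_sock_bound_ioctl() with the socket locked.
 */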
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

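/* Bind a socket to a channel (raw, control or monitor) and, for the raw
 * channel, optionally to a specific device.
 */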
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

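/* Report the bound device and channel of a socket; peer names are not
 * supported for HCI sockets.
 */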
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

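/* Attach direction and timestamp ancillary data (cmsg) to a received message */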
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

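/* Receive one queued frame and add channel-specific ancillary data */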
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

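/* Socket options (raw channel only): data direction, timestamps and the
 * HCI filter, with unprivileged filters clamped to the security filter.
 */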
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

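/* Read back the raw channel socket options set above */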
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

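/* Create a new raw HCI socket and link it into the socket list */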
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}