/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

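/* Test bit 'nr' in an array of 32-bit words, as used for the event and
 * OCF masks of the security filter below.
 */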
static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

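/* Return true when the socket's filter rejects this packet: the packet
 * type and event code are checked against the per-socket masks, and for
 * Command Complete/Status events the opcode is matched as well.
 */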
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
                        continue;

                if (is_filtered_packet(sk, skb))
                        continue;

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                if (!skb_copy) {
                        struct hci_mon_hdr *hdr;

                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
                                               GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put header before the data */
                        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
                        hdr->opcode = opcode;
                        hdr->index = cpu_to_le16(hdev->id);
                        hdr->len = cpu_to_le16(skb->len);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

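/* Queue a clone of the skb to every socket bound to the monitor channel */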
static void send_monitor_event(struct sk_buff *skb)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

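/* Build a New Index or Delete Index monitor packet for a controller
 * register/unregister event.
 */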
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

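/* Replay a New Index event for every registered controller so that a
 * freshly bound monitor socket learns about already existing devices.
 */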
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        skb->dev = (void *) hdev;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

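/* Propagate a device event to monitor and stack-internal listeners; on
 * unregister, detach all sockets still bound to the device.
 */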
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        send_monitor_event(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

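/* Helpers for the HCIBLOCKADDR/HCIUNBLOCKADDR ioctls */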
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_add(hdev, &bdaddr, 0);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_del(hdev, &bdaddr, 0);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        return -EPERM;

                if (arg)
                        set_bit(HCI_RAW, &hdev->flags);
                else
                        clear_bit(HCI_RAW, &hdev->flags);

                return 0;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);

        default:
                if (hdev->ioctl)
                        return hdev->ioctl(hdev, cmd, arg);
                return -EINVAL;
        }
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        struct sock *sk = sock->sk;
        void __user *argp = (void __user *) arg;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);

        default:
                lock_sock(sk);
                err = hci_sock_bound_ioctl(sk, cmd, arg);
                release_sock(sk);
                return err;
        }
}

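/* Bind the socket to a channel (raw, control or monitor) and, for the
 * raw channel, optionally to a specific controller.
 */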
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

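/* Report the bound controller id and channel of a raw socket */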
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

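/* Attach direction and timestamp ancillary data if the corresponding
 * socket options are enabled.
 */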
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

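/* Transmit a frame from user space; control channel messages go to the
 * management interface, while raw channel frames carry the packet type
 * in their first byte and are permission-checked before being queued.
 */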
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);
        skb->dev = (void *) hdev;

        if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

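/* Socket options (data direction, timestamps and the HCI filter) are
 * only available on the raw channel.
 */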
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

int __init hci_sock_init(void)
{
        int err;

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}