blob: c45ec25aefb9566aa97054104f5b3db270436167 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
/* Count of sockets currently bound to the monitor channel; tracing work
 * in hci_send_to_monitor() is skipped entirely while this is zero. */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
35
Linus Torvalds1da177e2005-04-16 15:20:36 -070036/* ----- HCI socket interface ----- */
37
38static inline int hci_test_bit(int nr, void *addr)
39{
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
41}
42
43/* Security filter */
44static struct hci_sec_filter hci_sec_filter = {
45 /* Packet types */
46 0x10,
47 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020048 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 /* Commands */
50 {
51 { 0x0 },
52 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020053 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020055 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070056 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020057 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020059 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070060 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020061 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070062 }
63};
64
/* Global list of all open HCI sockets; walked under its rwlock by the
 * broadcast helpers below. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
68
Marcel Holtmannf81fe642013-08-25 23:25:15 -070069static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
70{
71 struct hci_filter *flt;
72 int flt_type, flt_event;
73
74 /* Apply filter */
75 flt = &hci_pi(sk)->filter;
76
77 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
78 flt_type = 0;
79 else
80 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
81
82 if (!test_bit(flt_type, &flt->type_mask))
83 return true;
84
85 /* Extra filter for event packets only */
86 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
87 return false;
88
89 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
90
91 if (!hci_test_bit(flt_event, &flt->event_mask))
92 return true;
93
94 /* Check filter only when opcode is set */
95 if (!flt->opcode)
96 return false;
97
98 if (flt_event == HCI_EV_CMD_COMPLETE &&
99 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
100 return true;
101
102 if (flt_event == HCI_EV_CMD_STATUS &&
103 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
104 return true;
105
106 return false;
107}
108
/* Send frame to RAW socket.
 *
 * Broadcasts skb to every raw-channel socket bound to hdev that passes
 * its per-socket filter, skipping the originating socket.  A private
 * copy with the packet-type byte prepended is built lazily on first
 * need, then cloned per recipient.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		if (is_filtered_packet(sk, skb))
			continue;

		if (!skb_copy) {
			/* Create a private copy with headroom; done at most
			 * once per broadcast, only if someone wants it */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* On queue failure (e.g. rcvbuf full) drop our clone */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* kfree_skb(NULL) is a no-op if no copy was ever made */
	kfree_skb(skb_copy);
}
157
158/* Send frame to control socket */
159void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
160{
161 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100162
163 BT_DBG("len %d", skb->len);
164
165 read_lock(&hci_sk_list.lock);
166
Sasha Levinb67bfe02013-02-27 17:06:00 -0800167 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100168 struct sk_buff *nskb;
169
170 /* Skip the original socket */
171 if (sk == skip_sk)
172 continue;
173
174 if (sk->sk_state != BT_BOUND)
175 continue;
176
177 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
178 continue;
179
180 nskb = skb_clone(skb, GFP_ATOMIC);
181 if (!nskb)
182 continue;
183
184 if (sock_queue_rcv_skb(sk, nskb))
185 kfree_skb(nskb);
186 }
187
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188 read_unlock(&hci_sk_list.lock);
189}
190
/* Send frame to monitor socket.
 *
 * Wraps skb in an hci_mon_hdr whose opcode encodes packet type and
 * direction, then broadcasts it to all monitor-channel sockets.  Cheap
 * early exit when no monitor socket exists; the wrapped copy is built
 * lazily on first recipient and cloned per socket.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the
	 * monitor-protocol opcode; unknown types are not traced */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* No-op when no monitor socket triggered the copy */
	kfree_skb(skb_copy);
}
265
266static void send_monitor_event(struct sk_buff *skb)
267{
268 struct sock *sk;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100269
270 BT_DBG("len %d", skb->len);
271
272 read_lock(&hci_sk_list.lock);
273
Sasha Levinb67bfe02013-02-27 17:06:00 -0800274 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100275 struct sk_buff *nskb;
276
277 if (sk->sk_state != BT_BOUND)
278 continue;
279
280 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
281 continue;
282
283 nskb = skb_clone(skb, GFP_ATOMIC);
284 if (!nskb)
285 continue;
286
287 if (sock_queue_rcv_skb(sk, nskb))
288 kfree_skb(nskb);
289 }
290
291 read_unlock(&hci_sk_list.lock);
292}
293
/* Build a monitor-channel packet describing a device lifecycle event.
 *
 * HCI_DEV_REG produces a NEW_INDEX packet carrying the controller's
 * type, bus, address and name; HCI_DEV_UNREG produces an empty
 * DEL_INDEX packet.  Returns NULL on allocation failure or for any
 * other event value.  Caller owns (and must free) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* ni->name is a fixed 8-byte field */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len covers only the payload */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
337
338static void send_monitor_replay(struct sock *sk)
339{
340 struct hci_dev *hdev;
341
342 read_lock(&hci_dev_list_lock);
343
344 list_for_each_entry(hdev, &hci_dev_list, list) {
345 struct sk_buff *skb;
346
347 skb = create_monitor_event(hdev, HCI_DEV_REG);
348 if (!skb)
349 continue;
350
351 if (sock_queue_rcv_skb(sk, skb))
352 kfree_skb(skb);
353 }
354
355 read_unlock(&hci_dev_list_lock);
356}
357
/* Generate internal stack event.
 *
 * Synthesizes an HCI_EV_STACK_INTERNAL event carrying 'dlen' bytes of
 * 'data' and delivers it to raw sockets via hci_send_to_sock().  The
 * skb is marked incoming and timestamped so it looks like a received
 * event to listeners.  hdev may be NULL (device-independent events).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the synthetic event as if it came from the controller */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	/* hci_send_to_sock clones; we still own and free the original */
	kfree_skb(skb);
}
385
/* Notify sockets about a device lifecycle event (register, unregister,
 * up, down, ...): trace it to monitor sockets, emit an internal stack
 * event for raw sockets, and on unregister detach every socket still
 * bound to the disappearing device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* _nested: we already hold the list read lock */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Wake readers with EPIPE and reset state */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference the bind took */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
428
/* Release an HCI socket: undo monitor promiscuity, unlink from the
 * global socket list, drop the device reference taken at bind time,
 * and tear down the sock.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* Balance the promisc count and reference from bind */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	/* Discard anything still queued in either direction */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
459
Antti Julkub2a66aa2011-06-15 12:01:14 +0300460static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200461{
462 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300463 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200464
465 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
466 return -EFAULT;
467
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300468 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300469
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200470 err = hci_blacklist_add(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300471
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300472 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300473
474 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200475}
476
Antti Julkub2a66aa2011-06-15 12:01:14 +0300477static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200478{
479 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300480 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200481
482 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
483 return -EFAULT;
484
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300485 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300486
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200487 err = hci_blacklist_del(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300488
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300489 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300490
491 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200492}
493
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held; fails with -EBADFD when the socket
 * is not bound to a device.  Privileged operations require
 * CAP_NET_ADMIN; anything unrecognized is forwarded to the driver's
 * own ioctl hook if present.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Quirky devices may not be switched to raw mode */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Let the driver handle vendor-specific ioctls */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
540
/* Top-level ioctl dispatcher for HCI sockets.
 *
 * Device-independent queries need no privilege; device control
 * operations require CAP_NET_ADMIN.  Anything not handled here is
 * passed to hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
602
/* Bind an HCI socket to a channel (raw / control / monitor) and, for
 * the raw channel, optionally to a specific controller.
 *
 * Control binding requires CAP_NET_ADMIN, monitor binding CAP_NET_RAW;
 * both reject a specific device index.  Raw binding to a device takes
 * a reference and bumps the device's promisc count (released in
 * hci_sock_release / hci_sock_dev_event).
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy only as much as the caller provided; the rest stays zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Catch the new listener up on all existing controllers */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
692
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300693static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
694 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695{
696 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
697 struct sock *sk = sock->sk;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100698 struct hci_dev *hdev = hci_pi(sk)->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699
700 BT_DBG("sock %p sk %p", sock, sk);
701
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100702 if (!hdev)
703 return -EBADFD;
704
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 lock_sock(sk);
706
707 *addr_len = sizeof(*haddr);
708 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100709 haddr->hci_dev = hdev->id;
Mathias Krause3f68ba02012-08-15 11:31:47 +0000710 haddr->hci_channel= 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711
712 release_sock(sk);
713 return 0;
714}
715
/* Attach ancillary data (direction flag and/or timestamp) to a message
 * being delivered to a raw socket, according to the socket's cmsg mask
 * set via HCI_DATA_DIR / HCI_TIME_STAMP socket options.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit userspace on a 64-bit kernel expects the
		 * narrower compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900752
/* Receive one queued frame on an HCI socket.
 *
 * Datagram semantics: a frame longer than the caller's buffer is
 * truncated (MSG_TRUNC set).  Raw sockets get cmsg metadata, control
 * and monitor sockets a plain timestamp.  Returns bytes copied, 0 on a
 * closed socket, or a negative error.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* HCI sockets have no source address to report */
	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* Copy error wins; otherwise report the copied length */
	return err ? : copied;
}
798
/* Transmit a frame from userspace on an HCI socket.
 *
 * Only the raw channel accepts frames here (control traffic is routed
 * to mgmt_control, monitor is read-only).  The first byte of the
 * payload is the HCI packet type; commands are checked against the
 * security filter unless the sender has CAP_NET_RAW, then queued on
 * the command or raw queue as appropriate.  Returns the number of
 * bytes consumed or a negative error.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte + smallest header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First payload byte selects the packet type */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue opcodes allowed by
		 * the security filter */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor commands (OGF 0x3f) and raw-mode devices bypass
		 * the command queue entirely */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Non-command traffic requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
903
/* Set socket options on a raw-channel HCI socket: cmsg flags
 * (HCI_DATA_DIR, HCI_TIME_STAMP) and the receive filter (HCI_FILTER).
 * Non-raw channels get -EINVAL.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the current filter so a short write from
		 * userspace leaves the unwritten fields unchanged */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen beyond the security
		 * filter */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
986
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300987static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
988 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989{
990 struct hci_ufilter uf;
991 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +0100992 int len, opt, err = 0;
993
994 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995
996 if (get_user(len, optlen))
997 return -EFAULT;
998
Marcel Holtmanncedc5462012-02-20 14:50:33 +0100999 lock_sock(sk);
1000
1001 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1002 err = -EINVAL;
1003 goto done;
1004 }
1005
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 switch (optname) {
1007 case HCI_DATA_DIR:
1008 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1009 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001010 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011 opt = 0;
1012
1013 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001014 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015 break;
1016
1017 case HCI_TIME_STAMP:
1018 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1019 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001020 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021 opt = 0;
1022
1023 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001024 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025 break;
1026
1027 case HCI_FILTER:
1028 {
1029 struct hci_filter *f = &hci_pi(sk)->filter;
1030
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001031 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 uf.type_mask = f->type_mask;
1033 uf.opcode = f->opcode;
1034 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1035 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1036 }
1037
1038 len = min_t(unsigned int, len, sizeof(uf));
1039 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001040 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 break;
1042
1043 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001044 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 break;
1046 }
1047
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001048done:
1049 release_sock(sk);
1050 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051}
1052
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001053static const struct proto_ops hci_sock_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 .family = PF_BLUETOOTH,
1055 .owner = THIS_MODULE,
1056 .release = hci_sock_release,
1057 .bind = hci_sock_bind,
1058 .getname = hci_sock_getname,
1059 .sendmsg = hci_sock_sendmsg,
1060 .recvmsg = hci_sock_recvmsg,
1061 .ioctl = hci_sock_ioctl,
1062 .poll = datagram_poll,
1063 .listen = sock_no_listen,
1064 .shutdown = sock_no_shutdown,
1065 .setsockopt = hci_sock_setsockopt,
1066 .getsockopt = hci_sock_getsockopt,
1067 .connect = sock_no_connect,
1068 .socketpair = sock_no_socketpair,
1069 .accept = sock_no_accept,
1070 .mmap = sock_no_mmap
1071};
1072
1073static struct proto hci_sk_proto = {
1074 .name = "HCI",
1075 .owner = THIS_MODULE,
1076 .obj_size = sizeof(struct hci_pinfo)
1077};
1078
Eric Paris3f378b62009-11-05 22:18:14 -08001079static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1080 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081{
1082 struct sock *sk;
1083
1084 BT_DBG("sock %p", sock);
1085
1086 if (sock->type != SOCK_RAW)
1087 return -ESOCKTNOSUPPORT;
1088
1089 sock->ops = &hci_sock_ops;
1090
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001091 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 if (!sk)
1093 return -ENOMEM;
1094
1095 sock_init_data(sock, sk);
1096
1097 sock_reset_flag(sk, SOCK_ZAPPED);
1098
1099 sk->sk_protocol = protocol;
1100
1101 sock->state = SS_UNCONNECTED;
1102 sk->sk_state = BT_OPEN;
1103
1104 bt_sock_link(&hci_sk_list, sk);
1105 return 0;
1106}
1107
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00001108static const struct net_proto_family hci_sock_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109 .family = PF_BLUETOOTH,
1110 .owner = THIS_MODULE,
1111 .create = hci_sock_create,
1112};
1113
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114int __init hci_sock_init(void)
1115{
1116 int err;
1117
1118 err = proto_register(&hci_sk_proto, 0);
1119 if (err < 0)
1120 return err;
1121
1122 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001123 if (err < 0) {
1124 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001126 }
1127
Al Virob0316612013-04-04 19:14:33 -04001128 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001129 if (err < 0) {
1130 BT_ERR("Failed to create HCI proc file");
1131 bt_sock_unregister(BTPROTO_HCI);
1132 goto error;
1133 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 BT_INFO("HCI socket layer initialized");
1136
1137 return 0;
1138
1139error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 proto_unregister(&hci_sk_proto);
1141 return err;
1142}
1143
Anand Gadiyarb7440a142011-02-22 12:43:09 +05301144void hci_sock_cleanup(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145{
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001146 bt_procfs_cleanup(&init_net, "hci");
David Herrmann5e9d7f82013-02-24 19:36:51 +01001147 bt_sock_unregister(BTPROTO_HCI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 proto_unregister(&hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149}