blob: 6a93614f2c49c4a7c498f20656ed2a0560de7c33 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
/* Number of sockets currently bound to HCI_CHANNEL_MONITOR; monitor
 * frames are only generated while this is non-zero (see
 * hci_send_to_monitor() and hci_sock_dev_event()).
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */
37
38static inline int hci_test_bit(int nr, void *addr)
39{
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
41}
42
43/* Security filter */
44static struct hci_sec_filter hci_sec_filter = {
45 /* Packet types */
46 0x10,
47 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020048 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 /* Commands */
50 {
51 { 0x0 },
52 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020053 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020055 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070056 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020057 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020059 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070060 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020061 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070062 }
63};
64
/* Global list of all HCI sockets; its rwlock protects every
 * sk_for_each() walk in this file.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
68
/* Send frame to RAW socket.
 *
 * Delivers @skb to every bound HCI_CHANNEL_RAW socket attached to @hdev
 * that passes its per-socket filter.  The caller keeps ownership of
 * @skb; each recipient gets its own clone of a shared private copy that
 * carries the packet-type byte prepended to the data.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	/* Lazily created once the first matching socket is found, then
	 * shared (via skb_clone) by all further recipients.
	 */
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter: vendor packets map to filter bit 0 */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* If an opcode filter is set, Command Complete /
			 * Command Status events for other opcodes are
			 * suppressed (opcode sits at data offset 3 resp. 4).
			 */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() does not consume on failure */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* kfree_skb(NULL) is a no-op when no socket matched */
	kfree_skb(skb_copy);
}
139
140/* Send frame to control socket */
141void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
142{
143 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100144
145 BT_DBG("len %d", skb->len);
146
147 read_lock(&hci_sk_list.lock);
148
Sasha Levinb67bfe02013-02-27 17:06:00 -0800149 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100150 struct sk_buff *nskb;
151
152 /* Skip the original socket */
153 if (sk == skip_sk)
154 continue;
155
156 if (sk->sk_state != BT_BOUND)
157 continue;
158
159 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
160 continue;
161
162 nskb = skb_clone(skb, GFP_ATOMIC);
163 if (!nskb)
164 continue;
165
166 if (sock_queue_rcv_skb(sk, nskb))
167 kfree_skb(nskb);
168 }
169
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170 read_unlock(&hci_sk_list.lock);
171}
172
/* Send frame to monitor socket.
 *
 * Wraps @skb in an hci_mon_hdr (opcode derived from packet type and
 * direction) and delivers it to every bound HCI_CHANNEL_MONITOR socket.
 * Cheap early exit when no monitor socket is open.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	/* Lazily created copy with the monitor header prepended, shared
	 * by all recipients via skb_clone.
	 */
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and RX/TX direction for data packets) to the
	 * monitor-channel opcode; unknown types are not monitored.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* No-op if no monitor socket matched (skb_copy still NULL) */
	kfree_skb(skb_copy);
}
247
248static void send_monitor_event(struct sk_buff *skb)
249{
250 struct sock *sk;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100251
252 BT_DBG("len %d", skb->len);
253
254 read_lock(&hci_sk_list.lock);
255
Sasha Levinb67bfe02013-02-27 17:06:00 -0800256 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100257 struct sk_buff *nskb;
258
259 if (sk->sk_state != BT_BOUND)
260 continue;
261
262 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
263 continue;
264
265 nskb = skb_clone(skb, GFP_ATOMIC);
266 if (!nskb)
267 continue;
268
269 if (sock_queue_rcv_skb(sk, nskb))
270 kfree_skb(nskb);
271 }
272
273 read_unlock(&hci_sk_list.lock);
274}
275
/* Build a monitor-channel frame describing a device lifecycle @event
 * (HCI_DEV_REG or HCI_DEV_UNREG) for @hdev.
 *
 * Returns a freshly allocated, timestamped skb with the hci_mon_hdr
 * already prepended (caller owns and must free it), or NULL on
 * allocation failure or unhandled event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* NEW_INDEX payload: type, bus, address and name */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* ni->name is a fixed 8-byte field */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* DEL_INDEX carries no payload, only the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* hdr->len covers only the payload, not the header itself */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
319
320static void send_monitor_replay(struct sock *sk)
321{
322 struct hci_dev *hdev;
323
324 read_lock(&hci_dev_list_lock);
325
326 list_for_each_entry(hdev, &hci_dev_list, list) {
327 struct sk_buff *skb;
328
329 skb = create_monitor_event(hdev, HCI_DEV_REG);
330 if (!skb)
331 continue;
332
333 if (sock_queue_rcv_skb(sk, skb))
334 kfree_skb(skb);
335 }
336
337 read_unlock(&hci_dev_list_lock);
338}
339
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data (sub-typed by @type) and delivers it to raw sockets via
 * hci_send_to_sock().  @hdev may be NULL for device-independent events.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so it looks like a controller event to
	 * listeners, and timestamp it like real traffic.
	 */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	/* hci_send_to_sock() clones per recipient; drop our reference */
	kfree_skb(skb);
}
367
/* Notify sockets about a device lifecycle @event on @hdev: forward it
 * to monitor sockets, emit a stack-internal event to raw sockets, and
 * on HCI_DEV_UNREG detach every socket still bound to the device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* _nested: we already hold hci_sk_list.lock, and
			 * the socket lock class would otherwise trip lockdep
			 */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Wake the owner with EPIPE and reset the
				 * socket back to unbound state
				 */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
410
/* Release an HCI socket: drop monitor accounting, unlink it from the
 * global list, release the device reference taken at bind time and
 * free queued skbs.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Balance the atomic_inc done when binding to the monitor channel */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* Balance promisc count and device reference from bind */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
441
Antti Julkub2a66aa2011-06-15 12:01:14 +0300442static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200443{
444 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300445 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200446
447 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
448 return -EFAULT;
449
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300450 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300451
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200452 err = hci_blacklist_add(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300453
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300454 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300455
456 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200457}
458
Antti Julkub2a66aa2011-06-15 12:01:14 +0300459static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200460{
461 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300462 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200463
464 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
465 return -EFAULT;
466
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300467 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300468
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200469 err = hci_blacklist_del(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300470
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300471 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300472
473 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200474}
475
/* Ioctls that require bound socket.
 *
 * Called from hci_sock_ioctl() with the socket lock held.  Returns
 * -EBADFD when the socket has no attached device, -EPERM for
 * privileged commands without CAP_NET_ADMIN, or the command result.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Devices marked raw-only cannot have the flag toggled */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Give the driver a chance to handle vendor ioctls */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
522
/* Top-level HCI socket ioctl dispatcher.
 *
 * Device-independent commands are handled directly; everything else
 * falls through to hci_sock_bound_ioctl() under the socket lock.
 * Device-state-changing commands require CAP_NET_ADMIN.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
584
/* Bind an HCI socket to a channel (and, for the raw channel, optionally
 * to a specific device).
 *
 * RAW: may name a device (takes a reference and bumps its promisc
 *      count) or HCI_DEV_NONE for an unbound raw socket.
 * CONTROL: device-less; requires CAP_NET_ADMIN.
 * MONITOR: device-less; requires CAP_NET_RAW, replays existing devices
 *          and bumps the global monitor count.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs: zero-fill then copy what was given */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			/* Dropped again in hci_sock_release() */
			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Tell the new listener about already-registered devices */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
674
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300675static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
676 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677{
678 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
679 struct sock *sk = sock->sk;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100680 struct hci_dev *hdev = hci_pi(sk)->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681
682 BT_DBG("sock %p sk %p", sock, sk);
683
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100684 if (!hdev)
685 return -EBADFD;
686
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 lock_sock(sk);
688
689 *addr_len = sizeof(*haddr);
690 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100691 haddr->hci_dev = hdev->id;
Mathias Krause3f68ba02012-08-15 11:31:47 +0000692 haddr->hci_channel= 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
694 release_sock(sk);
695 return 0;
696}
697
/* Attach ancillary data (direction and/or timestamp) to @msg for a
 * received @skb, according to the socket's cmsg_mask options set via
 * HCI_DATA_DIR and HCI_TIME_STAMP.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel (and only those that
		 * don't use 64-bit time) need the compat timeval layout
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900734
/* Receive one queued frame on an HCI socket.
 *
 * Truncates to @len (setting MSG_TRUNC) when the frame is larger than
 * the caller's buffer.  Raw-channel sockets get direction/timestamp
 * cmsgs per their socket options; control and monitor sockets get a
 * plain SO_TIMESTAMP cmsg.  Returns bytes copied or a negative error.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* HCI sockets have no source address to report */
	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
780
/* Send a frame on an HCI socket.
 *
 * Control-channel writes are handed to mgmt_control(); monitor sockets
 * are read-only.  Raw writes must start with the packet-type byte,
 * which is stripped into the skb control block.  Non-privileged
 * senders are limited to the command set allowed by hci_sec_filter.
 * Returns @len on success or a negative error.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: 1 type byte + a 3-byte command header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets only receive; writing is not allowed */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; move it out of the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue commands allowed
		 * by the security filter
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Raw devices and vendor commands (OGF 0x3f) bypass the
		 * command queue and go straight to the transmit queue
		 */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
880
/* Set HCI socket options (raw channel only): direction cmsgs,
 * timestamp cmsgs, and the receive filter.  Non-privileged callers
 * have their filter intersected with hci_sec_filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-load uf with the current filter so a short write
		 * from user space keeps the remaining fields unchanged
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen beyond the security
		 * filter
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
963
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300964static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
965 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966{
967 struct hci_ufilter uf;
968 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +0100969 int len, opt, err = 0;
970
971 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972
973 if (get_user(len, optlen))
974 return -EFAULT;
975
Marcel Holtmanncedc5462012-02-20 14:50:33 +0100976 lock_sock(sk);
977
978 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
979 err = -EINVAL;
980 goto done;
981 }
982
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983 switch (optname) {
984 case HCI_DATA_DIR:
985 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
986 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900987 else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 opt = 0;
989
990 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +0100991 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 break;
993
994 case HCI_TIME_STAMP:
995 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
996 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900997 else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998 opt = 0;
999
1000 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001001 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002 break;
1003
1004 case HCI_FILTER:
1005 {
1006 struct hci_filter *f = &hci_pi(sk)->filter;
1007
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001008 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 uf.type_mask = f->type_mask;
1010 uf.opcode = f->opcode;
1011 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1012 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1013 }
1014
1015 len = min_t(unsigned int, len, sizeof(uf));
1016 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001017 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 break;
1019
1020 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001021 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022 break;
1023 }
1024
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001025done:
1026 release_sock(sk);
1027 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028}
1029
/* Protocol operations for HCI sockets.  Connection-oriented entry
 * points (listen, connect, accept, socketpair, shutdown, mmap) are
 * stubbed out with the generic sock_no_* handlers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1049
/* Slab description for HCI sockets; obj_size reserves room for the
 * per-socket hci_pinfo state appended to struct sock.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1055
Eric Paris3f378b62009-11-05 22:18:14 -08001056static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1057 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058{
1059 struct sock *sk;
1060
1061 BT_DBG("sock %p", sock);
1062
1063 if (sock->type != SOCK_RAW)
1064 return -ESOCKTNOSUPPORT;
1065
1066 sock->ops = &hci_sock_ops;
1067
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001068 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 if (!sk)
1070 return -ENOMEM;
1071
1072 sock_init_data(sock, sk);
1073
1074 sock_reset_flag(sk, SOCK_ZAPPED);
1075
1076 sk->sk_protocol = protocol;
1077
1078 sock->state = SS_UNCONNECTED;
1079 sk->sk_state = BT_OPEN;
1080
1081 bt_sock_link(&hci_sk_list, sk);
1082 return 0;
1083}
1084
/* Registered with the socket core for BTPROTO_HCI under PF_BLUETOOTH */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1090
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091int __init hci_sock_init(void)
1092{
1093 int err;
1094
1095 err = proto_register(&hci_sk_proto, 0);
1096 if (err < 0)
1097 return err;
1098
1099 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001100 if (err < 0) {
1101 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001103 }
1104
1105 err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
1106 if (err < 0) {
1107 BT_ERR("Failed to create HCI proc file");
1108 bt_sock_unregister(BTPROTO_HCI);
1109 goto error;
1110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 BT_INFO("HCI socket layer initialized");
1113
1114 return 0;
1115
1116error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117 proto_unregister(&hci_sk_proto);
1118 return err;
1119}
1120
Anand Gadiyarb7440a142011-02-22 12:43:09 +05301121void hci_sock_cleanup(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122{
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001123 bt_procfs_cleanup(&init_net, "hci");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1125 BT_ERR("HCI socket unregistration failed");
1126
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 proto_unregister(&hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128}