blob: 19fdac78e555b97c44dd6826bc60ef89a888395b [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
/* Number of sockets currently bound to the monitor channel; frames are
 * only mirrored to monitors while this is non-zero (see
 * hci_send_to_monitor() and hci_sock_dev_event()).
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
35
Linus Torvalds1da177e2005-04-16 15:20:36 -070036/* ----- HCI socket interface ----- */
37
38static inline int hci_test_bit(int nr, void *addr)
39{
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
41}
42
43/* Security filter */
44static struct hci_sec_filter hci_sec_filter = {
45 /* Packet types */
46 0x10,
47 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020048 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 /* Commands */
50 {
51 { 0x0 },
52 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020053 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020055 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070056 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020057 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020059 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070060 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020061 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070062 }
63};
64
/* Global list of all open HCI sockets, protected by its reader/writer
 * lock; traversed read-locked by the broadcast helpers below.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
68
/* Send frame to RAW socket */
/*
 * Mirror an HCI frame to every bound raw-channel socket attached to
 * @hdev, applying each socket's per-socket filter.  The original @skb
 * is not consumed; a private copy with the packet-type byte pushed in
 * front is created lazily and shared (via skb_clone) by all receivers.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		/* Vendor packets are matched against filter bit 0 */
		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* An opcode filter is only meaningful for
			 * Command Complete/Status events; the opcode is
			 * read unaligned at its offset in each event.
			 */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* kfree_skb(NULL) is a no-op when no receiver needed a copy */
	kfree_skb(skb_copy);
}
140
141/* Send frame to control socket */
142void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
143{
144 struct sock *sk;
145 struct hlist_node *node;
146
147 BT_DBG("len %d", skb->len);
148
149 read_lock(&hci_sk_list.lock);
150
151 sk_for_each(sk, node, &hci_sk_list.head) {
152 struct sk_buff *nskb;
153
154 /* Skip the original socket */
155 if (sk == skip_sk)
156 continue;
157
158 if (sk->sk_state != BT_BOUND)
159 continue;
160
161 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
162 continue;
163
164 nskb = skb_clone(skb, GFP_ATOMIC);
165 if (!nskb)
166 continue;
167
168 if (sock_queue_rcv_skb(sk, nskb))
169 kfree_skb(nskb);
170 }
171
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172 read_unlock(&hci_sk_list.lock);
173}
174
/* Send frame to monitor socket */
/*
 * Mirror an HCI frame to every monitor-channel socket, prefixed with a
 * hci_mon_hdr whose opcode encodes packet type and direction.  Cheap
 * early exit when no monitor is bound (monitor_promisc == 0).  The
 * caller keeps ownership of @skb.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (+ direction for data packets) to the monitor
	 * opcode; unknown types are not forwarded.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		/* Lazily build one headered copy, shared by all
		 * monitors via skb_clone() below.
		 */
		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* kfree_skb(NULL) is a no-op */
	kfree_skb(skb_copy);
}
250
251static void send_monitor_event(struct sk_buff *skb)
252{
253 struct sock *sk;
254 struct hlist_node *node;
255
256 BT_DBG("len %d", skb->len);
257
258 read_lock(&hci_sk_list.lock);
259
260 sk_for_each(sk, node, &hci_sk_list.head) {
261 struct sk_buff *nskb;
262
263 if (sk->sk_state != BT_BOUND)
264 continue;
265
266 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
267 continue;
268
269 nskb = skb_clone(skb, GFP_ATOMIC);
270 if (!nskb)
271 continue;
272
273 if (sock_queue_rcv_skb(sk, nskb))
274 kfree_skb(nskb);
275 }
276
277 read_unlock(&hci_sk_list.lock);
278}
279
/*
 * Build a monitor frame describing @hdev for @event (HCI_DEV_REG ->
 * HCI_MON_NEW_INDEX with device details, HCI_DEV_UNREG ->
 * HCI_MON_DEL_INDEX with empty payload).
 *
 * Returns a freshly allocated, timestamped skb owned by the caller, or
 * NULL on allocation failure or an unhandled event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): copies a fixed 8 bytes of hdev->name;
		 * assumes ni->name is 8 bytes wide - confirm against
		 * struct hci_mon_new_index.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header: opcode, device index, payload len */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
323
324static void send_monitor_replay(struct sock *sk)
325{
326 struct hci_dev *hdev;
327
328 read_lock(&hci_dev_list_lock);
329
330 list_for_each_entry(hdev, &hci_dev_list, list) {
331 struct sk_buff *skb;
332
333 skb = create_monitor_event(hdev, HCI_DEV_REG);
334 if (!skb)
335 continue;
336
337 if (sock_queue_rcv_skb(sk, skb))
338 kfree_skb(skb);
339 }
340
341 read_unlock(&hci_dev_list_lock);
342}
343
/* Generate internal stack event */
/*
 * Fabricate an HCI_EV_STACK_INTERNAL event frame carrying @dlen bytes
 * of @data and deliver it to raw sockets through hci_send_to_sock().
 * @hdev may be NULL for device-independent events (see
 * hci_sock_dev_event()).  Silently does nothing on allocation failure.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Standard HCI event header wrapping the internal event */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so it looks like a received event */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
371
/*
 * Notify HCI sockets about a device event (register/unregister/up/down
 * etc.): mirrors it to monitors, injects a stack-internal event for raw
 * sockets, and on HCI_DEV_UNREG detaches all sockets bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			/* Socket lock nested inside the list read lock */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
415
/*
 * Release an HCI socket: unlink it from the global list, drop the
 * device reference/promiscuous count taken at bind time, and free
 * queued skbs.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Monitor sockets contribute to the monitor_promisc count */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
446
Antti Julkub2a66aa2011-06-15 12:01:14 +0300447static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200448{
449 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300450 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200451
452 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
453 return -EFAULT;
454
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300455 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300456
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200457 err = hci_blacklist_add(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300458
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300459 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300460
461 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200462}
463
Antti Julkub2a66aa2011-06-15 12:01:14 +0300464static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200465{
466 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300467 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200468
469 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
470 return -EFAULT;
471
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300472 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300473
Johan Hedberg88c1fe42012-02-09 15:56:11 +0200474 err = hci_blacklist_del(hdev, &bdaddr, 0);
Antti Julku5e762442011-08-25 16:48:02 +0300475
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300476 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300477
478 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200479}
480
/* Ioctls that require bound socket */
/*
 * Handle ioctls that need a device-bound socket; called from
 * hci_sock_ioctl() with the socket lock held.  Unknown commands fall
 * through to the driver's own ioctl handler, if any.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		/* Devices with the raw-only quirk cannot be toggled */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Let the transport driver handle vendor ioctls */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
527
/*
 * Top-level ioctl dispatcher for HCI sockets.  Device-independent
 * commands are handled directly; everything else is forwarded to
 * hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	/* Device state changes require CAP_NET_ADMIN; @arg is the
	 * device index for these commands.
	 */
	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
589
/*
 * Bind an HCI socket to a channel (raw/control/monitor) and, for the
 * raw channel, optionally to a specific device.  Control and monitor
 * channels are device-less and require CAP_NET_ADMIN resp. CAP_NET_RAW.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields stay zeroed */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE means "not bound to a device"; otherwise
		 * take a reference and mark the device promiscuous.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Replay existing device registrations to the new
		 * monitor before it starts receiving live events.
		 */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
679
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300680static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
681 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682{
683 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
684 struct sock *sk = sock->sk;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100685 struct hci_dev *hdev = hci_pi(sk)->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686
687 BT_DBG("sock %p sk %p", sock, sk);
688
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100689 if (!hdev)
690 return -EBADFD;
691
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 lock_sock(sk);
693
694 *addr_len = sizeof(*haddr);
695 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100696 haddr->hci_dev = hdev->id;
Mathias Krause3f68ba02012-08-15 11:31:47 +0000697 haddr->hci_channel= 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698
699 release_sock(sk);
700 return 0;
701}
702
/*
 * Attach ancillary data (frame direction and/or receive timestamp) to
 * @msg according to the socket's cmsg mask, for hci_sock_recvmsg().
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks without 64-bit time expect the compat
		 * timeval layout in the cmsg payload.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900739
/*
 * Receive one queued frame on an HCI socket.  Truncates to @len (and
 * sets MSG_TRUNC), then attaches channel-appropriate ancillary data:
 * HCI cmsgs for raw sockets, SCM timestamps for control/monitor.
 * Returns the number of bytes copied or a negative errno.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
785
/*
 * Send a frame on an HCI socket.  Raw-channel frames start with a
 * packet-type byte; commands from unprivileged senders are checked
 * against hci_sec_filter, then queued to the device's command or raw
 * queue.  Control channel is routed to mgmt_control(); monitor is
 * read-only.  Returns bytes consumed or a negative errno.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte + smallest packet header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; strip it from the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue whitelisted
		 * commands (see hci_sec_filter above).
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Raw devices and vendor commands (ogf 0x3f) bypass
		 * the command queue.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Non-command traffic requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
885
/*
 * Set HCI socket options (raw channel only): toggle direction/timestamp
 * cmsgs or install a receive filter.  Unprivileged filters are clamped
 * to the security filter's masks.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-load uf with the current filter so a short write
		 * from userspace leaves the remaining fields unchanged.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Without CAP_NET_RAW the filter may only narrow the
		 * security filter, never widen it.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
968
/*
 * Read back HCI socket options (raw channel only): cmsg flags and the
 * current receive filter.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero first so struct padding never leaks
			 * kernel stack bytes to userspace.
			 */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1034
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI raw sockets. HCI is
 * connectionless from the socket API's point of view, so every
 * connection-oriented operation is stubbed out with the sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1054
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo state (accessed via hci_pi()).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1060
Eric Paris3f378b62009-11-05 22:18:14 -08001061static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1062 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063{
1064 struct sock *sk;
1065
1066 BT_DBG("sock %p", sock);
1067
1068 if (sock->type != SOCK_RAW)
1069 return -ESOCKTNOSUPPORT;
1070
1071 sock->ops = &hci_sock_ops;
1072
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001073 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 if (!sk)
1075 return -ENOMEM;
1076
1077 sock_init_data(sock, sk);
1078
1079 sock_reset_flag(sk, SOCK_ZAPPED);
1080
1081 sk->sk_protocol = protocol;
1082
1083 sock->state = SS_UNCONNECTED;
1084 sk->sk_state = BT_OPEN;
1085
1086 bt_sock_link(&hci_sk_list, sk);
1087 return 0;
1088}
1089
/* Family hook registered with the Bluetooth core; routes
 * socket(PF_BLUETOOTH, ..., BTPROTO_HCI) to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1095
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096int __init hci_sock_init(void)
1097{
1098 int err;
1099
1100 err = proto_register(&hci_sk_proto, 0);
1101 if (err < 0)
1102 return err;
1103
1104 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1105 if (err < 0)
1106 goto error;
1107
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108 BT_INFO("HCI socket layer initialized");
1109
1110 return 0;
1111
1112error:
1113 BT_ERR("HCI socket registration failed");
1114 proto_unregister(&hci_sk_proto);
1115 return err;
1116}
1117
Anand Gadiyarb7440a142011-02-22 12:43:09 +05301118void hci_sock_cleanup(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119{
1120 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1121 BT_ERR("HCI socket unregistration failed");
1122
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 proto_unregister(&hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124}