/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

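/* Check a received frame against the per-socket filter of a raw HCI
 * socket. Returns true when the frame must be dropped for this socket:
 * its packet type is not enabled in the type mask, the event is not
 * enabled in the event mask, or a Command Complete/Status event does
 * not match the opcode the filter was set for.
 */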
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
						      GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

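/* Deliver an already assembled monitor frame to every socket bound to
 * HCI_CHANNEL_MONITOR.
 */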
static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

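/* Build a monitor frame (HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX)
 * describing a controller that was just registered or unregistered.
 * Returns NULL for other events or on allocation failure.
 */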
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

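/* Replay an HCI_MON_NEW_INDEX event for every registered controller to
 * a newly bound monitor socket, so it starts with a complete view of
 * the existing devices.
 */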
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

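/* Propagate a device event to the monitor sockets and, wrapped in a
 * stack-internal HCI event, to the HCI sockets. When a controller is
 * unregistered, sockets still bound to it are detached and woken up
 * with EPIPE.
 */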
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

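/* Release an HCI socket: drop the monitor and promiscuous references,
 * hand a user channel controller back to the mgmt interface and close
 * it, then purge any queued frames.
 */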
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

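/* Ioctls that operate on the device list or address a controller by
 * index and therefore do not need a bound socket; anything not handled
 * here falls through to hci_sock_bound_ioctl().
 */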
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

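/* Bind a socket to one of the HCI channels (raw, user, control or
 * monitor). The user channel takes exclusive ownership of a controller
 * that is currently down; the monitor channel starts with a replay of
 * the existing controller indexes.
 */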
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

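/* Report the bound controller index and channel of an HCI socket. */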
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

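/* Attach the optional direction and timestamp control messages, as
 * requested via HCI_DATA_DIR and HCI_TIME_STAMP, to a received frame.
 */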
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

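/* Transmit a frame from user space. On the user channel only the
 * packet type is validated; on raw sockets commands are checked
 * against the security filter (unless the sender has CAP_NET_RAW)
 * before being queued towards the controller.
 */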
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

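/* Socket options for raw sockets: direction info, timestamps and the
 * HCI filter. Callers without CAP_NET_RAW can only narrow the filter
 * within the limits of hci_sec_filter.
 */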
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

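/* Return the current direction/timestamp settings or the HCI filter of
 * a raw socket.
 */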
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}