/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

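/* Test whether bit nr is set in a bitmap stored as an array of __u32 words */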
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

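/* Check a received frame against the per-socket filter; returns true
 * when the frame should not be delivered to this socket.
 */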
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

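/* Deliver a clone of skb to every socket bound to the monitor channel */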
static void queue_monitor_skb(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	queue_monitor_skb(skb_copy);
	kfree_skb(skb_copy);
}

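/* Build a HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX monitor frame for a
 * controller registration or unregistration event.
 */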
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

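/* Replay a HCI_MON_NEW_INDEX event for each registered controller to a
 * newly bound monitor socket.
 */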
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

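/* Propagate a device event to the monitor channel and to bound sockets,
 * and detach sockets from the device when it gets unregistered.
 */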
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			queue_monitor_skb(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

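/* Bind the socket to one of the HCI channels (raw, user, control or
 * monitor), performing the capability and device-state checks that the
 * selected channel requires.
 */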
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

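/* Attach direction and timestamp ancillary data (as selected through the
 * HCI_DATA_DIR and HCI_TIME_STAMP socket options) to a received message.
 */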
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

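/* Transmit a frame from userspace: route it to the management interface,
 * the driver raw queue or the command queue depending on the bound
 * channel and packet type.
 */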
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}