/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

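/* Test bit number nr in an array of __u32 mask words (used for the
 * event and OCF filter bitmaps below).
 */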
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

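/* Whitelist of packet types, events and command OCFs that sockets
 * without CAP_NET_RAW are limited to; see hci_sock_setsockopt() and
 * hci_sock_sendmsg().
 */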
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

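/* Return true when the socket's filter says this frame must not be
 * delivered to it.
 */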
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
	kfree_skb(skb_copy);
}

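/* Build a monitor channel NEW_INDEX/DEL_INDEX event for a controller
 * being registered or unregistered.
 */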
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

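/* Replay a NEW_INDEX event for every registered controller so a newly
 * bound monitor socket learns about existing devices.
 */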
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

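/* Management channel lookup; the double-underscore variant expects
 * mgmt_chan_list_lock to be held by the caller.
 */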
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

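/* Bind the socket to one of the HCI channels (raw, user, control,
 * monitor or a registered management channel) and, for the raw and
 * user channels, to the controller selected by hci_dev.
 */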
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

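/* Transmit path: the first byte of the buffer is the HCI packet type
 * indicator, which is stripped before the frame is queued towards the
 * controller.
 */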
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = -ENOSYS; /* FIXME: call handler */
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req_start = 1;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

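/* Socket options are only valid on the raw channel; HCI_FILTER requests
 * from sockets without CAP_NET_RAW are clamped to hci_sec_filter.
 */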
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

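/* Register the HCI protocol with the socket layer, hook up the
 * BTPROTO_HCI socket family and create the "hci" procfs entry.
 */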
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}