/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;
        struct hci_dev    *hdev;
        struct hci_filter filter;
        __u32             cmsg_mask;
        unsigned short    channel;
};

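/* Test bit 'nr' in a bitmap laid out as consecutive __u32 words (bits 0-31
 * in the first word, 32-63 in the next, and so on). The event and OCF masks
 * below are checked through this helper instead of the generic test_bit(),
 * which operates on unsigned long sized words.
 */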
static inline int hci_test_bit(int nr, const void *addr)
{
        return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

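/* Limits enforced for sockets without CAP_NET_RAW: a bitmap of permitted
 * packet types, a two word bitmap of permitted HCI events, and one bitmap
 * of permitted command OCFs per OGF. hci_sock_setsockopt() ANDs any user
 * supplied filter against these masks, and hci_sock_sendmsg() rejects
 * commands outside ocf_mask[] for unprivileged senders.
 */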
static const struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

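/* Returns true when the socket's filter says this packet must not be
 * delivered to it: the packet type is not in type_mask, the event is not
 * in event_mask, or a Command Complete/Status event carries an opcode
 * other than the one the filter asked for.
 */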
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
                         struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("channel %u len %d", channel, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != channel)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sk_buff *skb_copy = NULL;
        struct hci_mon_hdr *hdr;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        /* Create a private copy with headroom */
        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
        if (!skb_copy)
                return;

        /* Put header before the data */
        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);

        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
        kfree_skb(skb_copy);
}

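/* Build the monitor record announcing device registration (HCI_MON_NEW_INDEX)
 * or removal (HCI_MON_DEL_INDEX). Used both when a device event happens and
 * by send_monitor_replay() to bring a newly bound monitor socket up to date.
 */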
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
        struct hci_mgmt_chan *c;

        list_for_each_entry(c, &mgmt_chan_list, list) {
                if (c->channel == channel)
                        return c;
        }

        return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
        struct hci_mgmt_chan *c;

        mutex_lock(&mgmt_chan_list_lock);
        c = __hci_mgmt_chan_find(channel);
        mutex_unlock(&mgmt_chan_list_lock);

        return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
        if (c->channel < HCI_CHANNEL_CONTROL)
                return -EINVAL;

        mutex_lock(&mgmt_chan_list_lock);
        if (__hci_mgmt_chan_find(c->channel)) {
                mutex_unlock(&mgmt_chan_list_lock);
                return -EALREADY;
        }

        list_add_tail(&c->list, &mgmt_chan_list);

        mutex_unlock(&mgmt_chan_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
        mutex_lock(&mgmt_chan_list_lock);
        list_del(&c->list);
        mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                return -EOPNOTSUPP;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

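/* Bind a socket to an HCI channel (and, for the raw and user channels, to a
 * device). HCI_CHANNEL_RAW may bind to a specific device or to HCI_DEV_NONE;
 * HCI_CHANNEL_USER needs CAP_NET_ADMIN and exclusive access to a device that
 * is not yet up; HCI_CHANNEL_MONITOR needs CAP_NET_RAW and replays the known
 * controllers; any other channel must be a registered management channel and
 * also requires CAP_NET_ADMIN.
 */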
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    hci_dev_test_flag(hdev, HCI_SETUP) ||
                    hci_dev_test_flag(hdev, HCI_CONFIG)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                if (!hci_mgmt_chan_find(haddr.hci_channel)) {
                        err = -EINVAL;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                            int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_msg(skb, 0, msg, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        default:
                if (hci_mgmt_chan_find(hci_pi(sk)->channel))
                        sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                            size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_mgmt_chan *chan;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                mutex_lock(&mgmt_chan_list_lock);
                chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
                if (chan)
                        err = mgmt_control(chan, sk, msg, len);
                else
                        err = -EINVAL;

                mutex_unlock(&mgmt_chan_list_lock);
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req_start = 1;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

int __init hci_sock_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}