blob: f4b10344b1e5833048793a7de4307603c0a45e52 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
/* Registered management channels (HCI_CHANNEL_CONTROL and above),
 * protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Number of sockets currently bound to the monitor channel */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039/* ----- HCI socket interface ----- */
40
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;		/* must stay first: hci_pi() casts struct sock */
	struct hci_dev    *hdev;	/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* RAW channel packet/event filter */
	__u32             cmsg_mask;	/* requested ancillary data (dir/tstamp) */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
};
52
/* Atomically set socket flag bit @nr (an HCI_SOCK_* constant). */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
57
/* Atomically clear socket flag bit @nr (an HCI_SOCK_* constant). */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
62
/* Test socket flag bit @nr; returns non-zero when set. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
67
Jiri Slaby93919762015-02-19 15:20:43 +010068static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070069{
Jiri Slaby93919762015-02-19 15:20:43 +010070 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070071}
72
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed packet types (bitmask) */
	__u32 event_mask[2];	/* allowed HCI events (64-bit bitmap) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs, indexed by OGF */
};

/* Whitelist of packet types, events and commands that unprivileged
 * raw sockets are permitted to use.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
102
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
106
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700107static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
108{
109 struct hci_filter *flt;
110 int flt_type, flt_event;
111
112 /* Apply filter */
113 flt = &hci_pi(sk)->filter;
114
115 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
116 flt_type = 0;
117 else
118 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
119
120 if (!test_bit(flt_type, &flt->type_mask))
121 return true;
122
123 /* Extra filter for event packets only */
124 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
125 return false;
126
127 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
128
129 if (!hci_test_bit(flt_event, &flt->event_mask))
130 return true;
131
132 /* Check filter only when opcode is set */
133 if (!flt->opcode)
134 return false;
135
136 if (flt_event == HCI_EV_CMD_COMPLETE &&
137 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
138 return true;
139
140 if (flt_event == HCI_EV_CMD_STATUS &&
141 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
142 return true;
143
144 return false;
145}
146
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Apply this socket's packet/event filter */
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only receives incoming
			 * event/ACL/SCO traffic.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom; done lazily
			 * once the first interested socket is found, then
			 * shared (cloned) for all later sockets.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* On queue failure the clone must be freed here */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* kfree_skb(NULL) is a no-op when no copy was ever made */
	kfree_skb(skb_copy);
}
204
Johan Hedberg71290692015-02-20 13:26:23 +0200205/* Send frame to sockets with specific channel */
206void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700207 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100208{
209 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100210
Johan Hedberg71290692015-02-20 13:26:23 +0200211 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100212
213 read_lock(&hci_sk_list.lock);
214
Sasha Levinb67bfe02013-02-27 17:06:00 -0800215 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100216 struct sk_buff *nskb;
217
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700218 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700219 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700220 continue;
221
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100222 /* Skip the original socket */
223 if (sk == skip_sk)
224 continue;
225
226 if (sk->sk_state != BT_BOUND)
227 continue;
228
Johan Hedberg71290692015-02-20 13:26:23 +0200229 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800230 continue;
231
232 nskb = skb_clone(skb, GFP_ATOMIC);
233 if (!nskb)
234 continue;
235
236 if (sock_queue_rcv_skb(sk, nskb))
237 kfree_skb(nskb);
238 }
239
240 read_unlock(&hci_sk_list.lock);
241}
242
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100243/* Send frame to monitor socket */
244void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
245{
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100246 struct sk_buff *skb_copy = NULL;
Marcel Holtmann2b531292015-01-11 19:33:31 -0800247 struct hci_mon_hdr *hdr;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100248 __le16 opcode;
249
250 if (!atomic_read(&monitor_promisc))
251 return;
252
253 BT_DBG("hdev %p len %d", hdev, skb->len);
254
255 switch (bt_cb(skb)->pkt_type) {
256 case HCI_COMMAND_PKT:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700257 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100258 break;
259 case HCI_EVENT_PKT:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700260 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100261 break;
262 case HCI_ACLDATA_PKT:
263 if (bt_cb(skb)->incoming)
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700264 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100265 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700266 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100267 break;
268 case HCI_SCODATA_PKT:
269 if (bt_cb(skb)->incoming)
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700270 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100271 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700272 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100273 break;
274 default:
275 return;
276 }
277
Marcel Holtmann2b531292015-01-11 19:33:31 -0800278 /* Create a private copy with headroom */
279 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
280 if (!skb_copy)
281 return;
282
283 /* Put header before the data */
284 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
285 hdr->opcode = opcode;
286 hdr->index = cpu_to_le16(hdev->id);
287 hdr->len = cpu_to_le16(skb->len);
288
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700289 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
290 HCI_SOCK_TRUSTED, NULL);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100291 kfree_skb(skb_copy);
292}
293
/* Build a monitor-channel skb describing a device lifecycle @event
 * (HCI_DEV_REG / HCI_DEV_UNREG).  Returns NULL on allocation failure
 * or for unsupported events; caller owns and frees the skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		/* New-index event carries type/bus/address/name payload */
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Delete-index event has no payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len excludes the header itself */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
337
338static void send_monitor_replay(struct sock *sk)
339{
340 struct hci_dev *hdev;
341
342 read_lock(&hci_dev_list_lock);
343
344 list_for_each_entry(hdev, &hci_dev_list, list) {
345 struct sk_buff *skb;
346
347 skb = create_monitor_event(hdev, HCI_DEV_REG);
348 if (!skb)
349 continue;
350
351 if (sock_queue_rcv_skb(sk, skb))
352 kfree_skb(skb);
353 }
354
355 read_unlock(&hci_dev_list_lock);
356}
357
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so delivery mirrors a hardware event */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
384
/* Notify the monitor channel and HCI sockets of a device event; on
 * HCI_DEV_UNREG additionally detach every socket bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference the socket held */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
428
Johan Hedberg801c1e82015-03-06 21:08:50 +0200429static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
430{
431 struct hci_mgmt_chan *c;
432
433 list_for_each_entry(c, &mgmt_chan_list, list) {
434 if (c->channel == channel)
435 return c;
436 }
437
438 return NULL;
439}
440
441static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
442{
443 struct hci_mgmt_chan *c;
444
445 mutex_lock(&mgmt_chan_list_lock);
446 c = __hci_mgmt_chan_find(channel);
447 mutex_unlock(&mgmt_chan_list_lock);
448
449 return c;
450}
451
452int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
453{
454 if (c->channel < HCI_CHANNEL_CONTROL)
455 return -EINVAL;
456
457 mutex_lock(&mgmt_chan_list_lock);
458 if (__hci_mgmt_chan_find(c->channel)) {
459 mutex_unlock(&mgmt_chan_list_lock);
460 return -EALREADY;
461 }
462
463 list_add_tail(&c->list, &mgmt_chan_list);
464
465 mutex_unlock(&mgmt_chan_list_lock);
466
467 return 0;
468}
469EXPORT_SYMBOL(hci_mgmt_chan_register);
470
/* Remove a previously registered management channel. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
478
/* Release an HCI socket: undo channel-specific state (monitor count,
 * user-channel device ownership), detach from the device and purge
 * queued skbs.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Give the controller back to the kernel:
			 * re-announce it over mgmt and power it down.
			 */
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
515
/* HCIBLOCKADDR helper: copy a bdaddr from userspace and add it to the
 * device blacklist (BR/EDR address type).
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
532
/* HCIUNBLOCKADDR helper: copy a bdaddr from userspace and remove it
 * from the device blacklist (BR/EDR address type).
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
549
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* Device is exclusively owned by a user channel socket */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	/* These ioctls are only meaningful for BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported; capability check kept
		 * so unprivileged callers still see -EPERM first.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
593
/* Top-level ioctl handler for HCI sockets.  Only the RAW channel
 * accepts ioctls; device-global commands run without the socket lock,
 * bound-socket commands re-take it.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the socket lock: the commands below take their own
	 * locks and may sleep.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Re-acquire the lock for ioctls that need a bound socket */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
666
/* Bind an HCI socket to a channel (and, for RAW/USER, a device).
 * Channel-specific setup: RAW optionally attaches to a device in
 * promiscuous mode; USER takes exclusive ownership of a powered-down
 * controller; MONITOR requires CAP_NET_RAW and replays known indexes;
 * any other channel must be a registered management channel.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter input leaves the rest zeroed */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel must name a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Only a fully idle controller may be taken over */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* test-and-set guarantees a single user-channel owner */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the controller from the management interface */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			/* Roll back ownership on open failure */
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
827
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300828static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
829 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830{
831 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
832 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700833 struct hci_dev *hdev;
834 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835
836 BT_DBG("sock %p sk %p", sock, sk);
837
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700838 if (peer)
839 return -EOPNOTSUPP;
840
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 lock_sock(sk);
842
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700843 hdev = hci_pi(sk)->hdev;
844 if (!hdev) {
845 err = -EBADFD;
846 goto done;
847 }
848
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 *addr_len = sizeof(*haddr);
850 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100851 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700852 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700854done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700856 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857}
858
/* Attach requested ancillary data (packet direction and/or receive
 * timestamp) for @skb to @msg, honouring the compat timeval layout
 * for 32-bit callers.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks without 64-bit time expect the compat
		 * timeval layout in the cmsg.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900895
Ying Xue1b784142015-03-02 15:37:48 +0800896static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
897 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898{
899 int noblock = flags & MSG_DONTWAIT;
900 struct sock *sk = sock->sk;
901 struct sk_buff *skb;
902 int copied, err;
903
904 BT_DBG("sock %p, sk %p", sock, sk);
905
906 if (flags & (MSG_OOB))
907 return -EOPNOTSUPP;
908
909 if (sk->sk_state == BT_CLOSED)
910 return 0;
911
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200912 skb = skb_recv_datagram(sk, flags, noblock, &err);
913 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 return err;
915
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916 copied = skb->len;
917 if (len < copied) {
918 msg->msg_flags |= MSG_TRUNC;
919 copied = len;
920 }
921
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -0300922 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -0500923 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924
Marcel Holtmann3a208622012-02-20 14:50:34 +0100925 switch (hci_pi(sk)->channel) {
926 case HCI_CHANNEL_RAW:
927 hci_sock_cmsg(sk, msg, skb);
928 break;
Marcel Holtmann23500182013-08-26 21:40:52 -0700929 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100930 case HCI_CHANNEL_MONITOR:
931 sock_recv_timestamp(msg, sk, skb);
932 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +0200933 default:
934 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
935 sock_recv_timestamp(msg, sk, skb);
936 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +0100937 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
939 skb_free_datagram(sk, skb);
940
941 return err ? : copied;
942}
943
/* Transmit one frame on an HCI socket.
 *
 * The first byte of the payload is the HCI packet type indicator; the
 * rest is the frame itself.  Routing depends on the bound channel:
 *   - mgmt channels are dispatched to the matching management handler
 *   - monitor sockets are read-only (-EOPNOTSUPP)
 *   - user-channel frames bypass the permission filter (enforced at
 *     bind time) but must carry a valid packet type
 *   - raw-socket HCI commands are checked against the security filter
 *     unless the caller has CAP_NET_RAW
 *
 * Returns @len on success or a negative error.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum frame: 1 byte packet type + smallest header (3 bytes) */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Any other channel is a management channel; look it up
		 * under the list lock and hand the message over.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = mgmt_control(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Peel the packet type indicator off the front of the frame */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue commands allowed by
		 * the security filter.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and go straight to the transmit path.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req_start = 1;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* ACL/SCO data from raw sockets requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1068
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001069static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1070 char __user *optval, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071{
1072 struct hci_ufilter uf = { .opcode = 0 };
1073 struct sock *sk = sock->sk;
1074 int err = 0, opt = 0;
1075
1076 BT_DBG("sk %p, opt %d", sk, optname);
1077
1078 lock_sock(sk);
1079
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001080 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001081 err = -EBADFD;
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001082 goto done;
1083 }
1084
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 switch (optname) {
1086 case HCI_DATA_DIR:
1087 if (get_user(opt, (int __user *)optval)) {
1088 err = -EFAULT;
1089 break;
1090 }
1091
1092 if (opt)
1093 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1094 else
1095 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1096 break;
1097
1098 case HCI_TIME_STAMP:
1099 if (get_user(opt, (int __user *)optval)) {
1100 err = -EFAULT;
1101 break;
1102 }
1103
1104 if (opt)
1105 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1106 else
1107 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1108 break;
1109
1110 case HCI_FILTER:
Marcel Holtmann0878b662007-05-05 00:35:59 +02001111 {
1112 struct hci_filter *f = &hci_pi(sk)->filter;
1113
1114 uf.type_mask = f->type_mask;
1115 uf.opcode = f->opcode;
1116 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1117 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1118 }
1119
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120 len = min_t(unsigned int, len, sizeof(uf));
1121 if (copy_from_user(&uf, optval, len)) {
1122 err = -EFAULT;
1123 break;
1124 }
1125
1126 if (!capable(CAP_NET_RAW)) {
1127 uf.type_mask &= hci_sec_filter.type_mask;
1128 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1129 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1130 }
1131
1132 {
1133 struct hci_filter *f = &hci_pi(sk)->filter;
1134
1135 f->type_mask = uf.type_mask;
1136 f->opcode = uf.opcode;
1137 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1138 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1139 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001140 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
1142 default:
1143 err = -ENOPROTOOPT;
1144 break;
1145 }
1146
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001147done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 release_sock(sk);
1149 return err;
1150}
1151
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001152static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1153 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154{
1155 struct hci_ufilter uf;
1156 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001157 int len, opt, err = 0;
1158
1159 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160
1161 if (get_user(len, optlen))
1162 return -EFAULT;
1163
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001164 lock_sock(sk);
1165
1166 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001167 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001168 goto done;
1169 }
1170
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 switch (optname) {
1172 case HCI_DATA_DIR:
1173 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1174 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001175 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 opt = 0;
1177
1178 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001179 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 break;
1181
1182 case HCI_TIME_STAMP:
1183 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1184 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001185 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 opt = 0;
1187
1188 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001189 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 break;
1191
1192 case HCI_FILTER:
1193 {
1194 struct hci_filter *f = &hci_pi(sk)->filter;
1195
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001196 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 uf.type_mask = f->type_mask;
1198 uf.opcode = f->opcode;
1199 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1200 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1201 }
1202
1203 len = min_t(unsigned int, len, sizeof(uf));
1204 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001205 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 break;
1207
1208 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001209 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 break;
1211 }
1212
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001213done:
1214 release_sock(sk);
1215 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216}
1217
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI sockets.  HCI sockets
 * are datagram-style and connectionless, so connect/listen/accept and
 * friends are stubbed out with the sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1237
/* Protocol descriptor: sizes each HCI sock allocation to hold the
 * per-socket state (struct hci_pinfo).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1243
Eric Paris3f378b62009-11-05 22:18:14 -08001244static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1245 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246{
1247 struct sock *sk;
1248
1249 BT_DBG("sock %p", sock);
1250
1251 if (sock->type != SOCK_RAW)
1252 return -ESOCKTNOSUPPORT;
1253
1254 sock->ops = &hci_sock_ops;
1255
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001256 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 if (!sk)
1258 return -ENOMEM;
1259
1260 sock_init_data(sock, sk);
1261
1262 sock_reset_flag(sk, SOCK_ZAPPED);
1263
1264 sk->sk_protocol = protocol;
1265
1266 sock->state = SS_UNCONNECTED;
1267 sk->sk_state = BT_OPEN;
1268
1269 bt_sock_link(&hci_sk_list, sk);
1270 return 0;
1271}
1272
/* Family hook registered with bt_sock_register() for BTPROTO_HCI */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1278
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279int __init hci_sock_init(void)
1280{
1281 int err;
1282
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001283 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 err = proto_register(&hci_sk_proto, 0);
1286 if (err < 0)
1287 return err;
1288
1289 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001290 if (err < 0) {
1291 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001293 }
1294
Al Virob0316612013-04-04 19:14:33 -04001295 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001296 if (err < 0) {
1297 BT_ERR("Failed to create HCI proc file");
1298 bt_sock_unregister(BTPROTO_HCI);
1299 goto error;
1300 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 BT_INFO("HCI socket layer initialized");
1303
1304 return 0;
1305
1306error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 proto_unregister(&hci_sk_proto);
1308 return err;
1309}
1310
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): proc entry first, then the socket family, and
 * finally the protocol itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}