blob: 9ba1a2667eaaf03a0c7b6385541956a540db9354 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Johan Hedberg801c1e82015-03-06 21:08:50 +020034static LIST_HEAD(mgmt_chan_list);
35static DEFINE_MUTEX(mgmt_chan_list_lock);
36
Marcel Holtmanncd82e612012-02-20 20:34:38 +010037static atomic_t monitor_promisc = ATOMIC_INIT(0);
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039/* ----- HCI socket interface ----- */
40
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket protocol state for HCI sockets. The embedded struct bt_sock
 * must stay the first member so the hci_pi() cast above is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;        /* common Bluetooth socket, must be first */
	struct hci_dev    *hdev;     /* bound controller, NULL if unbound */
	struct hci_filter filter;    /* per-socket packet/event filter (RAW) */
	__u32             cmsg_mask; /* requested ancillary data (dir/tstamp) */
	unsigned short    channel;   /* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;     /* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
52
/* Atomically set flag bit @nr in the socket's per-socket flags word. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
57
/* Atomically clear flag bit @nr in the socket's per-socket flags word. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
62
/* Return non-zero when flag bit @nr is set on the socket. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
67
/* Return the HCI_CHANNEL_* value this socket was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
72
Jiri Slaby93919762015-02-19 15:20:43 +010073static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070074{
Jiri Slaby93919762015-02-19 15:20:43 +010075 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070076}
77
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

/* Bitmaps describing which packet types, events and commands (per OGF,
 * indexed by OCF bit) an unprivileged raw socket is allowed to see/send.
 * The hex values below are the authoritative policy — do not reformat.
 */
struct hci_sec_filter {
	__u32 type_mask;                          /* allowed packet types */
	__u32 event_mask[2];                      /* allowed HCI events */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];  /* allowed OCFs per OGF */
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
107
/* Global list of all open HCI sockets, protected by its rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
111
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700112static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
113{
114 struct hci_filter *flt;
115 int flt_type, flt_event;
116
117 /* Apply filter */
118 flt = &hci_pi(sk)->filter;
119
120 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
121 flt_type = 0;
122 else
123 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
124
125 if (!test_bit(flt_type, &flt->type_mask))
126 return true;
127
128 /* Extra filter for event packets only */
129 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
130 return false;
131
132 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
133
134 if (!hci_test_bit(flt_event, &flt->event_mask))
135 return true;
136
137 /* Check filter only when opcode is set */
138 if (!flt->opcode)
139 return false;
140
141 if (flt_event == HCI_EV_CMD_COMPLETE &&
142 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
143 return true;
144
145 if (flt_event == HCI_EV_CMD_STATUS &&
146 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
147 return true;
148
149 return false;
150}
151
/* Send frame to RAW socket.
 *
 * Delivers @skb to every bound socket attached to @hdev, subject to the
 * per-channel rules below. A private copy with the packet-type byte
 * pushed in front is created lazily — only once the first interested
 * socket is found — and then cheaply cloned per recipient.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Raw sockets get the per-socket filter applied */
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event/ACL/SCO */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes its own reference; drop ours
		 * when the receive queue rejected the clone.
		 */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
209
Johan Hedberg71290692015-02-20 13:26:23 +0200210/* Send frame to sockets with specific channel */
211void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700212 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100213{
214 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100215
Johan Hedberg71290692015-02-20 13:26:23 +0200216 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100217
218 read_lock(&hci_sk_list.lock);
219
Sasha Levinb67bfe02013-02-27 17:06:00 -0800220 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100221 struct sk_buff *nskb;
222
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700223 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700224 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700225 continue;
226
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100227 /* Skip the original socket */
228 if (sk == skip_sk)
229 continue;
230
231 if (sk->sk_state != BT_BOUND)
232 continue;
233
Johan Hedberg71290692015-02-20 13:26:23 +0200234 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800235 continue;
236
237 nskb = skb_clone(skb, GFP_ATOMIC);
238 if (!nskb)
239 continue;
240
241 if (sock_queue_rcv_skb(sk, nskb))
242 kfree_skb(nskb);
243 }
244
245 read_unlock(&hci_sk_list.lock);
246}
247
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100248/* Send frame to monitor socket */
249void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
250{
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100251 struct sk_buff *skb_copy = NULL;
Marcel Holtmann2b531292015-01-11 19:33:31 -0800252 struct hci_mon_hdr *hdr;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100253 __le16 opcode;
254
255 if (!atomic_read(&monitor_promisc))
256 return;
257
258 BT_DBG("hdev %p len %d", hdev, skb->len);
259
260 switch (bt_cb(skb)->pkt_type) {
261 case HCI_COMMAND_PKT:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700262 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100263 break;
264 case HCI_EVENT_PKT:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700265 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100266 break;
267 case HCI_ACLDATA_PKT:
268 if (bt_cb(skb)->incoming)
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700269 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100270 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700271 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100272 break;
273 case HCI_SCODATA_PKT:
274 if (bt_cb(skb)->incoming)
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700275 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100276 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700277 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100278 break;
279 default:
280 return;
281 }
282
Marcel Holtmann2b531292015-01-11 19:33:31 -0800283 /* Create a private copy with headroom */
284 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
285 if (!skb_copy)
286 return;
287
288 /* Put header before the data */
289 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
290 hdr->opcode = opcode;
291 hdr->index = cpu_to_le16(hdev->id);
292 hdr->len = cpu_to_le16(skb->len);
293
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700294 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
295 HCI_SOCK_TRUSTED, NULL);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100296 kfree_skb(skb_copy);
297}
298
/* Build a monitor-channel event skb for a device lifecycle @event
 * (HCI_DEV_REG / HCI_DEV_UNREG). Returns NULL on unknown event or
 * allocation failure; caller owns and must free the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* NEW_INDEX payload: type, bus, address and name */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* name field is fixed at 8 bytes in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* DEL_INDEX carries no payload, only the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len excludes the header itself */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
342
343static void send_monitor_replay(struct sock *sk)
344{
345 struct hci_dev *hdev;
346
347 read_lock(&hci_dev_list_lock);
348
349 list_for_each_entry(hdev, &hci_dev_list, list) {
350 struct sk_buff *skb;
351
352 skb = create_monitor_event(hdev, HCI_DEV_REG);
353 if (!skb)
354 continue;
355
356 if (sock_queue_rcv_skb(sk, skb))
357 kfree_skb(skb);
358 }
359
360 read_unlock(&hci_dev_list_lock);
361}
362
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and broadcasts it to raw sockets via hci_send_to_sock(). @hdev
 * may be NULL for events not tied to a specific controller.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so filters/cmsg treat it like a real event */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
389
/* Notify sockets about a device lifecycle @event. Monitor sockets get a
 * wire-format event, raw sockets get an internal stack event, and on
 * HCI_DEV_UNREG all sockets bound to @hdev are forcibly detached.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* _nested: we already hold hci_sk_list.lock */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
433
Johan Hedberg801c1e82015-03-06 21:08:50 +0200434static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
435{
436 struct hci_mgmt_chan *c;
437
438 list_for_each_entry(c, &mgmt_chan_list, list) {
439 if (c->channel == channel)
440 return c;
441 }
442
443 return NULL;
444}
445
446static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
447{
448 struct hci_mgmt_chan *c;
449
450 mutex_lock(&mgmt_chan_list_lock);
451 c = __hci_mgmt_chan_find(channel);
452 mutex_unlock(&mgmt_chan_list_lock);
453
454 return c;
455}
456
457int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
458{
459 if (c->channel < HCI_CHANNEL_CONTROL)
460 return -EINVAL;
461
462 mutex_lock(&mgmt_chan_list_lock);
463 if (__hci_mgmt_chan_find(c->channel)) {
464 mutex_unlock(&mgmt_chan_list_lock);
465 return -EALREADY;
466 }
467
468 list_add_tail(&c->list, &mgmt_chan_list);
469
470 mutex_unlock(&mgmt_chan_list_lock);
471
472 return 0;
473}
474EXPORT_SYMBOL(hci_mgmt_chan_register);
475
/* Remove a previously registered management channel from the list. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
483
/* Release an HCI socket: undo monitor promiscuity, unlink from the
 * global socket list, return a user-channel device to the stack, drop
 * the device reference and free queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Hand the controller back to the kernel stack:
			 * re-announce it over mgmt and close it.
			 */
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
520
Antti Julkub2a66aa2011-06-15 12:01:14 +0300521static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200522{
523 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300524 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200525
526 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
527 return -EFAULT;
528
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300529 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300530
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300531 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300532
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300533 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300534
535 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200536}
537
Antti Julkub2a66aa2011-06-15 12:01:14 +0300538static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200539{
540 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300541 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200542
543 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
544 return -EFAULT;
545
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300546 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300547
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300548 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300549
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300550 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300551
552 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200553}
554
/* Ioctls that require bound socket.
 *
 * Handles device-specific ioctls. Fails early when the socket has no
 * device, the device is claimed by a user channel, is unconfigured, or
 * is not a BR/EDR controller. Privileged commands check CAP_NET_ADMIN.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but still gate the
		 * error on the capability check.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
598
/* Top-level ioctl handler for HCI sockets. Only raw-channel sockets may
 * issue ioctls. Device-independent commands are dispatched without the
 * socket lock held; anything else is retried as a bound-socket ioctl
 * with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below may sleep or take other locks, so drop the
	 * socket lock before dispatching them.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Not a device-independent command: re-take the lock and try the
	 * bound-socket ioctls.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
671
/* Bind an HCI socket to a channel (and optionally a device).
 *
 * RAW: may bind to a specific device or HCI_DEV_NONE; takes a device
 *      reference and bumps its promisc count.
 * USER: exclusive takeover of a down device (CAP_NET_ADMIN); removes it
 *       from mgmt, opens it, and rolls everything back on failure.
 * MONITOR: device-less tracing channel (CAP_NET_RAW), replays existing
 *          device indexes on bind.
 * default: a registered mgmt channel; trust and event flags are set per
 *          capability and channel.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses leave the rest
	 * zeroed by the memset above it.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are up, initializing or still being
		 * set up/configured by the stack.
		 */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* test_and_set guarantees only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the management interface while it
		 * is owned by this socket.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			/* Roll back flag, mgmt visibility and reference */
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
837
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300838static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
839 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840{
841 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
842 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700843 struct hci_dev *hdev;
844 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845
846 BT_DBG("sock %p sk %p", sock, sk);
847
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700848 if (peer)
849 return -EOPNOTSUPP;
850
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 lock_sock(sk);
852
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700853 hdev = hci_pi(sk)->hdev;
854 if (!hdev) {
855 err = -EBADFD;
856 goto done;
857 }
858
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 *addr_len = sizeof(*haddr);
860 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100861 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700862 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700864done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700865 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700866 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867}
868
/* Attach the ancillary data requested via the socket's cmsg mask to an
 * outgoing recvmsg(): the packet direction (HCI_CMSG_DIR) and/or the
 * receive timestamp (HCI_CMSG_TSTAMP) of the given skb.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* 1 for frames received from the controller, 0 for sent */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel expect the compat layout
		 * of struct timeval, so convert before copying out unless
		 * the compat ABI already uses 64-bit time.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900905
/* recvmsg() handler for HCI sockets.
 *
 * Dequeues one datagram (HCI frame) and copies up to @len bytes into the
 * user's message; a frame longer than the buffer is truncated and flagged
 * with MSG_TRUNC. Ancillary data depends on the channel the socket is
 * bound to. Returns the number of bytes copied, 0 on a closed socket
 * (EOF semantics) or a negative error.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Out-of-band data is not supported on datagram-style HCI sockets */
	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		/* Frame does not fit; deliver a truncated copy */
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Ancillary data differs per channel: raw sockets get the
	 * direction/timestamp cmsgs they opted into, while user, monitor
	 * and registered management channels get plain SO_TIMESTAMP data.
	 */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
953
/* sendmsg() handler for HCI sockets.
 *
 * For raw and user channel sockets the message is one complete HCI frame:
 * a one-byte packet type indicator followed by the packet itself. The
 * minimum length of 4 guarantees the type byte plus a 2-byte command
 * opcode (or equivalent header) is present. Management channel traffic is
 * dispatched to the registered channel handler instead.
 *
 * Returns the number of bytes consumed (@len) on success or a negative
 * error.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		/* Handled by the frame path below */
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Anything else must be a registered management channel;
		 * the list lock pins the channel while its handler runs.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = mgmt_control(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	/* On failure the helper has already set @err */
	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte of the frame selects the packet type; strip it off */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged raw sockets may only send commands allowed
		 * by the security filter; CAP_NET_RAW bypasses it.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and flow control and go out through the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req_start = 1;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO data injection requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1078
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001079static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1080 char __user *optval, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081{
1082 struct hci_ufilter uf = { .opcode = 0 };
1083 struct sock *sk = sock->sk;
1084 int err = 0, opt = 0;
1085
1086 BT_DBG("sk %p, opt %d", sk, optname);
1087
1088 lock_sock(sk);
1089
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001090 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001091 err = -EBADFD;
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001092 goto done;
1093 }
1094
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 switch (optname) {
1096 case HCI_DATA_DIR:
1097 if (get_user(opt, (int __user *)optval)) {
1098 err = -EFAULT;
1099 break;
1100 }
1101
1102 if (opt)
1103 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1104 else
1105 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1106 break;
1107
1108 case HCI_TIME_STAMP:
1109 if (get_user(opt, (int __user *)optval)) {
1110 err = -EFAULT;
1111 break;
1112 }
1113
1114 if (opt)
1115 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1116 else
1117 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1118 break;
1119
1120 case HCI_FILTER:
Marcel Holtmann0878b662007-05-05 00:35:59 +02001121 {
1122 struct hci_filter *f = &hci_pi(sk)->filter;
1123
1124 uf.type_mask = f->type_mask;
1125 uf.opcode = f->opcode;
1126 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1127 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1128 }
1129
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 len = min_t(unsigned int, len, sizeof(uf));
1131 if (copy_from_user(&uf, optval, len)) {
1132 err = -EFAULT;
1133 break;
1134 }
1135
1136 if (!capable(CAP_NET_RAW)) {
1137 uf.type_mask &= hci_sec_filter.type_mask;
1138 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1139 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1140 }
1141
1142 {
1143 struct hci_filter *f = &hci_pi(sk)->filter;
1144
1145 f->type_mask = uf.type_mask;
1146 f->opcode = uf.opcode;
1147 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1148 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1149 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001150 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
1152 default:
1153 err = -ENOPROTOOPT;
1154 break;
1155 }
1156
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001157done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 release_sock(sk);
1159 return err;
1160}
1161
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001162static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1163 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164{
1165 struct hci_ufilter uf;
1166 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001167 int len, opt, err = 0;
1168
1169 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170
1171 if (get_user(len, optlen))
1172 return -EFAULT;
1173
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001174 lock_sock(sk);
1175
1176 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001177 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001178 goto done;
1179 }
1180
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 switch (optname) {
1182 case HCI_DATA_DIR:
1183 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1184 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001185 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 opt = 0;
1187
1188 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001189 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 break;
1191
1192 case HCI_TIME_STAMP:
1193 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1194 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001195 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 opt = 0;
1197
1198 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001199 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 break;
1201
1202 case HCI_FILTER:
1203 {
1204 struct hci_filter *f = &hci_pi(sk)->filter;
1205
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001206 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 uf.type_mask = f->type_mask;
1208 uf.opcode = f->opcode;
1209 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1210 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1211 }
1212
1213 len = min_t(unsigned int, len, sizeof(uf));
1214 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001215 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 break;
1217
1218 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001219 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 break;
1221 }
1222
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001223done:
1224 release_sock(sk);
1225 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226}
1227
/* Protocol operations for HCI sockets. Connection-oriented entry points
 * (listen, connect, accept, ...) use the sock_no_* stubs because HCI
 * sockets are datagram-style and connection-less.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1247
/* Slab/proto descriptor for HCI sockets; obj_size makes sk_alloc()
 * reserve room for the per-socket struct hci_pinfo state.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1253
/* Create a new HCI socket for the PF_BLUETOOTH family.
 *
 * Only SOCK_RAW is supported. The new sock starts unbound (BT_OPEN) and
 * is linked into the global hci_sk_list so it can receive forwarded HCI
 * frames once bound. Returns 0 on success or a negative error.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1282
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH, SOCK_RAW,
 * BTPROTO_HCI) is routed to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1288
/* Register the HCI socket layer: proto, BTPROTO_HCI family handler and
 * the /proc/net/hci listing. On any failure the steps already taken are
 * unwound so the module loads/unloads cleanly.
 */
int __init hci_sock_init(void)
{
	int err;

	/* bind()/getname() copy struct sockaddr_hci through a plain
	 * struct sockaddr, so it must not be larger.
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the family registration before the common unwind */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1320
/* Tear down the HCI socket layer in reverse order of hci_sock_init():
 * proc entry, then family registration, then the proto itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}