blob: e7f463f6fd692cfb387a14036d5571c7a7e7f7c3 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
/* Registered higher-layer management channels (see hci_mgmt_chan_register);
 * both the list and its users are serialized by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Count of bound monitor-channel sockets; checked in hci_send_to_monitor()
 * and hci_sock_dev_event() so no monitor copies are built when nobody
 * is listening.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -070039/* ----- HCI socket interface ----- */
40
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket protocol state, overlaid on struct sock via hci_pi(). */
struct hci_pinfo {
	struct bt_sock    bt;		/* must stay first: common BT socket base */
	struct hci_dev    *hdev;	/* bound controller, NULL until bind() */
	struct hci_filter filter;	/* packet/event filter for RAW channel */
	__u32             cmsg_mask;	/* which ancillary data to deliver (HCI_CMSG_*) */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits, atomic bitops */
};
52
Marcel Holtmann6befc642015-03-14 19:27:53 -070053void hci_sock_set_flag(struct sock *sk, int nr)
54{
55 set_bit(nr, &hci_pi(sk)->flags);
56}
57
58void hci_sock_clear_flag(struct sock *sk, int nr)
59{
60 clear_bit(nr, &hci_pi(sk)->flags);
61}
62
Jiri Slaby93919762015-02-19 15:20:43 +010063static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070064{
Jiri Slaby93919762015-02-19 15:20:43 +010065 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070066}
67
/* Security filter */
#define HCI_SFLT_MAX_OGF	5

/* Bitmaps describing which packet types, events and commands an
 * unprivileged RAW socket is allowed to see/send. ocf_mask is indexed
 * by OGF, each row being a 128-bit OCF bitmap.
 */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
97
/* Global list of all open HCI sockets, guarded by its rwlock;
 * readers (frame delivery) take it as read_lock, link/unlink as write.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
101
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700102static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
103{
104 struct hci_filter *flt;
105 int flt_type, flt_event;
106
107 /* Apply filter */
108 flt = &hci_pi(sk)->filter;
109
110 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
111 flt_type = 0;
112 else
113 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
114
115 if (!test_bit(flt_type, &flt->type_mask))
116 return true;
117
118 /* Extra filter for event packets only */
119 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
120 return false;
121
122 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
123
124 if (!hci_test_bit(flt_event, &flt->event_mask))
125 return true;
126
127 /* Check filter only when opcode is set */
128 if (!flt->opcode)
129 return false;
130
131 if (flt_event == HCI_EV_CMD_COMPLETE &&
132 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
133 return true;
134
135 if (flt_event == HCI_EV_CMD_STATUS &&
136 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
137 return true;
138
139 return false;
140}
141
/* Send frame to RAW socket.
 *
 * Delivers @skb to every bound socket attached to @hdev, honouring the
 * per-channel rules: RAW sockets get the frame unless their filter
 * rejects it, USER channel sockets get only incoming event/ACL/SCO
 * frames, all other channels get nothing. A single private copy with
 * the packet-type byte pushed in front is built lazily on first need
 * and then cloned per receiver.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only sees traffic coming from the
			 * controller, and only the three real data/event
			 * packet types.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom for the
			 * type byte; done at most once per call.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes its own reference on
		 * success; free the clone only when queueing failed.
		 */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Releases the lazily-created copy; kfree_skb(NULL) is a no-op */
	kfree_skb(skb_copy);
}
199
/* Send frame to sockets with specific channel.
 *
 * Clones @skb to every bound socket on @channel that has flag bit
 * @flag set in its hci_pinfo flags, skipping @skip_sk (the originator,
 * may be NULL).
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!test_bit(flag, &hci_pi(sk)->flags))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Free the clone only if queueing to the socket failed */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
237
/* Send frame to monitor socket.
 *
 * Wraps @skb in an hci_mon_hdr (opcode derived from packet type and
 * direction, controller index, payload length) and broadcasts it to
 * all trusted HCI_CHANNEL_MONITOR sockets. Cheap early-out when no
 * monitor socket is open. Unknown packet types are dropped silently.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type + direction to the monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
288
/* Build a monitor-channel control event for @hdev.
 *
 * HCI_DEV_REG produces a NEW_INDEX event carrying type, bus, address
 * and name of the controller; HCI_DEV_UNREG produces an empty
 * DEL_INDEX event. Returns a timestamped skb with the hci_mon_hdr
 * already pushed, or NULL on allocation failure / unknown event.
 * Caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte name field in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
332
333static void send_monitor_replay(struct sock *sk)
334{
335 struct hci_dev *hdev;
336
337 read_lock(&hci_dev_list_lock);
338
339 list_for_each_entry(hdev, &hci_dev_list, list) {
340 struct sk_buff *skb;
341
342 skb = create_monitor_event(hdev, HCI_DEV_REG);
343 if (!skb)
344 continue;
345
346 if (sock_queue_rcv_skb(sk, skb))
347 kfree_skb(skb);
348 }
349
350 read_unlock(&hci_dev_list_lock);
351}
352
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event packet containing
 * @dlen bytes of @data, marks it as incoming, and delivers it through
 * hci_send_to_sock() (with @hdev possibly NULL for global events).
 * Allocation failure is silently ignored: the event is best-effort.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Standard HCI event header first ... */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	/* ... then the stack-internal payload */
	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
379
/* Notify sockets about a controller lifecycle event (HCI_DEV_*).
 *
 * Fans the event out to monitor sockets (as NEW_INDEX/DEL_INDEX) and
 * to regular sockets (as a stack-internal device event). On device
 * unregistration every socket bound to @hdev is detached: hdev is
 * cleared, the socket errors out with EPIPE and goes back to BT_OPEN.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* _nested: we already hold hci_sk_list.lock here */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference bind() took on hdev */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
423
Johan Hedberg801c1e82015-03-06 21:08:50 +0200424static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
425{
426 struct hci_mgmt_chan *c;
427
428 list_for_each_entry(c, &mgmt_chan_list, list) {
429 if (c->channel == channel)
430 return c;
431 }
432
433 return NULL;
434}
435
436static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
437{
438 struct hci_mgmt_chan *c;
439
440 mutex_lock(&mgmt_chan_list_lock);
441 c = __hci_mgmt_chan_find(channel);
442 mutex_unlock(&mgmt_chan_list_lock);
443
444 return c;
445}
446
447int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
448{
449 if (c->channel < HCI_CHANNEL_CONTROL)
450 return -EINVAL;
451
452 mutex_lock(&mgmt_chan_list_lock);
453 if (__hci_mgmt_chan_find(c->channel)) {
454 mutex_unlock(&mgmt_chan_list_lock);
455 return -EALREADY;
456 }
457
458 list_add_tail(&c->list, &mgmt_chan_list);
459
460 mutex_unlock(&mgmt_chan_list_lock);
461
462 return 0;
463}
464EXPORT_SYMBOL(hci_mgmt_chan_register);
465
466void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
467{
468 mutex_lock(&mgmt_chan_list_lock);
469 list_del(&c->list);
470 mutex_unlock(&mgmt_chan_list_lock);
471}
472EXPORT_SYMBOL(hci_mgmt_chan_unregister);
473
/* Release an HCI socket.
 *
 * Drops the monitor-promiscuity count for monitor sockets, unlinks the
 * socket from the global list, and — when bound to a controller —
 * undoes bind-time state: a USER channel socket gives the controller
 * back to mgmt and closes it, then the promisc count and the hdev
 * reference taken at bind time are dropped. Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Hand the controller back to the mgmt interface
			 * and power it down (mirror of bind()).
			 */
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
510
Antti Julkub2a66aa2011-06-15 12:01:14 +0300511static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200512{
513 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300514 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200515
516 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
517 return -EFAULT;
518
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300519 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300520
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300521 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300522
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300523 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300524
525 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200526}
527
Antti Julkub2a66aa2011-06-15 12:01:14 +0300528static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200529{
530 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300531 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200532
533 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
534 return -EFAULT;
535
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300536 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300537
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300538 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300539
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300540 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300541
542 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200543}
544
/* Ioctls that require bound socket.
 *
 * Runs with the socket lock held (taken by hci_sock_ioctl). Rejects
 * sockets with no controller, controllers grabbed by a USER channel,
 * unconfigured controllers, and non-BR/EDR device types before
 * dispatching. Returns -ENOIOCTLCMD for commands it does not handle.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * privilege check for consistent error reporting.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
588
/* Top-level ioctl handler for HCI sockets.
 *
 * Only RAW-channel sockets accept ioctls. Device-independent commands
 * are dispatched after dropping the socket lock (they take their own
 * locks and may sleep); anything else falls through to
 * hci_sock_bound_ioctl() with the socket lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below do not need (and must not hold) the
	 * socket lock; the direct returns are safe because the lock
	 * has been released here.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Re-acquire the lock for the bound-socket commands */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
661
/* Bind an HCI socket to a channel (and, for RAW/USER, a controller).
 *
 * RAW: optionally attaches to a device and bumps its promisc count.
 * USER: requires CAP_NET_ADMIN, takes exclusive ownership of a powered-
 * down controller (removing it from mgmt) and powers it up.
 * MONITOR: requires CAP_NET_RAW, is marked trusted, gets an index
 * replay and bumps monitor_promisc.
 * Any other channel must have been registered via
 * hci_mgmt_chan_register() and requires CAP_NET_ADMIN.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter sockaddrs leave the
	 * zero-initialized tail in place.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE means "all devices" and needs no ref */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* USER channel always targets one specific device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Exclusive access only works on a device that is
		 * neither up nor in the middle of setup/config.
		 */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Atomically claim the device for this socket */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the controller from the mgmt interface */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			/* Roll back the claim on failure */
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		/* Since the access to control channels is currently
		 * restricted to CAP_NET_ADMIN capabilities, every
		 * socket is implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
825
/* Report the local address (device id + channel) of a bound socket.
 * Peer addresses do not exist for HCI sockets (-EOPNOTSUPP); a socket
 * without a device yields -EBADFD.
 */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
856
/* Attach requested ancillary data (direction and/or timestamp) for a
 * received skb to @msg, according to the socket's cmsg_mask. On compat
 * tasks using 32-bit time the timestamp is converted to
 * compat_timeval first.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat userspace expects a compat_timeval */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900893
Ying Xue1b784142015-03-02 15:37:48 +0800894static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
895 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896{
897 int noblock = flags & MSG_DONTWAIT;
898 struct sock *sk = sock->sk;
899 struct sk_buff *skb;
900 int copied, err;
901
902 BT_DBG("sock %p, sk %p", sock, sk);
903
904 if (flags & (MSG_OOB))
905 return -EOPNOTSUPP;
906
907 if (sk->sk_state == BT_CLOSED)
908 return 0;
909
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200910 skb = skb_recv_datagram(sk, flags, noblock, &err);
911 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 return err;
913
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 copied = skb->len;
915 if (len < copied) {
916 msg->msg_flags |= MSG_TRUNC;
917 copied = len;
918 }
919
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -0300920 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -0500921 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
Marcel Holtmann3a208622012-02-20 14:50:34 +0100923 switch (hci_pi(sk)->channel) {
924 case HCI_CHANNEL_RAW:
925 hci_sock_cmsg(sk, msg, skb);
926 break;
Marcel Holtmann23500182013-08-26 21:40:52 -0700927 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100928 case HCI_CHANNEL_MONITOR:
929 sock_recv_timestamp(msg, sk, skb);
930 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +0200931 default:
932 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
933 sock_recv_timestamp(msg, sk, skb);
934 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +0100935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936
937 skb_free_datagram(sk, skb);
938
939 return err ? : copied;
940}
941
Ying Xue1b784142015-03-02 15:37:48 +0800942static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
943 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944{
945 struct sock *sk = sock->sk;
Johan Hedberg801c1e82015-03-06 21:08:50 +0200946 struct hci_mgmt_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 struct hci_dev *hdev;
948 struct sk_buff *skb;
949 int err;
950
951 BT_DBG("sock %p sk %p", sock, sk);
952
953 if (msg->msg_flags & MSG_OOB)
954 return -EOPNOTSUPP;
955
956 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
957 return -EINVAL;
958
959 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
960 return -EINVAL;
961
962 lock_sock(sk);
963
Johan Hedberg03811012010-12-08 00:21:06 +0200964 switch (hci_pi(sk)->channel) {
965 case HCI_CHANNEL_RAW:
Marcel Holtmann23500182013-08-26 21:40:52 -0700966 case HCI_CHANNEL_USER:
Johan Hedberg03811012010-12-08 00:21:06 +0200967 break;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100968 case HCI_CHANNEL_MONITOR:
969 err = -EOPNOTSUPP;
970 goto done;
Johan Hedberg03811012010-12-08 00:21:06 +0200971 default:
Johan Hedberg801c1e82015-03-06 21:08:50 +0200972 mutex_lock(&mgmt_chan_list_lock);
973 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
974 if (chan)
Johan Hedberg6d785aa32015-03-06 21:08:51 +0200975 err = mgmt_control(chan, sk, msg, len);
Johan Hedberg801c1e82015-03-06 21:08:50 +0200976 else
977 err = -EINVAL;
978
979 mutex_unlock(&mgmt_chan_list_lock);
Johan Hedberg03811012010-12-08 00:21:06 +0200980 goto done;
981 }
982
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200983 hdev = hci_pi(sk)->hdev;
984 if (!hdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 err = -EBADFD;
986 goto done;
987 }
988
Marcel Holtmann7e21add2009-11-18 01:05:00 +0100989 if (!test_bit(HCI_UP, &hdev->flags)) {
990 err = -ENETDOWN;
991 goto done;
992 }
993
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200994 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
995 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 goto done;
997
Al Viro6ce8e9c2014-04-06 21:25:44 -0400998 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 err = -EFAULT;
1000 goto drop;
1001 }
1002
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001003 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 skb_pull(skb, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
Marcel Holtmann1bc5ad12013-12-17 03:21:25 -08001006 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1007 /* No permission check is needed for user channel
1008 * since that gets enforced when binding the socket.
1009 *
1010 * However check that the packet type is valid.
1011 */
1012 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
1013 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1014 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1015 err = -EINVAL;
1016 goto drop;
1017 }
1018
1019 skb_queue_tail(&hdev->raw_q, skb);
1020 queue_work(hdev->workqueue, &hdev->tx_work);
1021 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
Harvey Harrison83985312008-05-02 16:25:46 -07001022 u16 opcode = get_unaligned_le16(skb->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 u16 ogf = hci_opcode_ogf(opcode);
1024 u16 ocf = hci_opcode_ocf(opcode);
1025
1026 if (((ogf > HCI_SFLT_MAX_OGF) ||
Gustavo Padovan3bb3c752012-05-17 00:36:22 -03001027 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1028 &hci_sec_filter.ocf_mask[ogf])) &&
1029 !capable(CAP_NET_RAW)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030 err = -EPERM;
1031 goto drop;
1032 }
1033
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001034 if (ogf == 0x3f) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035 skb_queue_tail(&hdev->raw_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001036 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 } else {
Stephen Hemminger49c922b2014-10-27 21:12:20 -07001038 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02001039 * single-command requests.
1040 */
Eyal Birger6368c232015-03-01 14:58:26 +02001041 bt_cb(skb)->req_start = 1;
Johan Hedberg11714b32013-03-05 20:37:47 +02001042
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001044 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 }
1046 } else {
1047 if (!capable(CAP_NET_RAW)) {
1048 err = -EPERM;
1049 goto drop;
1050 }
1051
1052 skb_queue_tail(&hdev->raw_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001053 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 }
1055
1056 err = len;
1057
1058done:
1059 release_sock(sk);
1060 return err;
1061
1062drop:
1063 kfree_skb(skb);
1064 goto done;
1065}
1066
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001067static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1068 char __user *optval, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069{
1070 struct hci_ufilter uf = { .opcode = 0 };
1071 struct sock *sk = sock->sk;
1072 int err = 0, opt = 0;
1073
1074 BT_DBG("sk %p, opt %d", sk, optname);
1075
1076 lock_sock(sk);
1077
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001078 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001079 err = -EBADFD;
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001080 goto done;
1081 }
1082
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083 switch (optname) {
1084 case HCI_DATA_DIR:
1085 if (get_user(opt, (int __user *)optval)) {
1086 err = -EFAULT;
1087 break;
1088 }
1089
1090 if (opt)
1091 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1092 else
1093 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1094 break;
1095
1096 case HCI_TIME_STAMP:
1097 if (get_user(opt, (int __user *)optval)) {
1098 err = -EFAULT;
1099 break;
1100 }
1101
1102 if (opt)
1103 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1104 else
1105 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1106 break;
1107
1108 case HCI_FILTER:
Marcel Holtmann0878b662007-05-05 00:35:59 +02001109 {
1110 struct hci_filter *f = &hci_pi(sk)->filter;
1111
1112 uf.type_mask = f->type_mask;
1113 uf.opcode = f->opcode;
1114 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1115 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1116 }
1117
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 len = min_t(unsigned int, len, sizeof(uf));
1119 if (copy_from_user(&uf, optval, len)) {
1120 err = -EFAULT;
1121 break;
1122 }
1123
1124 if (!capable(CAP_NET_RAW)) {
1125 uf.type_mask &= hci_sec_filter.type_mask;
1126 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1127 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1128 }
1129
1130 {
1131 struct hci_filter *f = &hci_pi(sk)->filter;
1132
1133 f->type_mask = uf.type_mask;
1134 f->opcode = uf.opcode;
1135 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1136 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1137 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001138 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139
1140 default:
1141 err = -ENOPROTOOPT;
1142 break;
1143 }
1144
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001145done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 release_sock(sk);
1147 return err;
1148}
1149
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001150static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1151 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152{
1153 struct hci_ufilter uf;
1154 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001155 int len, opt, err = 0;
1156
1157 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158
1159 if (get_user(len, optlen))
1160 return -EFAULT;
1161
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001162 lock_sock(sk);
1163
1164 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001165 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001166 goto done;
1167 }
1168
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 switch (optname) {
1170 case HCI_DATA_DIR:
1171 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1172 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001173 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 opt = 0;
1175
1176 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001177 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 break;
1179
1180 case HCI_TIME_STAMP:
1181 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1182 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001183 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 opt = 0;
1185
1186 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001187 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 break;
1189
1190 case HCI_FILTER:
1191 {
1192 struct hci_filter *f = &hci_pi(sk)->filter;
1193
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001194 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 uf.type_mask = f->type_mask;
1196 uf.opcode = f->opcode;
1197 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1198 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1199 }
1200
1201 len = min_t(unsigned int, len, sizeof(uf));
1202 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001203 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 break;
1205
1206 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001207 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 break;
1209 }
1210
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001211done:
1212 release_sock(sk);
1213 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214}
1215
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001216static const struct proto_ops hci_sock_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 .family = PF_BLUETOOTH,
1218 .owner = THIS_MODULE,
1219 .release = hci_sock_release,
1220 .bind = hci_sock_bind,
1221 .getname = hci_sock_getname,
1222 .sendmsg = hci_sock_sendmsg,
1223 .recvmsg = hci_sock_recvmsg,
1224 .ioctl = hci_sock_ioctl,
1225 .poll = datagram_poll,
1226 .listen = sock_no_listen,
1227 .shutdown = sock_no_shutdown,
1228 .setsockopt = hci_sock_setsockopt,
1229 .getsockopt = hci_sock_getsockopt,
1230 .connect = sock_no_connect,
1231 .socketpair = sock_no_socketpair,
1232 .accept = sock_no_accept,
1233 .mmap = sock_no_mmap
1234};
1235
/* Protocol descriptor; obj_size tells the socket core to allocate
 * room for the per-socket struct hci_pinfo state.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1241
Eric Paris3f378b62009-11-05 22:18:14 -08001242static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1243 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244{
1245 struct sock *sk;
1246
1247 BT_DBG("sock %p", sock);
1248
1249 if (sock->type != SOCK_RAW)
1250 return -ESOCKTNOSUPPORT;
1251
1252 sock->ops = &hci_sock_ops;
1253
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001254 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 if (!sk)
1256 return -ENOMEM;
1257
1258 sock_init_data(sock, sk);
1259
1260 sock_reset_flag(sk, SOCK_ZAPPED);
1261
1262 sk->sk_protocol = protocol;
1263
1264 sock->state = SS_UNCONNECTED;
1265 sk->sk_state = BT_OPEN;
1266
1267 bt_sock_link(&hci_sk_list, sk);
1268 return 0;
1269}
1270
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00001271static const struct net_proto_family hci_sock_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 .family = PF_BLUETOOTH,
1273 .owner = THIS_MODULE,
1274 .create = hci_sock_create,
1275};
1276
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277int __init hci_sock_init(void)
1278{
1279 int err;
1280
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001281 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1282
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 err = proto_register(&hci_sk_proto, 0);
1284 if (err < 0)
1285 return err;
1286
1287 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001288 if (err < 0) {
1289 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001291 }
1292
Al Virob0316612013-04-04 19:14:33 -04001293 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001294 if (err < 0) {
1295 BT_ERR("Failed to create HCI proc file");
1296 bt_sock_unregister(BTPROTO_HCI);
1297 goto error;
1298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 BT_INFO("HCI socket layer initialized");
1301
1302 return 0;
1303
1304error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 proto_unregister(&hci_sk_proto);
1306 return err;
1307}
1308
Anand Gadiyarb7440a142011-02-22 12:43:09 +05301309void hci_sock_cleanup(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310{
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001311 bt_procfs_cleanup(&init_net, "hci");
David Herrmann5e9d7f82013-02-24 19:36:51 +01001312 bt_sock_unregister(BTPROTO_HCI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 proto_unregister(&hci_sk_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314}