/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

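/* Whitelist applied to HCI_CHANNEL_RAW sockets that lack CAP_NET_RAW:
 * type_mask and event_mask bound what such a socket may request via the
 * HCI_FILTER socket option, and ocf_mask[ogf] is a per-OGF bitmap of the
 * command OCFs it is allowed to send (checked in hci_sock_sendmsg()).
 */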
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

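/* Apply the per-socket HCI filter to an incoming frame.  Returns true when
 * the frame must be withheld from this socket: packet type not in type_mask,
 * event not in event_mask, or a Command Complete/Status event that does not
 * match the opcode the filter asked for.
 */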
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

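/* Every frame copied to HCI_CHANNEL_MONITOR sockets gets a struct
 * hci_mon_hdr prepended carrying the monitor opcode (command, event, ACL or
 * SCO, RX or TX), the controller index and the payload length, so one
 * monitor socket can trace all controllers at once.
 */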
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
	kfree_skb(skb_copy);
}

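/* Build the monitor event announcing a controller: HCI_DEV_REG becomes an
 * HCI_MON_NEW_INDEX record carrying type, bus, address and name, while
 * HCI_DEV_UNREG becomes an empty HCI_MON_DEL_INDEX record.
 */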
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

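/* Replay an HCI_MON_NEW_INDEX event for every registered controller so that
 * a newly bound monitor socket starts out with the complete index list
 * before live traffic is delivered.
 */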
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

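/* Called by the core for controller hotplug and state changes.  The event is
 * mirrored to monitor sockets, announced to raw sockets as an
 * HCI_EV_SI_DEVICE stack-internal event and, on HCI_DEV_UNREG, any socket
 * still bound to the device is detached and woken with EPIPE.
 */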
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

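/* Top-level ioctl dispatcher.  Requests that do not need a bound controller
 * (device enumeration, up/down/reset, inquiry) are handled with the socket
 * lock dropped; everything else is passed to hci_sock_bound_ioctl() under
 * the lock.  Only HCI_CHANNEL_RAW sockets may issue ioctls.
 */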
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

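/* Binding one of the HCI channels from userspace looks roughly like the
 * sketch below (illustrative only, error handling omitted; constants come
 * from the Bluetooth UAPI headers):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.hci_family  = AF_BLUETOOTH;
 *	addr.hci_dev     = 0;
 *	addr.hci_channel = HCI_CHANNEL_RAW;
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 * hci_dev selects the controller index (HCI_DEV_NONE for the control and
 * monitor channels).  HCI_CHANNEL_RAW needs no extra privilege here,
 * HCI_CHANNEL_USER requires CAP_NET_ADMIN plus exclusive ownership of the
 * controller, HCI_CHANNEL_CONTROL requires CAP_NET_ADMIN, and
 * HCI_CHANNEL_MONITOR requires CAP_NET_RAW, as enforced below.
 */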
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

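/* Attach ancillary data to a received frame: the direction flag when the
 * socket enabled HCI_DATA_DIR, and a (possibly compat-sized) timestamp when
 * it enabled HCI_TIME_STAMP.
 */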
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

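/* Transmit path.  The first byte of the payload is the HCI packet type
 * indicator.  HCI_CHANNEL_USER frames go straight to the driver queue,
 * HCI_CHANNEL_CONTROL is handed to mgmt_control(), and for HCI_CHANNEL_RAW
 * commands from sockets without CAP_NET_RAW are checked against
 * hci_sec_filter; vendor commands (OGF 0x3f) bypass the command queue via
 * raw_q, while all other commands are queued on cmd_q and flagged as the
 * start of a new request.
 */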
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

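/* Which packet types and events a raw socket receives is controlled with
 * setsockopt(SOL_HCI, HCI_FILTER).  An illustrative userspace sketch, using
 * the filter helpers that BlueZ ships in hci_lib.h (those helper names come
 * from that library, not from this file):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_all_events(&flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * For sockets without CAP_NET_RAW the requested masks are clamped to
 * hci_sec_filter in hci_sock_setsockopt() below.
 */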
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}