blob: 41f579ba447b78e8cac9d20e41c9a7ca778a1cc4 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
Marcel Holtmanndd315062015-11-08 07:47:12 +010029#include <generated/compile.h>
30#include <generated/utsrelease.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010034#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020035#include <net/bluetooth/mgmt.h>
36
37#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
/* Registered management channels (numbers >= HCI_CHANNEL_CONTROL, see
 * hci_mgmt_chan_register()); the list is protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Count of open monitor sockets; while non-zero, packets and device
 * events are also copied to HCI_CHANNEL_MONITOR sockets.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
43
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* ----- HCI socket interface ----- */
45
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-HCI-socket state; 'bt' must stay first so the cast in hci_pi()
 * from struct sock is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound device, NULL if unbound */
	struct hci_filter filter;	/* RAW-channel packet filter */
	__u32             cmsg_mask;
	unsigned short    channel;	/* HCI_CHANNEL_* the socket bound to */
	unsigned long     flags;	/* HCI_SOCK_* bits, see accessors below */
};
57
/* Atomically set per-socket flag bit @nr (e.g. HCI_SOCK_TRUSTED). */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
62
63void hci_sock_clear_flag(struct sock *sk, int nr)
64{
65 clear_bit(nr, &hci_pi(sk)->flags);
66}
67
/* Test per-socket flag bit @nr; returns non-zero when set. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
72
/* Return the HCI_CHANNEL_* this socket was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
77
Jiri Slaby93919762015-02-19 15:20:43 +010078static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Jiri Slaby93919762015-02-19 15:20:43 +010080 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070081}
82
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmaps describing what an unprivileged RAW socket may see/send.
 * NOTE(review): bit positions appear to correspond to packet types,
 * event codes and per-OGF OCF values respectively — confirm against
 * the Bluetooth Core Specification opcode tables before editing.
 */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
112
/* Global list of all open HCI sockets, guarded by its embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
116
/* Check @skb against the socket's filter.
 *
 * Returns true when the packet must be DROPPED for this socket and
 * false when it may be delivered.  Non-event packets are only matched
 * against the type mask; event packets are additionally matched against
 * the event mask and, when an opcode filter is set, against the opcode
 * embedded in Command Complete / Command Status events.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the type mask -> drop */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First payload byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode lives at offset 3 (Command Complete) resp. offset 4
	 * (Command Status) of the event payload; may be unaligned.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
153
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154/* Send frame to RAW socket */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100155void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 struct sock *sk;
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100158 struct sk_buff *skb_copy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
160 BT_DBG("hdev %p len %d", hdev, skb->len);
161
162 read_lock(&hci_sk_list.lock);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100163
Sasha Levinb67bfe02013-02-27 17:06:00 -0800164 sk_for_each(sk, &hci_sk_list.head) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165 struct sk_buff *nskb;
166
167 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
168 continue;
169
170 /* Don't send frame to the socket it came from */
171 if (skb->sk == sk)
172 continue;
173
Marcel Holtmann23500182013-08-26 21:40:52 -0700174 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100175 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
176 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
177 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
178 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
Marcel Holtmannbb775432015-10-09 16:13:50 +0200179 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700180 if (is_filtered_packet(sk, skb))
181 continue;
182 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
183 if (!bt_cb(skb)->incoming)
184 continue;
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100185 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
186 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
187 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
Marcel Holtmann23500182013-08-26 21:40:52 -0700188 continue;
189 } else {
190 /* Don't send frame to other channel types */
Johan Hedberga40c4062010-12-08 00:21:07 +0200191 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100194 if (!skb_copy) {
195 /* Create a private copy with headroom */
Octavian Purdilabad93e92014-06-12 01:36:26 +0300196 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100197 if (!skb_copy)
198 continue;
199
200 /* Put type byte before the data */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100201 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100202 }
203
204 nskb = skb_clone(skb_copy, GFP_ATOMIC);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200205 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 continue;
207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 if (sock_queue_rcv_skb(sk, nskb))
209 kfree_skb(nskb);
210 }
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100211
212 read_unlock(&hci_sk_list.lock);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100213
214 kfree_skb(skb_copy);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100215}
216
Johan Hedberg71290692015-02-20 13:26:23 +0200217/* Send frame to sockets with specific channel */
218void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700219 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100220{
221 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100222
Johan Hedberg71290692015-02-20 13:26:23 +0200223 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100224
225 read_lock(&hci_sk_list.lock);
226
Sasha Levinb67bfe02013-02-27 17:06:00 -0800227 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100228 struct sk_buff *nskb;
229
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700230 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700231 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700232 continue;
233
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100234 /* Skip the original socket */
235 if (sk == skip_sk)
236 continue;
237
238 if (sk->sk_state != BT_BOUND)
239 continue;
240
Johan Hedberg71290692015-02-20 13:26:23 +0200241 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800242 continue;
243
244 nskb = skb_clone(skb, GFP_ATOMIC);
245 if (!nskb)
246 continue;
247
248 if (sock_queue_rcv_skb(sk, nskb))
249 kfree_skb(nskb);
250 }
251
252 read_unlock(&hci_sk_list.lock);
253}
254
/* Send frame to monitor socket.
 *
 * Wraps @skb in a hci_mon_hdr whose opcode encodes packet type and
 * direction, then broadcasts it to all trusted monitor sockets.
 * Cheap early-out when no monitor socket is open.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the
	 * monitor opcode; unknown types are not forwarded.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
308
/* Build a monitor-channel event skb for a device lifecycle @event.
 *
 * Returns a freshly allocated skb with a filled-in hci_mon_hdr, or
 * NULL on allocation failure, unknown event, or when the event should
 * not be reported (HCI_DEV_SETUP with no valid manufacturer yet).
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte name field in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* Suppress index info until a real manufacturer id is
		 * known; otherwise share the HCI_DEV_UP handling.
		 */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len covers only the payload */
	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
387
/* Queue a free-form system note (NUL-terminated @text) on monitor
 * socket @sk, wrapped in an HCI_MON_SYSTEM_NOTE header with no device
 * index (HCI_DEV_NONE).  Silently dropped on allocation failure.
 */
static void send_monitor_note(struct sock *sk, const char *text)
{
	size_t len = strlen(text);
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* +1 for the trailing NUL, which is part of the payload */
	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	strcpy(skb_put(skb, len + 1), text);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
410
/* Replay the current state of all registered controllers to a newly
 * opened monitor socket @sk: NEW_INDEX for each device, plus OPEN_INDEX
 * if the transport is running and INDEX_INFO for up / in-setup devices,
 * so the monitor starts with a consistent picture.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Nothing more to report for closed transports */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
452
Marcel Holtmann040030e2012-02-20 14:50:37 +0100453/* Generate internal stack event */
454static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
455{
456 struct hci_event_hdr *hdr;
457 struct hci_ev_stack_internal *ev;
458 struct sk_buff *skb;
459
460 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
461 if (!skb)
462 return;
463
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100464 hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
Marcel Holtmann040030e2012-02-20 14:50:37 +0100465 hdr->evt = HCI_EV_STACK_INTERNAL;
466 hdr->plen = sizeof(*ev) + dlen;
467
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100468 ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
Marcel Holtmann040030e2012-02-20 14:50:37 +0100469 ev->type = type;
470 memcpy(ev->data, data, dlen);
471
472 bt_cb(skb)->incoming = 1;
473 __net_timestamp(skb);
474
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100475 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann040030e2012-02-20 14:50:37 +0100476 hci_send_to_sock(hdev, skb);
477 kfree_skb(skb);
478}
479
/* Fan out a device lifecycle @event (HCI_DEV_REG/UNREG/UP/DOWN/...):
 * to monitor sockets (when any are open), as a stack-internal event to
 * ordinary sockets for events up to HCI_DEV_DOWN, and on HCI_DEV_UNREG
 * detach every socket still bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Wake readers with EPIPE; drop the
				 * reference the socket held on hdev.
				 */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
525
/* Look up a registered management channel by number; caller must hold
 * mgmt_chan_list_lock.  Returns NULL when not registered.
 */
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}
537
/* Locked wrapper around __hci_mgmt_chan_find(). */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
548
/* Register a management channel handler.
 *
 * Returns 0 on success, -EINVAL for channel numbers below
 * HCI_CHANNEL_CONTROL, or -EALREADY when the number is taken.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
567
/* Remove a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
575
/* Release an HCI socket: drop monitor promiscuity, unlink from the
 * global socket list, give back exclusive USER-channel access (closing
 * the controller), release device references and purge queues.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
621
Antti Julkub2a66aa2011-06-15 12:01:14 +0300622static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200623{
624 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300625 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200626
627 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
628 return -EFAULT;
629
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300630 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300631
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300632 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300633
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300634 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300635
636 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200637}
638
Antti Julkub2a66aa2011-06-15 12:01:14 +0300639static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200640{
641 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300642 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200643
644 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
645 return -EFAULT;
646
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300647 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300648
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300649 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300650
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300651 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300652
653 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200654}
655
/* Ioctls that require bound socket.
 *
 * Rejects sockets whose device is gone (-EBADFD), is claimed by a user
 * channel (-EBUSY), is unconfigured or non-BR/EDR (-EOPNOTSUPP).
 * Returns -ENOIOCTLCMD for commands not handled here.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * privilege check semantics for compatibility.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
699
/* Top-level HCI socket ioctl handler.
 *
 * Only RAW-channel sockets may issue ioctls (-EBADFD otherwise).
 * Device-independent commands are dispatched with the socket lock
 * released; anything unhandled falls through to hci_sock_bound_ioctl()
 * with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below do not touch per-socket state, so drop
	 * the socket lock before potentially sleeping in them.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
772
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300773static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
774 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775{
Johan Hedberg03811012010-12-08 00:21:06 +0200776 struct sockaddr_hci haddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 struct sock *sk = sock->sk;
778 struct hci_dev *hdev = NULL;
Johan Hedberg03811012010-12-08 00:21:06 +0200779 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780
781 BT_DBG("sock %p sk %p", sock, sk);
782
Johan Hedberg03811012010-12-08 00:21:06 +0200783 if (!addr)
784 return -EINVAL;
785
786 memset(&haddr, 0, sizeof(haddr));
787 len = min_t(unsigned int, sizeof(haddr), addr_len);
788 memcpy(&haddr, addr, len);
789
790 if (haddr.hci_family != AF_BLUETOOTH)
791 return -EINVAL;
792
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 lock_sock(sk);
794
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100795 if (sk->sk_state == BT_BOUND) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 err = -EALREADY;
797 goto done;
798 }
799
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100800 switch (haddr.hci_channel) {
801 case HCI_CHANNEL_RAW:
802 if (hci_pi(sk)->hdev) {
803 err = -EALREADY;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 goto done;
805 }
806
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100807 if (haddr.hci_dev != HCI_DEV_NONE) {
808 hdev = hci_dev_get(haddr.hci_dev);
809 if (!hdev) {
810 err = -ENODEV;
811 goto done;
812 }
813
814 atomic_inc(&hdev->promisc);
815 }
816
817 hci_pi(sk)->hdev = hdev;
818 break;
819
Marcel Holtmann23500182013-08-26 21:40:52 -0700820 case HCI_CHANNEL_USER:
821 if (hci_pi(sk)->hdev) {
822 err = -EALREADY;
823 goto done;
824 }
825
826 if (haddr.hci_dev == HCI_DEV_NONE) {
827 err = -EINVAL;
828 goto done;
829 }
830
Marcel Holtmann10a8b862013-10-01 22:59:24 -0700831 if (!capable(CAP_NET_ADMIN)) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700832 err = -EPERM;
833 goto done;
834 }
835
836 hdev = hci_dev_get(haddr.hci_dev);
837 if (!hdev) {
838 err = -ENODEV;
839 goto done;
840 }
841
Marcel Holtmann781f8992015-06-06 06:06:49 +0200842 if (test_bit(HCI_INIT, &hdev->flags) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700843 hci_dev_test_flag(hdev, HCI_SETUP) ||
Marcel Holtmann781f8992015-06-06 06:06:49 +0200844 hci_dev_test_flag(hdev, HCI_CONFIG) ||
845 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
846 test_bit(HCI_UP, &hdev->flags))) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700847 err = -EBUSY;
848 hci_dev_put(hdev);
849 goto done;
850 }
851
Marcel Holtmann238be782015-03-13 02:11:06 -0700852 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700853 err = -EUSERS;
854 hci_dev_put(hdev);
855 goto done;
856 }
857
Marcel Holtmann0602a8a2014-07-02 21:30:54 +0200858 mgmt_index_removed(hdev);
Marcel Holtmann23500182013-08-26 21:40:52 -0700859
860 err = hci_dev_open(hdev->id);
861 if (err) {
Marcel Holtmann781f8992015-06-06 06:06:49 +0200862 if (err == -EALREADY) {
863 /* In case the transport is already up and
864 * running, clear the error here.
865 *
866 * This can happen when opening an user
867 * channel and HCI_AUTO_OFF grace period
868 * is still active.
869 */
870 err = 0;
871 } else {
872 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
873 mgmt_index_added(hdev);
874 hci_dev_put(hdev);
875 goto done;
876 }
Marcel Holtmann23500182013-08-26 21:40:52 -0700877 }
878
879 atomic_inc(&hdev->promisc);
880
881 hci_pi(sk)->hdev = hdev;
882 break;
883
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100884 case HCI_CHANNEL_MONITOR:
885 if (haddr.hci_dev != HCI_DEV_NONE) {
886 err = -EINVAL;
887 goto done;
888 }
889
890 if (!capable(CAP_NET_RAW)) {
891 err = -EPERM;
892 goto done;
893 }
894
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700895 /* The monitor interface is restricted to CAP_NET_RAW
896 * capabilities and with that implicitly trusted.
897 */
898 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
899
Marcel Holtmanndd315062015-11-08 07:47:12 +0100900 send_monitor_note(sk, "Linux version " UTS_RELEASE
901 " (" UTS_MACHINE ")");
902 send_monitor_note(sk, "Bluetooth subsystem version "
903 BT_SUBSYS_VERSION);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100904 send_monitor_replay(sk);
905
906 atomic_inc(&monitor_promisc);
907 break;
908
Marcel Holtmannac714942015-11-08 07:47:13 +0100909 case HCI_CHANNEL_LOGGING:
910 if (haddr.hci_dev != HCI_DEV_NONE) {
911 err = -EINVAL;
912 goto done;
913 }
914
915 if (!capable(CAP_NET_ADMIN)) {
916 err = -EPERM;
917 goto done;
918 }
919 break;
920
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100921 default:
Johan Hedberg801c1e82015-03-06 21:08:50 +0200922 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
923 err = -EINVAL;
924 goto done;
925 }
926
927 if (haddr.hci_dev != HCI_DEV_NONE) {
928 err = -EINVAL;
929 goto done;
930 }
931
Marcel Holtmann1195fbb2015-03-14 19:28:04 -0700932 /* Users with CAP_NET_ADMIN capabilities are allowed
933 * access to all management commands and events. For
934 * untrusted users the interface is restricted and
935 * also only untrusted events are sent.
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700936 */
Marcel Holtmann1195fbb2015-03-14 19:28:04 -0700937 if (capable(CAP_NET_ADMIN))
938 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700939
Marcel Holtmannf9207332015-03-14 19:27:55 -0700940 /* At the moment the index and unconfigured index events
941 * are enabled unconditionally. Setting them on each
942 * socket when binding keeps this functionality. They
943 * however might be cleared later and then sending of these
944 * events will be disabled, but that is then intentional.
Marcel Holtmannf6b77122015-03-14 19:28:05 -0700945 *
946 * This also enables generic events that are safe to be
947 * received by untrusted users. Example for such events
948 * are changes to settings, class of device, name etc.
Marcel Holtmannf9207332015-03-14 19:27:55 -0700949 */
950 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
951 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
952 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
Marcel Holtmannf6b77122015-03-14 19:28:05 -0700953 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
Marcel Holtmannf9207332015-03-14 19:27:55 -0700954 }
Johan Hedberg801c1e82015-03-06 21:08:50 +0200955 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 }
957
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100958
Johan Hedberg03811012010-12-08 00:21:06 +0200959 hci_pi(sk)->channel = haddr.hci_channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 sk->sk_state = BT_BOUND;
961
962done:
963 release_sock(sk);
964 return err;
965}
966
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300967static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
968 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969{
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100970 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700972 struct hci_dev *hdev;
973 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
975 BT_DBG("sock %p sk %p", sock, sk);
976
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700977 if (peer)
978 return -EOPNOTSUPP;
979
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 lock_sock(sk);
981
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700982 hdev = hci_pi(sk)->hdev;
983 if (!hdev) {
984 err = -EBADFD;
985 goto done;
986 }
987
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 *addr_len = sizeof(*haddr);
989 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100990 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700991 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700993done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700995 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996}
997
/* Attach the ancillary data requested via socket options to a message
 * delivered on a raw HCI socket: the per-packet direction (HCI_CMSG_DIR)
 * and/or the packet timestamp (HCI_CMSG_TSTAMP), depending on the
 * socket's cmsg_mask.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		/* Default: native timeval layout */
		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel expect the compat
		 * timeval layout unless they already use 64-bit time.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001034
Marcel Holtmann8528d3f2015-11-08 07:47:11 +01001035static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1036 size_t len, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037{
1038 int noblock = flags & MSG_DONTWAIT;
1039 struct sock *sk = sock->sk;
1040 struct sk_buff *skb;
1041 int copied, err;
1042
1043 BT_DBG("sock %p, sk %p", sock, sk);
1044
Marcel Holtmannd94a6102015-10-25 22:45:18 +01001045 if (flags & MSG_OOB)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046 return -EOPNOTSUPP;
1047
Marcel Holtmannac714942015-11-08 07:47:13 +01001048 if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1049 return -EOPNOTSUPP;
1050
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 if (sk->sk_state == BT_CLOSED)
1052 return 0;
1053
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001054 skb = skb_recv_datagram(sk, flags, noblock, &err);
1055 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056 return err;
1057
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058 copied = skb->len;
1059 if (len < copied) {
1060 msg->msg_flags |= MSG_TRUNC;
1061 copied = len;
1062 }
1063
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001064 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -05001065 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066
Marcel Holtmann3a208622012-02-20 14:50:34 +01001067 switch (hci_pi(sk)->channel) {
1068 case HCI_CHANNEL_RAW:
1069 hci_sock_cmsg(sk, msg, skb);
1070 break;
Marcel Holtmann23500182013-08-26 21:40:52 -07001071 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001072 case HCI_CHANNEL_MONITOR:
1073 sock_recv_timestamp(msg, sk, skb);
1074 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001075 default:
1076 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1077 sock_recv_timestamp(msg, sk, skb);
1078 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +01001079 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080
1081 skb_free_datagram(sk, skb);
1082
1083 return err ? : copied;
1084}
1085
/* Dispatch a single management command received on a management channel.
 *
 * The raw message is copied in from userspace, its mgmt_hdr validated
 * against the channel's handler table, and the command forwarded to the
 * registered handler. Returns the number of consumed bytes on success
 * or a negative errno; protocol-level failures are reported back to the
 * sender via mgmt_cmd_status() and the message still counts as consumed.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	/* At minimum a complete management header must be present */
	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only invoke handlers explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or exclusively
		 * owned by a user channel, are not addressable through
		 * the management interface.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only commands that
		 * declare HCI_MGMT_UNCONFIGURED support.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged HCI_MGMT_NO_HDEV must be sent without an
	 * index; all other commands require one. no_hdev != !hdev
	 * catches both mismatches.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands may exceed their minimum size;
	 * fixed-length commands must match it exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Give the channel a chance to bind per-device state first */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the full message as consumed */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1196
/* Handle a write on an HCI_CHANNEL_LOGGING socket: validate the user
 * supplied logging packet and broadcast it to all monitor sockets as
 * an HCI_MON_USER_LOGGING event. Returns the consumed length on
 * success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* Header length field must match the actual payload size */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Opcode 0x0000 is currently the only accepted logging opcode */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* A non-wildcard index must name an existing controller; the
	 * reference is only held to validate existence.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode so monitor readers see a user-logging event */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

	/* NOTE: the success path intentionally falls through to "drop";
	 * hci_send_to_channel() presumably hands out its own copies, so
	 * this function's skb reference is freed either way — confirm
	 * against hci_send_to_channel() if modifying this path.
	 */
drop:
	kfree_skb(skb);
	return err;
}
1280
/* Send one frame on an HCI socket.
 *
 * Dispatch depends on the bound channel: monitor sockets are read-only,
 * logging frames are handed to hci_logging_frame(), management channels
 * go through hci_mgmt_cmd(), and raw/user channel frames are queued to
 * the controller. The first payload byte selects the HCI packet type.
 * Returns the consumed length or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: packet type byte plus a 3 byte header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		/* Fall through to the controller queueing path below */
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Anything else must be a registered management channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the HCI packet type indicator */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		/* OGF 0x3f (vendor specific) bypasses the command queue */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO injection requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1419
/* Set an SOL_HCI socket option. Only raw channel sockets carry these
 * options (data direction cmsg, timestamp cmsg, event filter); every
 * other channel fails with -EBADFD.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Preload uf with the current filter so that a short
		 * copy from userspace only replaces the leading fields
		 * and keeps the rest at their present values.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged users are confined to the subset allowed
		 * by the global security filter.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		/* Commit the (possibly restricted) filter */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1502
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001503static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1504 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505{
1506 struct hci_ufilter uf;
1507 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001508 int len, opt, err = 0;
1509
1510 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511
1512 if (get_user(len, optlen))
1513 return -EFAULT;
1514
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001515 lock_sock(sk);
1516
1517 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001518 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001519 goto done;
1520 }
1521
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 switch (optname) {
1523 case HCI_DATA_DIR:
1524 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1525 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001526 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 opt = 0;
1528
1529 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001530 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 break;
1532
1533 case HCI_TIME_STAMP:
1534 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1535 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001536 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 opt = 0;
1538
1539 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001540 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 break;
1542
1543 case HCI_FILTER:
1544 {
1545 struct hci_filter *f = &hci_pi(sk)->filter;
1546
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001547 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 uf.type_mask = f->type_mask;
1549 uf.opcode = f->opcode;
1550 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1551 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1552 }
1553
1554 len = min_t(unsigned int, len, sizeof(uf));
1555 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001556 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 break;
1558
1559 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001560 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 break;
1562 }
1563
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001564done:
1565 release_sock(sk);
1566 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567}
1568
/* Socket call table for HCI sockets. Calls without a meaningful HCI
 * implementation (listen, connect, accept, ...) are mapped to the
 * generic sock_no_* stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1588
/* Protocol definition backing every HCI socket allocation; obj_size
 * makes sk_alloc() reserve room for the per-socket hci_pinfo state.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1594
Eric Paris3f378b62009-11-05 22:18:14 -08001595static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1596 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597{
1598 struct sock *sk;
1599
1600 BT_DBG("sock %p", sock);
1601
1602 if (sock->type != SOCK_RAW)
1603 return -ESOCKTNOSUPPORT;
1604
1605 sock->ops = &hci_sock_ops;
1606
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001607 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 if (!sk)
1609 return -ENOMEM;
1610
1611 sock_init_data(sock, sk);
1612
1613 sock_reset_flag(sk, SOCK_ZAPPED);
1614
1615 sk->sk_protocol = protocol;
1616
1617 sock->state = SS_UNCONNECTED;
1618 sk->sk_state = BT_OPEN;
1619
1620 bt_sock_link(&hci_sk_list, sk);
1621 return 0;
1622}
1623
/* PF_BLUETOOTH family hook for creating BTPROTO_HCI sockets */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1629
/* Register the HCI socket protocol, the BTPROTO_HCI family handler and
 * the bluetooth procfs entry. Called once during subsystem init;
 * returns 0 on success or a negative errno with everything unwound.
 */
int __init hci_sock_init(void)
{
	int err;

	/* struct sockaddr_hci must fit inside the generic sockaddr that
	 * userspace hands to bind()/getname().
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the family registration here; the shared error
		 * path only removes the proto.
		 */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1661
/* Tear down everything hci_sock_init() registered, in reverse order */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}