/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

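/* Apply the per-socket HCI filter to a frame. Returns true when the frame
 * must be dropped for this socket and false when it may be queued, based
 * on the packet type, the event and (for command complete/status events)
 * the opcode configured through the HCI_FILTER socket option.
 */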
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

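/* Build a monitor channel control frame (new/del index, index info,
 * open/close index) describing a state change of the given controller.
 * Returns NULL if the event does not map to a monitor message or if
 * allocation fails.
 */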
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

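/* Build a HCI_MON_CTRL_OPEN frame announcing a management control socket.
 * It carries the socket cookie, the channel format, the mgmt version,
 * the trusted flag and the name of the task that opened the socket.
 */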
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format = 0x0002;
	u8 ver[3];
	u32 flags;

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	mgmt_fill_version_info(ver);
	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	*skb_put(skb, 1) = TASK_COMM_LEN;
	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

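/* Queue a HCI_MON_SYSTEM_NOTE frame carrying a printf-formatted,
 * NUL-terminated text note directly to the given monitor socket.
 */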
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

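/* Replay the current controller state (register, open and up/setup) to a
 * newly bound monitor socket so that it starts with a consistent view of
 * all registered HCI devices.
 */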
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

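/* Replay a HCI_MON_CTRL_OPEN frame for every existing control channel
 * socket to a newly bound monitor socket.
 */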
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

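/* Propagate a device event: forward it to the monitor channel, generate
 * the stack-internal event for raw sockets and, on unregister, detach
 * all sockets that are still bound to the device.
 */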
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

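/* Release an HCI socket: announce the close on the monitor channel for
 * control sockets, give back user channel exclusive access (closing the
 * controller) if one was held, and purge the queued frames.
 */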
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int id;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_CONTROL:
		id = hci_pi(sk)->cookie;

		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel's exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

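/* Bind the socket to one of the HCI channels (raw, user, monitor,
 * logging or a registered management channel). Capability checks,
 * exclusive user channel setup and the initial monitor replay all
 * happen here.
 */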
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %s",
				  BT_SUBSYS_VERSION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			struct sk_buff *skb;
			int id;

			id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
			if (id < 0)
				id = 0xffffffff;

			hci_pi(sk)->cookie = id;
			get_task_comm(hci_pi(sk)->comm, current);

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

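/* Parse and dispatch one management command: validate the header,
 * permissions, target index and parameter length before calling the
 * handler registered for the opcode on this channel.
 */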
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

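/* Validate a userspace logging frame (priority, ident and NUL-terminated
 * message) and forward it to the monitor channel as HCI_MON_USER_LOGGING.
 */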
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

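/* Transmit path for HCI sockets: management and logging channels are
 * dispatched to their handlers, while raw and user channel frames are
 * validated and queued towards the controller, applying the security
 * filter to commands sent without CAP_NET_RAW.
 */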
Ying Xue1b784142015-03-02 15:37:48 +08001419static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1420 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421{
1422 struct sock *sk = sock->sk;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001423 struct hci_mgmt_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 struct hci_dev *hdev;
1425 struct sk_buff *skb;
1426 int err;
1427
1428 BT_DBG("sock %p sk %p", sock, sk);
1429
1430 if (msg->msg_flags & MSG_OOB)
1431 return -EOPNOTSUPP;
1432
1433 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1434 return -EINVAL;
1435
1436 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1437 return -EINVAL;
1438
1439 lock_sock(sk);
1440
Johan Hedberg03811012010-12-08 00:21:06 +02001441 switch (hci_pi(sk)->channel) {
1442 case HCI_CHANNEL_RAW:
Marcel Holtmann23500182013-08-26 21:40:52 -07001443 case HCI_CHANNEL_USER:
Johan Hedberg03811012010-12-08 00:21:06 +02001444 break;
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001445 case HCI_CHANNEL_MONITOR:
1446 err = -EOPNOTSUPP;
1447 goto done;
Marcel Holtmannac714942015-11-08 07:47:13 +01001448 case HCI_CHANNEL_LOGGING:
1449 err = hci_logging_frame(sk, msg, len);
1450 goto done;
Johan Hedberg03811012010-12-08 00:21:06 +02001451 default:
Johan Hedberg801c1e82015-03-06 21:08:50 +02001452 mutex_lock(&mgmt_chan_list_lock);
1453 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1454 if (chan)
Johan Hedbergfa4335d2015-03-17 13:48:50 +02001455 err = hci_mgmt_cmd(chan, sk, msg, len);
Johan Hedberg801c1e82015-03-06 21:08:50 +02001456 else
1457 err = -EINVAL;
1458
1459 mutex_unlock(&mgmt_chan_list_lock);
Johan Hedberg03811012010-12-08 00:21:06 +02001460 goto done;
1461 }
1462
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001463 hdev = hci_pi(sk)->hdev;
1464 if (!hdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 err = -EBADFD;
1466 goto done;
1467 }
1468
Marcel Holtmann7e21add2009-11-18 01:05:00 +01001469 if (!test_bit(HCI_UP, &hdev->flags)) {
1470 err = -ENETDOWN;
1471 goto done;
1472 }
1473
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001474 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1475 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 goto done;
1477
Al Viro6ce8e9c2014-04-06 21:25:44 -04001478 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 err = -EFAULT;
1480 goto drop;
1481 }
1482
Marcel Holtmann8528d3f2015-11-08 07:47:11 +01001483 hci_skb_pkt_type(skb) = skb->data[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 skb_pull(skb, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

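/* Usage sketch (userspace, illustrative only; not part of this file):
 * a process holding CAP_NET_RAW could exercise the sendmsg() path above
 * roughly like this, assuming hci0 exists. The constants and struct
 * sockaddr_hci come from the regular Bluetooth userspace headers; error
 * handling is omitted for brevity.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,			// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	// HCI_Reset: packet type 0x01, opcode 0x0c03 (LE), no parameters
 *	unsigned char cmd[] = { 0x01, 0x03, 0x0c, 0x00 };
 *	write(fd, cmd, sizeof(cmd));
 */
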
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

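	/* HCI_DATA_DIR and HCI_TIME_STAMP just toggle bits in cmsg_mask so
	 * that recvmsg() attaches direction and timestamp ancillary data.
	 * HCI_FILTER installs the per-socket receive filter; for callers
	 * without CAP_NET_RAW it is clamped against hci_sec_filter.
	 */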
	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

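/* Usage sketch (userspace, illustrative only; not part of this file): a raw
 * channel client typically narrows what it receives before issuing commands,
 * using the raw socket fd from the earlier sketch. The hci_filter_* helpers
 * and event constants are the ones shipped with the BlueZ library headers;
 * filling a struct hci_filter by hand works just as well.
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_STATUS, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */
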
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

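/* HCI sockets are datagram-style and never connection oriented, so the
 * connect/listen/accept/shutdown/mmap operations are all stubbed out with
 * the generic sock_no_* helpers.
 */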
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

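/* Invoked via the PF_BLUETOOTH family create hook below whenever userspace
 * calls socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI). The new socket starts
 * out unbound (BT_OPEN) and is only tied to a device and channel later in
 * hci_sock_bind().
 */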
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

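/* Called from the core Bluetooth module init: register the protocol with the
 * socket layer, hook BTPROTO_HCI into the PF_BLUETOOTH family and expose the
 * socket list under /proc/net/bluetooth/hci.
 */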
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

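/* Tear-down mirrors hci_sock_init() in reverse order. */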
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}