blob: 710265c35d16ac2eeaf088059281529789197117 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
Marcel Holtmanndd315062015-11-08 07:47:12 +010029#include <generated/compile.h>
30#include <generated/utsrelease.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010034#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020035#include <net/bluetooth/mgmt.h>
36
37#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Johan Hedberg801c1e82015-03-06 21:08:50 +020039static LIST_HEAD(mgmt_chan_list);
40static DEFINE_MUTEX(mgmt_chan_list_lock);
41
Marcel Holtmanncd82e612012-02-20 20:34:38 +010042static atomic_t monitor_promisc = ATOMIC_INIT(0);
43
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* ----- HCI socket interface ----- */
45
Marcel Holtmann863def52014-07-11 05:41:00 +020046/* Socket info */
47#define hci_pi(sk) ((struct hci_pinfo *) sk)
48
49struct hci_pinfo {
50 struct bt_sock bt;
51 struct hci_dev *hdev;
52 struct hci_filter filter;
53 __u32 cmsg_mask;
54 unsigned short channel;
Marcel Holtmann6befc642015-03-14 19:27:53 -070055 unsigned long flags;
Marcel Holtmann863def52014-07-11 05:41:00 +020056};
57
Marcel Holtmann6befc642015-03-14 19:27:53 -070058void hci_sock_set_flag(struct sock *sk, int nr)
59{
60 set_bit(nr, &hci_pi(sk)->flags);
61}
62
63void hci_sock_clear_flag(struct sock *sk, int nr)
64{
65 clear_bit(nr, &hci_pi(sk)->flags);
66}
67
Marcel Holtmannc85be542015-03-14 19:28:00 -070068int hci_sock_test_flag(struct sock *sk, int nr)
69{
70 return test_bit(nr, &hci_pi(sk)->flags);
71}
72
Johan Hedbergd0f172b2015-03-17 13:48:46 +020073unsigned short hci_sock_get_channel(struct sock *sk)
74{
75 return hci_pi(sk)->channel;
76}
77
Jiri Slaby93919762015-02-19 15:20:43 +010078static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Jiri Slaby93919762015-02-19 15:20:43 +010080 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070081}
82
83/* Security filter */
Marcel Holtmann3ad254f2014-07-11 05:36:39 +020084#define HCI_SFLT_MAX_OGF 5
85
86struct hci_sec_filter {
87 __u32 type_mask;
88 __u32 event_mask[2];
89 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
90};
91
Marcel Holtmann7e67c112014-07-11 05:36:40 +020092static const struct hci_sec_filter hci_sec_filter = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070093 /* Packet types */
94 0x10,
95 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020096 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070097 /* Commands */
98 {
99 { 0x0 },
100 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200101 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200103 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200105 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200107 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200109 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110 }
111};
112
113static struct bt_sock_list hci_sk_list = {
Robert P. J. Dayd5fb2962008-03-28 16:17:38 -0700114 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115};
116
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700117static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
118{
119 struct hci_filter *flt;
120 int flt_type, flt_event;
121
122 /* Apply filter */
123 flt = &hci_pi(sk)->filter;
124
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100125 flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700126
127 if (!test_bit(flt_type, &flt->type_mask))
128 return true;
129
130 /* Extra filter for event packets only */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100131 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700132 return false;
133
134 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
135
136 if (!hci_test_bit(flt_event, &flt->event_mask))
137 return true;
138
139 /* Check filter only when opcode is set */
140 if (!flt->opcode)
141 return false;
142
143 if (flt_event == HCI_EV_CMD_COMPLETE &&
144 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
145 return true;
146
147 if (flt_event == HCI_EV_CMD_STATUS &&
148 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
149 return true;
150
151 return false;
152}
153
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154/* Send frame to RAW socket */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100155void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 struct sock *sk;
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100158 struct sk_buff *skb_copy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
160 BT_DBG("hdev %p len %d", hdev, skb->len);
161
162 read_lock(&hci_sk_list.lock);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100163
Sasha Levinb67bfe02013-02-27 17:06:00 -0800164 sk_for_each(sk, &hci_sk_list.head) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165 struct sk_buff *nskb;
166
167 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
168 continue;
169
170 /* Don't send frame to the socket it came from */
171 if (skb->sk == sk)
172 continue;
173
Marcel Holtmann23500182013-08-26 21:40:52 -0700174 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100175 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
176 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
177 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
178 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
Marcel Holtmannbb775432015-10-09 16:13:50 +0200179 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700180 if (is_filtered_packet(sk, skb))
181 continue;
182 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
183 if (!bt_cb(skb)->incoming)
184 continue;
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100185 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
186 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
187 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
Marcel Holtmann23500182013-08-26 21:40:52 -0700188 continue;
189 } else {
190 /* Don't send frame to other channel types */
Johan Hedberga40c4062010-12-08 00:21:07 +0200191 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100194 if (!skb_copy) {
195 /* Create a private copy with headroom */
Octavian Purdilabad93e92014-06-12 01:36:26 +0300196 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100197 if (!skb_copy)
198 continue;
199
200 /* Put type byte before the data */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100201 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100202 }
203
204 nskb = skb_clone(skb_copy, GFP_ATOMIC);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200205 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 continue;
207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 if (sock_queue_rcv_skb(sk, nskb))
209 kfree_skb(nskb);
210 }
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100211
212 read_unlock(&hci_sk_list.lock);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100213
214 kfree_skb(skb_copy);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100215}
216
Johan Hedberg71290692015-02-20 13:26:23 +0200217/* Send frame to sockets with specific channel */
218void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700219 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100220{
221 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100222
Johan Hedberg71290692015-02-20 13:26:23 +0200223 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100224
225 read_lock(&hci_sk_list.lock);
226
Sasha Levinb67bfe02013-02-27 17:06:00 -0800227 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100228 struct sk_buff *nskb;
229
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700230 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700231 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700232 continue;
233
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100234 /* Skip the original socket */
235 if (sk == skip_sk)
236 continue;
237
238 if (sk->sk_state != BT_BOUND)
239 continue;
240
Johan Hedberg71290692015-02-20 13:26:23 +0200241 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800242 continue;
243
244 nskb = skb_clone(skb, GFP_ATOMIC);
245 if (!nskb)
246 continue;
247
248 if (sock_queue_rcv_skb(sk, nskb))
249 kfree_skb(nskb);
250 }
251
252 read_unlock(&hci_sk_list.lock);
253}
254
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100255/* Send frame to monitor socket */
256void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
257{
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100258 struct sk_buff *skb_copy = NULL;
Marcel Holtmann2b531292015-01-11 19:33:31 -0800259 struct hci_mon_hdr *hdr;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100260 __le16 opcode;
261
262 if (!atomic_read(&monitor_promisc))
263 return;
264
265 BT_DBG("hdev %p len %d", hdev, skb->len);
266
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100267 switch (hci_skb_pkt_type(skb)) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100268 case HCI_COMMAND_PKT:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700269 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100270 break;
271 case HCI_EVENT_PKT:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700272 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100273 break;
274 case HCI_ACLDATA_PKT:
275 if (bt_cb(skb)->incoming)
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700276 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100277 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700278 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100279 break;
280 case HCI_SCODATA_PKT:
281 if (bt_cb(skb)->incoming)
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700282 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100283 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700284 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100285 break;
Marcel Holtmanne875ff82015-10-07 16:38:35 +0200286 case HCI_DIAG_PKT:
287 opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
288 break;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100289 default:
290 return;
291 }
292
Marcel Holtmann2b531292015-01-11 19:33:31 -0800293 /* Create a private copy with headroom */
294 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
295 if (!skb_copy)
296 return;
297
298 /* Put header before the data */
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100299 hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
Marcel Holtmann2b531292015-01-11 19:33:31 -0800300 hdr->opcode = opcode;
301 hdr->index = cpu_to_le16(hdev->id);
302 hdr->len = cpu_to_le16(skb->len);
303
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700304 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
305 HCI_SOCK_TRUSTED, NULL);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100306 kfree_skb(skb_copy);
307}
308
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100309static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
310{
311 struct hci_mon_hdr *hdr;
312 struct hci_mon_new_index *ni;
Marcel Holtmann6c566dd2015-10-07 15:32:13 +0200313 struct hci_mon_index_info *ii;
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100314 struct sk_buff *skb;
315 __le16 opcode;
316
317 switch (event) {
318 case HCI_DEV_REG:
319 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
320 if (!skb)
321 return NULL;
322
Marcel Holtmann6c566dd2015-10-07 15:32:13 +0200323 ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100324 ni->type = hdev->dev_type;
325 ni->bus = hdev->bus;
326 bacpy(&ni->bdaddr, &hdev->bdaddr);
327 memcpy(ni->name, hdev->name, 8);
328
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700329 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100330 break;
331
332 case HCI_DEV_UNREG:
333 skb = bt_skb_alloc(0, GFP_ATOMIC);
334 if (!skb)
335 return NULL;
336
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700337 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100338 break;
339
Marcel Holtmanne131d742015-10-20 02:30:47 +0200340 case HCI_DEV_SETUP:
341 if (hdev->manufacturer == 0xffff)
342 return NULL;
343
344 /* fall through */
345
Marcel Holtmann6c566dd2015-10-07 15:32:13 +0200346 case HCI_DEV_UP:
347 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
348 if (!skb)
349 return NULL;
350
351 ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
352 bacpy(&ii->bdaddr, &hdev->bdaddr);
353 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
354
355 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
356 break;
357
Marcel Holtmann22db3cbc2015-10-04 23:34:03 +0200358 case HCI_DEV_OPEN:
359 skb = bt_skb_alloc(0, GFP_ATOMIC);
360 if (!skb)
361 return NULL;
362
363 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
364 break;
365
366 case HCI_DEV_CLOSE:
367 skb = bt_skb_alloc(0, GFP_ATOMIC);
368 if (!skb)
369 return NULL;
370
371 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
372 break;
373
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100374 default:
375 return NULL;
376 }
377
378 __net_timestamp(skb);
379
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100380 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100381 hdr->opcode = opcode;
382 hdr->index = cpu_to_le16(hdev->id);
383 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
384
385 return skb;
386}
387
Marcel Holtmanndd315062015-11-08 07:47:12 +0100388static void send_monitor_note(struct sock *sk, const char *text)
389{
390 size_t len = strlen(text);
391 struct hci_mon_hdr *hdr;
392 struct sk_buff *skb;
393
394 skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
395 if (!skb)
396 return;
397
398 strcpy(skb_put(skb, len + 1), text);
399
400 __net_timestamp(skb);
401
402 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
403 hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
404 hdr->index = cpu_to_le16(HCI_DEV_NONE);
405 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
406
407 if (sock_queue_rcv_skb(sk, skb))
408 kfree_skb(skb);
409}
410
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100411static void send_monitor_replay(struct sock *sk)
412{
413 struct hci_dev *hdev;
414
415 read_lock(&hci_dev_list_lock);
416
417 list_for_each_entry(hdev, &hci_dev_list, list) {
418 struct sk_buff *skb;
419
420 skb = create_monitor_event(hdev, HCI_DEV_REG);
421 if (!skb)
422 continue;
423
424 if (sock_queue_rcv_skb(sk, skb))
425 kfree_skb(skb);
Marcel Holtmann22db3cbc2015-10-04 23:34:03 +0200426
427 if (!test_bit(HCI_RUNNING, &hdev->flags))
428 continue;
429
430 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
431 if (!skb)
432 continue;
433
434 if (sock_queue_rcv_skb(sk, skb))
435 kfree_skb(skb);
Marcel Holtmann6c566dd2015-10-07 15:32:13 +0200436
Marcel Holtmanne131d742015-10-20 02:30:47 +0200437 if (test_bit(HCI_UP, &hdev->flags))
438 skb = create_monitor_event(hdev, HCI_DEV_UP);
439 else if (hci_dev_test_flag(hdev, HCI_SETUP))
440 skb = create_monitor_event(hdev, HCI_DEV_SETUP);
441 else
442 skb = NULL;
Marcel Holtmann6c566dd2015-10-07 15:32:13 +0200443
Marcel Holtmanne131d742015-10-20 02:30:47 +0200444 if (skb) {
445 if (sock_queue_rcv_skb(sk, skb))
446 kfree_skb(skb);
447 }
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100448 }
449
450 read_unlock(&hci_dev_list_lock);
451}
452
Marcel Holtmann040030e2012-02-20 14:50:37 +0100453/* Generate internal stack event */
454static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
455{
456 struct hci_event_hdr *hdr;
457 struct hci_ev_stack_internal *ev;
458 struct sk_buff *skb;
459
460 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
461 if (!skb)
462 return;
463
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100464 hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
Marcel Holtmann040030e2012-02-20 14:50:37 +0100465 hdr->evt = HCI_EV_STACK_INTERNAL;
466 hdr->plen = sizeof(*ev) + dlen;
467
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100468 ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
Marcel Holtmann040030e2012-02-20 14:50:37 +0100469 ev->type = type;
470 memcpy(ev->data, data, dlen);
471
472 bt_cb(skb)->incoming = 1;
473 __net_timestamp(skb);
474
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100475 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann040030e2012-02-20 14:50:37 +0100476 hci_send_to_sock(hdev, skb);
477 kfree_skb(skb);
478}
479
480void hci_sock_dev_event(struct hci_dev *hdev, int event)
481{
Marcel Holtmann040030e2012-02-20 14:50:37 +0100482 BT_DBG("hdev %s event %d", hdev->name, event);
483
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100484 if (atomic_read(&monitor_promisc)) {
485 struct sk_buff *skb;
486
Marcel Holtmanned1b28a2015-10-04 23:33:59 +0200487 /* Send event to monitor */
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100488 skb = create_monitor_event(hdev, event);
489 if (skb) {
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700490 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
491 HCI_SOCK_TRUSTED, NULL);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100492 kfree_skb(skb);
493 }
494 }
495
Marcel Holtmanned1b28a2015-10-04 23:33:59 +0200496 if (event <= HCI_DEV_DOWN) {
497 struct hci_ev_si_device ev;
498
499 /* Send event to sockets */
500 ev.event = event;
501 ev.dev_id = hdev->id;
502 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
503 }
Marcel Holtmann040030e2012-02-20 14:50:37 +0100504
505 if (event == HCI_DEV_UNREG) {
506 struct sock *sk;
Marcel Holtmann040030e2012-02-20 14:50:37 +0100507
508 /* Detach sockets from device */
509 read_lock(&hci_sk_list.lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800510 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann040030e2012-02-20 14:50:37 +0100511 bh_lock_sock_nested(sk);
512 if (hci_pi(sk)->hdev == hdev) {
513 hci_pi(sk)->hdev = NULL;
514 sk->sk_err = EPIPE;
515 sk->sk_state = BT_OPEN;
516 sk->sk_state_change(sk);
517
518 hci_dev_put(hdev);
519 }
520 bh_unlock_sock(sk);
521 }
522 read_unlock(&hci_sk_list.lock);
523 }
524}
525
Johan Hedberg801c1e82015-03-06 21:08:50 +0200526static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
527{
528 struct hci_mgmt_chan *c;
529
530 list_for_each_entry(c, &mgmt_chan_list, list) {
531 if (c->channel == channel)
532 return c;
533 }
534
535 return NULL;
536}
537
538static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
539{
540 struct hci_mgmt_chan *c;
541
542 mutex_lock(&mgmt_chan_list_lock);
543 c = __hci_mgmt_chan_find(channel);
544 mutex_unlock(&mgmt_chan_list_lock);
545
546 return c;
547}
548
549int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
550{
551 if (c->channel < HCI_CHANNEL_CONTROL)
552 return -EINVAL;
553
554 mutex_lock(&mgmt_chan_list_lock);
555 if (__hci_mgmt_chan_find(c->channel)) {
556 mutex_unlock(&mgmt_chan_list_lock);
557 return -EALREADY;
558 }
559
560 list_add_tail(&c->list, &mgmt_chan_list);
561
562 mutex_unlock(&mgmt_chan_list_lock);
563
564 return 0;
565}
566EXPORT_SYMBOL(hci_mgmt_chan_register);
567
568void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
569{
570 mutex_lock(&mgmt_chan_list_lock);
571 list_del(&c->list);
572 mutex_unlock(&mgmt_chan_list_lock);
573}
574EXPORT_SYMBOL(hci_mgmt_chan_unregister);
575
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576static int hci_sock_release(struct socket *sock)
577{
578 struct sock *sk = sock->sk;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100579 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580
581 BT_DBG("sock %p sk %p", sock, sk);
582
583 if (!sk)
584 return 0;
585
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100586 hdev = hci_pi(sk)->hdev;
587
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100588 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
589 atomic_dec(&monitor_promisc);
590
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591 bt_sock_unlink(&hci_sk_list, sk);
592
593 if (hdev) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700594 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
Simon Fels6b3cc1d2015-09-02 12:10:12 +0200595 /* When releasing an user channel exclusive access,
596 * call hci_dev_do_close directly instead of calling
597 * hci_dev_close to ensure the exclusive access will
598 * be released and the controller brought back down.
599 *
600 * The checking of HCI_AUTO_OFF is not needed in this
601 * case since it will have been cleared already when
602 * opening the user channel.
603 */
604 hci_dev_do_close(hdev);
Loic Poulain9380f9e2015-05-21 16:46:41 +0200605 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
606 mgmt_index_added(hdev);
Marcel Holtmann23500182013-08-26 21:40:52 -0700607 }
608
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 atomic_dec(&hdev->promisc);
610 hci_dev_put(hdev);
611 }
612
613 sock_orphan(sk);
614
615 skb_queue_purge(&sk->sk_receive_queue);
616 skb_queue_purge(&sk->sk_write_queue);
617
618 sock_put(sk);
619 return 0;
620}
621
Antti Julkub2a66aa2011-06-15 12:01:14 +0300622static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200623{
624 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300625 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200626
627 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
628 return -EFAULT;
629
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300630 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300631
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300632 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300633
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300634 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300635
636 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200637}
638
Antti Julkub2a66aa2011-06-15 12:01:14 +0300639static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200640{
641 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300642 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200643
644 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
645 return -EFAULT;
646
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300647 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300648
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300649 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300650
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300651 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300652
653 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200654}
655
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900656/* Ioctls that require bound socket */
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300657static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
658 unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659{
660 struct hci_dev *hdev = hci_pi(sk)->hdev;
661
662 if (!hdev)
663 return -EBADFD;
664
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700665 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -0700666 return -EBUSY;
667
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700668 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmannfee746b2014-06-29 12:13:05 +0200669 return -EOPNOTSUPP;
670
Marcel Holtmann5b69bef52013-10-10 10:02:08 -0700671 if (hdev->dev_type != HCI_BREDR)
672 return -EOPNOTSUPP;
673
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674 switch (cmd) {
675 case HCISETRAW:
676 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000677 return -EPERM;
Marcel Holtmanndb596682014-04-16 20:04:38 -0700678 return -EOPNOTSUPP;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680 case HCIGETCONNINFO:
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100681 return hci_get_conn_info(hdev, (void __user *)arg);
Marcel Holtmann40be4922008-07-14 20:13:50 +0200682
683 case HCIGETAUTHINFO:
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100684 return hci_get_auth_info(hdev, (void __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685
Johan Hedbergf0358562010-05-18 13:20:32 +0200686 case HCIBLOCKADDR:
687 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000688 return -EPERM;
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100689 return hci_sock_blacklist_add(hdev, (void __user *)arg);
Johan Hedbergf0358562010-05-18 13:20:32 +0200690
691 case HCIUNBLOCKADDR:
692 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000693 return -EPERM;
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100694 return hci_sock_blacklist_del(hdev, (void __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 }
Marcel Holtmann0736cfa2013-08-26 21:40:51 -0700696
Marcel Holtmann324d36e2013-10-10 10:50:06 -0700697 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698}
699
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300700static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
701 unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702{
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100703 void __user *argp = (void __user *)arg;
Marcel Holtmann0736cfa2013-08-26 21:40:51 -0700704 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 int err;
706
707 BT_DBG("cmd %x arg %lx", cmd, arg);
708
Marcel Holtmannc1c4f952013-08-26 09:39:55 -0700709 lock_sock(sk);
710
711 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
712 err = -EBADFD;
713 goto done;
714 }
715
716 release_sock(sk);
717
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 switch (cmd) {
719 case HCIGETDEVLIST:
720 return hci_get_dev_list(argp);
721
722 case HCIGETDEVINFO:
723 return hci_get_dev_info(argp);
724
725 case HCIGETCONNLIST:
726 return hci_get_conn_list(argp);
727
728 case HCIDEVUP:
729 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000730 return -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731 return hci_dev_open(arg);
732
733 case HCIDEVDOWN:
734 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000735 return -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736 return hci_dev_close(arg);
737
738 case HCIDEVRESET:
739 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000740 return -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 return hci_dev_reset(arg);
742
743 case HCIDEVRESTAT:
744 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000745 return -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 return hci_dev_reset_stat(arg);
747
748 case HCISETSCAN:
749 case HCISETAUTH:
750 case HCISETENCRYPT:
751 case HCISETPTYPE:
752 case HCISETLINKPOL:
753 case HCISETLINKMODE:
754 case HCISETACLMTU:
755 case HCISETSCOMTU:
756 if (!capable(CAP_NET_ADMIN))
Zhao Hongjiangbf5b30b2012-09-20 22:37:25 +0000757 return -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758 return hci_dev_cmd(cmd, argp);
759
760 case HCIINQUIRY:
761 return hci_inquiry(argp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 }
Marcel Holtmannc1c4f952013-08-26 09:39:55 -0700763
764 lock_sock(sk);
765
766 err = hci_sock_bound_ioctl(sk, cmd, arg);
767
768done:
769 release_sock(sk);
770 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771}
772
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300773static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
774 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775{
Johan Hedberg03811012010-12-08 00:21:06 +0200776 struct sockaddr_hci haddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 struct sock *sk = sock->sk;
778 struct hci_dev *hdev = NULL;
Johan Hedberg03811012010-12-08 00:21:06 +0200779 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780
781 BT_DBG("sock %p sk %p", sock, sk);
782
Johan Hedberg03811012010-12-08 00:21:06 +0200783 if (!addr)
784 return -EINVAL;
785
786 memset(&haddr, 0, sizeof(haddr));
787 len = min_t(unsigned int, sizeof(haddr), addr_len);
788 memcpy(&haddr, addr, len);
789
790 if (haddr.hci_family != AF_BLUETOOTH)
791 return -EINVAL;
792
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 lock_sock(sk);
794
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100795 if (sk->sk_state == BT_BOUND) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 err = -EALREADY;
797 goto done;
798 }
799
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100800 switch (haddr.hci_channel) {
801 case HCI_CHANNEL_RAW:
802 if (hci_pi(sk)->hdev) {
803 err = -EALREADY;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 goto done;
805 }
806
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100807 if (haddr.hci_dev != HCI_DEV_NONE) {
808 hdev = hci_dev_get(haddr.hci_dev);
809 if (!hdev) {
810 err = -ENODEV;
811 goto done;
812 }
813
814 atomic_inc(&hdev->promisc);
815 }
816
817 hci_pi(sk)->hdev = hdev;
818 break;
819
Marcel Holtmann23500182013-08-26 21:40:52 -0700820 case HCI_CHANNEL_USER:
821 if (hci_pi(sk)->hdev) {
822 err = -EALREADY;
823 goto done;
824 }
825
826 if (haddr.hci_dev == HCI_DEV_NONE) {
827 err = -EINVAL;
828 goto done;
829 }
830
Marcel Holtmann10a8b862013-10-01 22:59:24 -0700831 if (!capable(CAP_NET_ADMIN)) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700832 err = -EPERM;
833 goto done;
834 }
835
836 hdev = hci_dev_get(haddr.hci_dev);
837 if (!hdev) {
838 err = -ENODEV;
839 goto done;
840 }
841
Marcel Holtmann781f8992015-06-06 06:06:49 +0200842 if (test_bit(HCI_INIT, &hdev->flags) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700843 hci_dev_test_flag(hdev, HCI_SETUP) ||
Marcel Holtmann781f8992015-06-06 06:06:49 +0200844 hci_dev_test_flag(hdev, HCI_CONFIG) ||
845 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
846 test_bit(HCI_UP, &hdev->flags))) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700847 err = -EBUSY;
848 hci_dev_put(hdev);
849 goto done;
850 }
851
Marcel Holtmann238be782015-03-13 02:11:06 -0700852 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700853 err = -EUSERS;
854 hci_dev_put(hdev);
855 goto done;
856 }
857
Marcel Holtmann0602a8a2014-07-02 21:30:54 +0200858 mgmt_index_removed(hdev);
Marcel Holtmann23500182013-08-26 21:40:52 -0700859
860 err = hci_dev_open(hdev->id);
861 if (err) {
Marcel Holtmann781f8992015-06-06 06:06:49 +0200862 if (err == -EALREADY) {
863 /* In case the transport is already up and
864 * running, clear the error here.
865 *
866 * This can happen when opening an user
867 * channel and HCI_AUTO_OFF grace period
868 * is still active.
869 */
870 err = 0;
871 } else {
872 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
873 mgmt_index_added(hdev);
874 hci_dev_put(hdev);
875 goto done;
876 }
Marcel Holtmann23500182013-08-26 21:40:52 -0700877 }
878
879 atomic_inc(&hdev->promisc);
880
881 hci_pi(sk)->hdev = hdev;
882 break;
883
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100884 case HCI_CHANNEL_MONITOR:
885 if (haddr.hci_dev != HCI_DEV_NONE) {
886 err = -EINVAL;
887 goto done;
888 }
889
890 if (!capable(CAP_NET_RAW)) {
891 err = -EPERM;
892 goto done;
893 }
894
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700895 /* The monitor interface is restricted to CAP_NET_RAW
896 * capabilities and with that implicitly trusted.
897 */
898 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
899
Marcel Holtmanndd315062015-11-08 07:47:12 +0100900 send_monitor_note(sk, "Linux version " UTS_RELEASE
901 " (" UTS_MACHINE ")");
902 send_monitor_note(sk, "Bluetooth subsystem version "
903 BT_SUBSYS_VERSION);
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100904 send_monitor_replay(sk);
905
906 atomic_inc(&monitor_promisc);
907 break;
908
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100909 default:
Johan Hedberg801c1e82015-03-06 21:08:50 +0200910 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
911 err = -EINVAL;
912 goto done;
913 }
914
915 if (haddr.hci_dev != HCI_DEV_NONE) {
916 err = -EINVAL;
917 goto done;
918 }
919
Marcel Holtmann1195fbb2015-03-14 19:28:04 -0700920 /* Users with CAP_NET_ADMIN capabilities are allowed
921 * access to all management commands and events. For
922 * untrusted users the interface is restricted and
923 * also only untrusted events are sent.
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700924 */
Marcel Holtmann1195fbb2015-03-14 19:28:04 -0700925 if (capable(CAP_NET_ADMIN))
926 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700927
Marcel Holtmannf9207332015-03-14 19:27:55 -0700928 /* At the moment the index and unconfigured index events
929 * are enabled unconditionally. Setting them on each
930 * socket when binding keeps this functionality. They
931 * however might be cleared later and then sending of these
932 * events will be disabled, but that is then intentional.
Marcel Holtmannf6b77122015-03-14 19:28:05 -0700933 *
934 * This also enables generic events that are safe to be
935 * received by untrusted users. Example for such events
936 * are changes to settings, class of device, name etc.
Marcel Holtmannf9207332015-03-14 19:27:55 -0700937 */
938 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
939 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
940 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
Marcel Holtmannf6b77122015-03-14 19:28:05 -0700941 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
Marcel Holtmannf9207332015-03-14 19:27:55 -0700942 }
Johan Hedberg801c1e82015-03-06 21:08:50 +0200943 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944 }
945
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100946
Johan Hedberg03811012010-12-08 00:21:06 +0200947 hci_pi(sk)->channel = haddr.hci_channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 sk->sk_state = BT_BOUND;
949
950done:
951 release_sock(sk);
952 return err;
953}
954
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300955static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
956 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957{
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100958 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700960 struct hci_dev *hdev;
961 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962
963 BT_DBG("sock %p sk %p", sock, sk);
964
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700965 if (peer)
966 return -EOPNOTSUPP;
967
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968 lock_sock(sk);
969
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700970 hdev = hci_pi(sk)->hdev;
971 if (!hdev) {
972 err = -EBADFD;
973 goto done;
974 }
975
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976 *addr_len = sizeof(*haddr);
977 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100978 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700979 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700981done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700983 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984}
985
/* Attach ancillary data (cmsg) to a message delivered on a raw HCI
 * socket. Which items are attached is controlled by the per-socket
 * cmsg_mask, configured through the HCI_DATA_DIR and HCI_TIME_STAMP
 * socket options.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	/* Report the packet direction (incoming vs outgoing). */
	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	/* Report the packet timestamp taken from the skb. */
	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit userspace on a 64-bit kernel expects the compat
		 * layout of struct timeval, so convert before copying out.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001022
Marcel Holtmann8528d3f2015-11-08 07:47:11 +01001023static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1024 size_t len, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025{
1026 int noblock = flags & MSG_DONTWAIT;
1027 struct sock *sk = sock->sk;
1028 struct sk_buff *skb;
1029 int copied, err;
1030
1031 BT_DBG("sock %p, sk %p", sock, sk);
1032
Marcel Holtmannd94a6102015-10-25 22:45:18 +01001033 if (flags & MSG_OOB)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 return -EOPNOTSUPP;
1035
1036 if (sk->sk_state == BT_CLOSED)
1037 return 0;
1038
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001039 skb = skb_recv_datagram(sk, flags, noblock, &err);
1040 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 return err;
1042
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 copied = skb->len;
1044 if (len < copied) {
1045 msg->msg_flags |= MSG_TRUNC;
1046 copied = len;
1047 }
1048
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001049 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -05001050 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051
Marcel Holtmann3a208622012-02-20 14:50:34 +01001052 switch (hci_pi(sk)->channel) {
1053 case HCI_CHANNEL_RAW:
1054 hci_sock_cmsg(sk, msg, skb);
1055 break;
Marcel Holtmann23500182013-08-26 21:40:52 -07001056 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001057 case HCI_CHANNEL_MONITOR:
1058 sock_recv_timestamp(msg, sk, skb);
1059 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001060 default:
1061 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1062 sock_recv_timestamp(msg, sk, skb);
1063 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +01001064 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065
1066 skb_free_datagram(sk, skb);
1067
1068 return err ? : copied;
1069}
1070
/* Parse and dispatch one management command received on a management
 * channel socket.
 *
 * The message starts with a struct mgmt_hdr (opcode, controller index,
 * payload length, all little endian). After validating the header,
 * permissions and payload length against the channel's handler table,
 * the command payload is handed to the registered handler. On success
 * the full message length is returned; otherwise a negative error, with
 * most protocol-level failures additionally reported back to the socket
 * via mgmt_cmd_status().
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	/* Copy the whole message into kernel space before parsing. */
	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only run handlers explicitly marked as
	 * available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		/* Takes a reference; dropped at "done" below. */
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a
		 * user channel socket, are not addressable here.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept handlers that
		 * opted in via HCI_MGMT_UNCONFIGURED.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged HCI_MGMT_NO_HDEV must come without an index,
	 * and vice versa.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands need at least data_len bytes;
	 * fixed-length commands need exactly data_len bytes.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Let the channel do per-device initialization if it wants to. */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the number of bytes consumed. */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1181
/* Send one message on an HCI socket.
 *
 * Management channel sockets have their message dispatched through
 * hci_mgmt_cmd(); the monitor channel is read-only. For raw and user
 * channel sockets the first payload byte is the HCI packet type
 * indicator and the rest is the packet itself, which is routed to the
 * device's command or raw queue depending on channel, packet type and
 * capabilities. Returns the number of bytes sent or a negative error.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Only these send flags are supported. */
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Reject frames that are too short to be a valid HCI packet or
	 * exceed the maximum frame size.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is receive-only. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Everything else is a management channel; hold the
		 * channel list lock across lookup and dispatch.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The first byte is the packet type indicator; strip it off
	 * and keep it in the skb control block.
	 */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter's allowed OCF set
		 * require CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		/* OGF 0x3f (vendor specific) commands bypass the command
		 * queue and go straight to the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw channel data packets require CAP_NET_RAW and must
		 * be ACL or SCO data.
		 */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1317
/* Set SOL_HCI socket options. Only raw channel sockets have options;
 * any other channel yields -EBADFD.
 *
 * HCI_DATA_DIR and HCI_TIME_STAMP toggle bits in the per-socket
 * cmsg_mask consumed by hci_sock_cmsg(). HCI_FILTER installs an event
 * filter; for sockets without CAP_NET_RAW the requested filter is
 * intersected with the global security filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
	{
		struct hci_filter *f = &hci_pi(sk)->filter;

		/* Pre-load uf with the current filter so that a short
		 * copy from userspace below leaves the remaining fields
		 * at their present values.
		 */
		uf.type_mask = f->type_mask;
		uf.opcode = f->opcode;
		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
		uf.event_mask[1] = *((u32 *) f->event_mask + 1);
	}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets may not widen the filter beyond
		 * the global security filter.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1400
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001401static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1402 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403{
1404 struct hci_ufilter uf;
1405 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001406 int len, opt, err = 0;
1407
1408 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409
1410 if (get_user(len, optlen))
1411 return -EFAULT;
1412
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001413 lock_sock(sk);
1414
1415 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001416 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001417 goto done;
1418 }
1419
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 switch (optname) {
1421 case HCI_DATA_DIR:
1422 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1423 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001424 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 opt = 0;
1426
1427 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001428 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 break;
1430
1431 case HCI_TIME_STAMP:
1432 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1433 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001434 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 opt = 0;
1436
1437 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001438 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 break;
1440
1441 case HCI_FILTER:
1442 {
1443 struct hci_filter *f = &hci_pi(sk)->filter;
1444
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001445 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 uf.type_mask = f->type_mask;
1447 uf.opcode = f->opcode;
1448 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1449 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1450 }
1451
1452 len = min_t(unsigned int, len, sizeof(uf));
1453 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001454 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 break;
1456
1457 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001458 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 break;
1460 }
1461
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001462done:
1463 release_sock(sk);
1464 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465}
1466
/* Socket operations for HCI sockets. Connection-oriented calls
 * (listen, connect, accept, socketpair) and mmap are not supported
 * and map to the generic sock_no_* stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1486
/* Protocol definition backing every HCI socket; obj_size is sized for
 * struct hci_pinfo so the per-socket HCI state lives inside the sock
 * allocation.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1492
/* Create a new HCI socket.
 *
 * Invoked via hci_sock_family_ops when userspace opens a
 * PF_BLUETOOTH socket with BTPROTO_HCI. Only SOCK_RAW is supported.
 * The new sock starts in BT_OPEN state (channel/device selection
 * happens later in hci_sock_bind) and is linked into hci_sk_list.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1521
/* Registered with the Bluetooth core for BTPROTO_HCI so that
 * hci_sock_create() runs for every new HCI socket.
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528int __init hci_sock_init(void)
1529{
1530 int err;
1531
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001532 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 err = proto_register(&hci_sk_proto, 0);
1535 if (err < 0)
1536 return err;
1537
1538 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001539 if (err < 0) {
1540 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001542 }
1543
Al Virob0316612013-04-04 19:14:33 -04001544 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001545 if (err < 0) {
1546 BT_ERR("Failed to create HCI proc file");
1547 bt_sock_unregister(BTPROTO_HCI);
1548 goto error;
1549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 BT_INFO("HCI socket layer initialized");
1552
1553 return 0;
1554
1555error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 proto_unregister(&hci_sk_proto);
1557 return err;
1558}
1559
/* Tear down the HCI socket layer, undoing hci_sock_init() in reverse
 * order: procfs entry, socket family, then protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}