blob: 19b23013c4f668341b1f80435d696d98da156db1 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020033#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
/* Registered management channels (see hci_mgmt_chan_register below),
 * protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Number of sockets currently bound to HCI_CHANNEL_MONITOR; monitor
 * traffic is only generated while this count is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info: hci_pinfo overlays the generic socket, so a plain cast
 * recovers the HCI-specific state.
 */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;      /* generic Bluetooth socket state */
	struct hci_dev    *hdev;   /* bound controller, NULL when unbound */
	struct hci_filter filter;  /* per-socket raw packet/event filter */
	__u32             cmsg_mask;
	unsigned short    channel; /* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;   /* HCI_SOCK_* flag bits (e.g. TRUSTED) */
};
55
/* Set HCI_SOCK_* flag bit @nr on HCI socket @sk. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
60
/* Clear HCI_SOCK_* flag bit @nr on HCI socket @sk. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
65
/* Test HCI_SOCK_* flag bit @nr on HCI socket @sk; returns non-zero if set. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
70
/* Return the HCI_CHANNEL_* value the socket was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
75
/* Test bit @nr in a __u32-word bitmap at @addr (used for the filter
 * event/opcode masks). Returns non-zero when the bit is set.
 */
static inline int hci_test_bit(int nr, const void *addr)
{
	const __u32 *words = addr;

	return words[nr >> 5] & ((__u32) 1 << (nr & 31));
}
80
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;	/* permitted packet types (bitmap) */
	__u32 event_mask[2];	/* permitted HCI events (64-bit bitmap) */
	/* permitted OCFs, one 128-bit bitmap per OGF */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* Default whitelist of packet types, events and commands for
 * unprivileged raw sockets. NOTE(review): the enforcement site is not
 * in this chunk — verify against the sendmsg/filter paths.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
110
/* Global list of all HCI sockets, guarded by its rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
/* Apply the per-socket filter to @skb. Returns true when the packet
 * must NOT be delivered to @sk (i.e. it is filtered out), false when
 * it passes the filter.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the filter -> drop */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First payload byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Compare against the (unaligned) opcode embedded in the event
	 * payload; its offset differs between the two event types.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
151
/* Send frame to RAW socket.
 *
 * Broadcasts @skb to every bound socket attached to @hdev on the RAW
 * or USER channel, subject to per-channel packet-type rules and the
 * RAW socket filter. A private copy with the packet-type byte pushed
 * in front is created lazily, only once a recipient is found.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only sees incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		/* Each recipient gets its own clone of the shared copy */
		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
214
/* Send frame to sockets with specific channel.
 *
 * Clones @skb to every bound socket on @channel that has HCI_SOCK_*
 * flag @flag set, skipping @skip_sk (may be NULL).
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
252
/* Send frame to monitor socket.
 *
 * Wraps @skb in a hci_mon_hdr (opcode derived from the packet type and
 * direction) and broadcasts it to trusted HCI_CHANNEL_MONITOR sockets.
 * Cheap no-op when no monitor socket is open.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and RX/TX direction) to a monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not forwarded to the monitor */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
306
/* Build a monitor-channel control packet for device event @event
 * (HCI_DEV_REG/UNREG/SETUP/UP/OPEN/CLOSE). Returns a freshly allocated
 * skb with a hci_mon_hdr prepended, or NULL if the event is not
 * reportable or allocation fails. Caller owns (and must free) the skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		/* New controller: report type, bus, address and name */
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* Skip index info while the manufacturer is still unset */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
385
/* Replay the current state of every registered controller to a newly
 * bound monitor socket @sk: REG always, then OPEN if the transport is
 * running, then UP or SETUP info depending on device flags.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Remaining events only apply to a running transport */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
427
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data, marks it as incoming, and delivers it through
 * hci_send_to_sock() (to all sockets when @hdev is NULL).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Make it look like a received event packet */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
454
/* Notify sockets of a controller life-cycle @event: forward it to the
 * monitor channel, raise an internal stack event for low-numbered
 * events, and on HCI_DEV_UNREG detach every socket bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device: wake them with EPIPE and
		 * drop the device reference each bound socket held.
		 */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
500
Johan Hedberg801c1e82015-03-06 21:08:50 +0200501static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
502{
503 struct hci_mgmt_chan *c;
504
505 list_for_each_entry(c, &mgmt_chan_list, list) {
506 if (c->channel == channel)
507 return c;
508 }
509
510 return NULL;
511}
512
513static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
514{
515 struct hci_mgmt_chan *c;
516
517 mutex_lock(&mgmt_chan_list_lock);
518 c = __hci_mgmt_chan_find(channel);
519 mutex_unlock(&mgmt_chan_list_lock);
520
521 return c;
522}
523
524int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
525{
526 if (c->channel < HCI_CHANNEL_CONTROL)
527 return -EINVAL;
528
529 mutex_lock(&mgmt_chan_list_lock);
530 if (__hci_mgmt_chan_find(c->channel)) {
531 mutex_unlock(&mgmt_chan_list_lock);
532 return -EALREADY;
533 }
534
535 list_add_tail(&c->list, &mgmt_chan_list);
536
537 mutex_unlock(&mgmt_chan_list_lock);
538
539 return 0;
540}
541EXPORT_SYMBOL(hci_mgmt_chan_register);
542
/* Remove management channel @c from the registered list. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
550
/* Release an HCI socket: drop monitor accounting, unlink it from the
 * global list, give back the controller (closing it for USER channel
 * sockets), and free queued skbs. Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
596
/* HCIBLOCKADDR ioctl helper: copy a bdaddr_t from userspace @arg and
 * add it to the device blacklist (as a BR/EDR address). Returns 0 or a
 * negative errno.
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
613
/* HCIUNBLOCKADDR ioctl helper: copy a bdaddr_t from userspace @arg and
 * remove it from the device blacklist. Returns 0 or a negative errno.
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
630
/* Ioctls that require bound socket.
 *
 * Handles per-device ioctls for a socket already bound to a BR/EDR
 * controller. Rejects devices held by a user channel or still
 * unconfigured. Returns -ENOIOCTLCMD for unknown commands.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but the capability
		 * check still gates the error returned.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
674
/* Top-level HCI socket ioctl handler.
 *
 * Only HCI_CHANNEL_RAW sockets accept ioctls. Device-independent
 * commands are dispatched without the socket lock held (it is released
 * before the switch); anything unhandled falls through to
 * hci_sock_bound_ioctl() with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below do not need the socket lock */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Bound-socket ioctls need the lock again */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
747
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300748static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
749 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750{
Johan Hedberg03811012010-12-08 00:21:06 +0200751 struct sockaddr_hci haddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 struct sock *sk = sock->sk;
753 struct hci_dev *hdev = NULL;
Johan Hedberg03811012010-12-08 00:21:06 +0200754 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755
756 BT_DBG("sock %p sk %p", sock, sk);
757
Johan Hedberg03811012010-12-08 00:21:06 +0200758 if (!addr)
759 return -EINVAL;
760
761 memset(&haddr, 0, sizeof(haddr));
762 len = min_t(unsigned int, sizeof(haddr), addr_len);
763 memcpy(&haddr, addr, len);
764
765 if (haddr.hci_family != AF_BLUETOOTH)
766 return -EINVAL;
767
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 lock_sock(sk);
769
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100770 if (sk->sk_state == BT_BOUND) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 err = -EALREADY;
772 goto done;
773 }
774
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100775 switch (haddr.hci_channel) {
776 case HCI_CHANNEL_RAW:
777 if (hci_pi(sk)->hdev) {
778 err = -EALREADY;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779 goto done;
780 }
781
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100782 if (haddr.hci_dev != HCI_DEV_NONE) {
783 hdev = hci_dev_get(haddr.hci_dev);
784 if (!hdev) {
785 err = -ENODEV;
786 goto done;
787 }
788
789 atomic_inc(&hdev->promisc);
790 }
791
792 hci_pi(sk)->hdev = hdev;
793 break;
794
Marcel Holtmann23500182013-08-26 21:40:52 -0700795 case HCI_CHANNEL_USER:
796 if (hci_pi(sk)->hdev) {
797 err = -EALREADY;
798 goto done;
799 }
800
801 if (haddr.hci_dev == HCI_DEV_NONE) {
802 err = -EINVAL;
803 goto done;
804 }
805
Marcel Holtmann10a8b862013-10-01 22:59:24 -0700806 if (!capable(CAP_NET_ADMIN)) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700807 err = -EPERM;
808 goto done;
809 }
810
811 hdev = hci_dev_get(haddr.hci_dev);
812 if (!hdev) {
813 err = -ENODEV;
814 goto done;
815 }
816
Marcel Holtmann781f8992015-06-06 06:06:49 +0200817 if (test_bit(HCI_INIT, &hdev->flags) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700818 hci_dev_test_flag(hdev, HCI_SETUP) ||
Marcel Holtmann781f8992015-06-06 06:06:49 +0200819 hci_dev_test_flag(hdev, HCI_CONFIG) ||
820 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
821 test_bit(HCI_UP, &hdev->flags))) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700822 err = -EBUSY;
823 hci_dev_put(hdev);
824 goto done;
825 }
826
Marcel Holtmann238be782015-03-13 02:11:06 -0700827 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann23500182013-08-26 21:40:52 -0700828 err = -EUSERS;
829 hci_dev_put(hdev);
830 goto done;
831 }
832
Marcel Holtmann0602a8a2014-07-02 21:30:54 +0200833 mgmt_index_removed(hdev);
Marcel Holtmann23500182013-08-26 21:40:52 -0700834
835 err = hci_dev_open(hdev->id);
836 if (err) {
Marcel Holtmann781f8992015-06-06 06:06:49 +0200837 if (err == -EALREADY) {
838 /* In case the transport is already up and
839 * running, clear the error here.
840 *
841 * This can happen when opening an user
842 * channel and HCI_AUTO_OFF grace period
843 * is still active.
844 */
845 err = 0;
846 } else {
847 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
848 mgmt_index_added(hdev);
849 hci_dev_put(hdev);
850 goto done;
851 }
Marcel Holtmann23500182013-08-26 21:40:52 -0700852 }
853
854 atomic_inc(&hdev->promisc);
855
856 hci_pi(sk)->hdev = hdev;
857 break;
858
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100859 case HCI_CHANNEL_MONITOR:
860 if (haddr.hci_dev != HCI_DEV_NONE) {
861 err = -EINVAL;
862 goto done;
863 }
864
865 if (!capable(CAP_NET_RAW)) {
866 err = -EPERM;
867 goto done;
868 }
869
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700870 /* The monitor interface is restricted to CAP_NET_RAW
871 * capabilities and with that implicitly trusted.
872 */
873 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
874
Marcel Holtmanncd82e612012-02-20 20:34:38 +0100875 send_monitor_replay(sk);
876
877 atomic_inc(&monitor_promisc);
878 break;
879
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100880 default:
Johan Hedberg801c1e82015-03-06 21:08:50 +0200881 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
882 err = -EINVAL;
883 goto done;
884 }
885
886 if (haddr.hci_dev != HCI_DEV_NONE) {
887 err = -EINVAL;
888 goto done;
889 }
890
Marcel Holtmann1195fbb2015-03-14 19:28:04 -0700891 /* Users with CAP_NET_ADMIN capabilities are allowed
892 * access to all management commands and events. For
893 * untrusted users the interface is restricted and
894 * also only untrusted events are sent.
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700895 */
Marcel Holtmann1195fbb2015-03-14 19:28:04 -0700896 if (capable(CAP_NET_ADMIN))
897 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
Marcel Holtmann50ebc052015-03-14 19:27:58 -0700898
Marcel Holtmannf9207332015-03-14 19:27:55 -0700899 /* At the moment the index and unconfigured index events
900 * are enabled unconditionally. Setting them on each
901 * socket when binding keeps this functionality. They
902 * however might be cleared later and then sending of these
903 * events will be disabled, but that is then intentional.
Marcel Holtmannf6b77122015-03-14 19:28:05 -0700904 *
905 * This also enables generic events that are safe to be
906 * received by untrusted users. Example for such events
907 * are changes to settings, class of device, name etc.
Marcel Holtmannf9207332015-03-14 19:27:55 -0700908 */
909 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
910 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
911 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
Marcel Holtmannf6b77122015-03-14 19:28:05 -0700912 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
Marcel Holtmannf9207332015-03-14 19:27:55 -0700913 }
Johan Hedberg801c1e82015-03-06 21:08:50 +0200914 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915 }
916
Marcel Holtmann7cc2ade2012-02-20 14:50:35 +0100917
Johan Hedberg03811012010-12-08 00:21:06 +0200918 hci_pi(sk)->channel = haddr.hci_channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 sk->sk_state = BT_BOUND;
920
921done:
922 release_sock(sk);
923 return err;
924}
925
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300926static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
927 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928{
929 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
930 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700931 struct hci_dev *hdev;
932 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933
934 BT_DBG("sock %p sk %p", sock, sk);
935
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700936 if (peer)
937 return -EOPNOTSUPP;
938
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939 lock_sock(sk);
940
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700941 hdev = hci_pi(sk)->hdev;
942 if (!hdev) {
943 err = -EBADFD;
944 goto done;
945 }
946
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 *addr_len = sizeof(*haddr);
948 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100949 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700950 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700952done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700953 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700954 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955}
956
/* Attach the ancillary data requested via the socket's cmsg mask to a
 * received message: the frame direction (HCI_CMSG_DIR) and/or the
 * receive timestamp (HCI_CMSG_TSTAMP).  Called from the raw-channel
 * receive path.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* Whether the frame was received from the controller
		 * (incoming) rather than sent towards it.
		 */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		/* Default to the native timeval layout ... */
		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* ... but 32-bit tasks on a 64-bit kernel expect the
		 * compat_timeval layout unless 64-bit time is in use.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900993
Ying Xue1b784142015-03-02 15:37:48 +0800994static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
995 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996{
997 int noblock = flags & MSG_DONTWAIT;
998 struct sock *sk = sock->sk;
999 struct sk_buff *skb;
1000 int copied, err;
1001
1002 BT_DBG("sock %p, sk %p", sock, sk);
1003
Marcel Holtmannd94a6102015-10-25 22:45:18 +01001004 if (flags & MSG_OOB)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 return -EOPNOTSUPP;
1006
1007 if (sk->sk_state == BT_CLOSED)
1008 return 0;
1009
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001010 skb = skb_recv_datagram(sk, flags, noblock, &err);
1011 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 return err;
1013
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 copied = skb->len;
1015 if (len < copied) {
1016 msg->msg_flags |= MSG_TRUNC;
1017 copied = len;
1018 }
1019
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001020 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -05001021 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022
Marcel Holtmann3a208622012-02-20 14:50:34 +01001023 switch (hci_pi(sk)->channel) {
1024 case HCI_CHANNEL_RAW:
1025 hci_sock_cmsg(sk, msg, skb);
1026 break;
Marcel Holtmann23500182013-08-26 21:40:52 -07001027 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001028 case HCI_CHANNEL_MONITOR:
1029 sock_recv_timestamp(msg, sk, skb);
1030 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001031 default:
1032 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1033 sock_recv_timestamp(msg, sk, skb);
1034 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +01001035 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036
1037 skb_free_datagram(sk, skb);
1038
1039 return err ? : copied;
1040}
1041
/* Dispatch one management command received over an mgmt-based channel.
 *
 * The message must start with a struct mgmt_hdr (opcode, index, len)
 * followed by exactly @len parameter bytes.  The command is validated
 * against the channel's handler table, the socket's trust level and
 * the state of the addressed controller before the handler runs.
 *
 * Returns @msglen on success, a negative errno on internal failure,
 * or the result of mgmt_cmd_status() when the command is rejected
 * with a protocol-level status.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a
		 * user channel, are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept commands that
		 * explicitly support that state.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged HCI_MGMT_NO_HDEV must not name a controller;
	 * all others must.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands specify a minimum size; fixed-length
	 * commands must match exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1152
/* Transmit one frame (or management command) on an HCI socket.
 *
 * The first payload byte is the HCI packet type indicator; it is
 * stripped off and stored in the skb control buffer.  Raw and user
 * channel frames are routed to the controller queues, management
 * channels are forwarded to hci_mgmt_cmd(), and the read-only monitor
 * channel rejects sends.  Returns @len on success or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Shortest valid frame: one type byte plus a minimal packet
	 * header (presumably the 3-byte HCI command header).
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is receive-only. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Anything else must be a registered mgmt channel. */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Move the leading packet type indicator byte into the skb
	 * control buffer.
	 */
	hci_skb_pkt_type(skb) = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter's whitelist
		 * require CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* OGF 0x3f is the vendor-specific range; those bypass
		 * the command queue and go out via the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets need CAP_NET_RAW and must be ACL
		 * or SCO data.
		 */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1283
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001284static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1285 char __user *optval, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286{
1287 struct hci_ufilter uf = { .opcode = 0 };
1288 struct sock *sk = sock->sk;
1289 int err = 0, opt = 0;
1290
1291 BT_DBG("sk %p, opt %d", sk, optname);
1292
1293 lock_sock(sk);
1294
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001295 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001296 err = -EBADFD;
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001297 goto done;
1298 }
1299
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 switch (optname) {
1301 case HCI_DATA_DIR:
1302 if (get_user(opt, (int __user *)optval)) {
1303 err = -EFAULT;
1304 break;
1305 }
1306
1307 if (opt)
1308 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1309 else
1310 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1311 break;
1312
1313 case HCI_TIME_STAMP:
1314 if (get_user(opt, (int __user *)optval)) {
1315 err = -EFAULT;
1316 break;
1317 }
1318
1319 if (opt)
1320 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1321 else
1322 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1323 break;
1324
1325 case HCI_FILTER:
Marcel Holtmann0878b662007-05-05 00:35:59 +02001326 {
1327 struct hci_filter *f = &hci_pi(sk)->filter;
1328
1329 uf.type_mask = f->type_mask;
1330 uf.opcode = f->opcode;
1331 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1332 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1333 }
1334
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 len = min_t(unsigned int, len, sizeof(uf));
1336 if (copy_from_user(&uf, optval, len)) {
1337 err = -EFAULT;
1338 break;
1339 }
1340
1341 if (!capable(CAP_NET_RAW)) {
1342 uf.type_mask &= hci_sec_filter.type_mask;
1343 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1344 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1345 }
1346
1347 {
1348 struct hci_filter *f = &hci_pi(sk)->filter;
1349
1350 f->type_mask = uf.type_mask;
1351 f->opcode = uf.opcode;
1352 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1353 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1354 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001355 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
1357 default:
1358 err = -ENOPROTOOPT;
1359 break;
1360 }
1361
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001362done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 release_sock(sk);
1364 return err;
1365}
1366
/* Read back a raw-channel socket option (HCI_DATA_DIR, HCI_TIME_STAMP
 * or HCI_FILTER).  Only sockets bound to HCI_CHANNEL_RAW carry these
 * options; any other channel yields -EBADFD.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		/* NOTE(review): optval is char __user *, so put_user()
		 * writes a single byte here even though opt is an int —
		 * looks intentional/historical; verify against users.
		 */
		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero first so padding never leaks kernel
			 * memory to userspace.
			 */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1432
/* Socket operations for HCI sockets.  Connection-oriented calls
 * (listen, connect, accept, shutdown, socketpair, mmap) are stubbed
 * out with the sock_no_* helpers; HCI sockets only support bind plus
 * datagram-style send/receive, ioctl and the HCI socket options.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1452
/* Protocol descriptor for HCI sockets; obj_size reserves room for the
 * per-socket struct hci_pinfo private data.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1458
Eric Paris3f378b62009-11-05 22:18:14 -08001459static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1460 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461{
1462 struct sock *sk;
1463
1464 BT_DBG("sock %p", sock);
1465
1466 if (sock->type != SOCK_RAW)
1467 return -ESOCKTNOSUPPORT;
1468
1469 sock->ops = &hci_sock_ops;
1470
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001471 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 if (!sk)
1473 return -ENOMEM;
1474
1475 sock_init_data(sock, sk);
1476
1477 sock_reset_flag(sk, SOCK_ZAPPED);
1478
1479 sk->sk_protocol = protocol;
1480
1481 sock->state = SS_UNCONNECTED;
1482 sk->sk_state = BT_OPEN;
1483
1484 bt_sock_link(&hci_sk_list, sk);
1485 return 0;
1486}
1487
/* Family handler registered with the Bluetooth core so that
 * socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) reaches
 * hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1493
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494int __init hci_sock_init(void)
1495{
1496 int err;
1497
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001498 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 err = proto_register(&hci_sk_proto, 0);
1501 if (err < 0)
1502 return err;
1503
1504 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001505 if (err < 0) {
1506 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001508 }
1509
Al Virob0316612013-04-04 19:14:33 -04001510 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001511 if (err < 0) {
1512 BT_ERR("Failed to create HCI proc file");
1513 bt_sock_unregister(BTPROTO_HCI);
1514 goto error;
1515 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 BT_INFO("HCI socket layer initialized");
1518
1519 return 0;
1520
1521error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 proto_unregister(&hci_sk_proto);
1523 return err;
1524}
1525
/* Tear down the HCI socket layer: remove the proc entry, unregister
 * the socket family and finally the protocol — the reverse order of
 * hci_sock_init().
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}