blob: 1f4665a124f663a79150d45521948a5eb69f9e49 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020033#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
Johan Hedberg801c1e82015-03-06 21:08:50 +020037static LIST_HEAD(mgmt_chan_list);
38static DEFINE_MUTEX(mgmt_chan_list_lock);
39
Marcel Holtmanncd82e612012-02-20 20:34:38 +010040static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* ----- HCI socket interface ----- */
43
Marcel Holtmann863def52014-07-11 05:41:00 +020044/* Socket info */
45#define hci_pi(sk) ((struct hci_pinfo *) sk)
46
/* Per-socket state for HCI sockets, overlaid on struct sock via hci_pi(). */
struct hci_pinfo {
	struct bt_sock bt;	/* must stay first: hci_pi() casts struct sock * */
	struct hci_dev *hdev;	/* bound controller; NULL until bound (or after unreg) */
	struct hci_filter filter;	/* per-socket packet filter (RAW channel) */
	__u32 cmsg_mask;	/* which ancillary (cmsg) items to deliver */
	unsigned short channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long flags;	/* HCI_SOCK_*/HCI_MGMT_* bits, see hci_sock_set_flag() */
};
55
Marcel Holtmann6befc642015-03-14 19:27:53 -070056void hci_sock_set_flag(struct sock *sk, int nr)
57{
58 set_bit(nr, &hci_pi(sk)->flags);
59}
60
61void hci_sock_clear_flag(struct sock *sk, int nr)
62{
63 clear_bit(nr, &hci_pi(sk)->flags);
64}
65
Marcel Holtmannc85be542015-03-14 19:28:00 -070066int hci_sock_test_flag(struct sock *sk, int nr)
67{
68 return test_bit(nr, &hci_pi(sk)->flags);
69}
70
Johan Hedbergd0f172b2015-03-17 13:48:46 +020071unsigned short hci_sock_get_channel(struct sock *sk)
72{
73 return hci_pi(sk)->channel;
74}
75
Jiri Slaby93919762015-02-19 15:20:43 +010076static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070077{
Jiri Slaby93919762015-02-19 15:20:43 +010078 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
81/* Security filter */
Marcel Holtmann3ad254f2014-07-11 05:36:39 +020082#define HCI_SFLT_MAX_OGF 5
83
/* Bitmap sets describing which packet types, events and commands
 * (per OGF, bitmap over OCF) a socket may use; see hci_sec_filter below.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types, one bit each */
	__u32 event_mask[2];	/* allowed HCI events, 64-bit bitmap */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs, indexed by OGF */
};
89
/* Default security filter contents. The bitmaps below encode the packet
 * types, events and per-OGF command opcodes permitted to restricted
 * sockets. NOTE(review): the enforcement site (sendmsg path) is not
 * visible in this chunk; do not change these values without checking it.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
110
/* Global list of all open HCI sockets, protected by its embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
/* Decide whether @skb must be dropped for RAW socket @sk according to
 * the socket's hci_filter.
 *
 * Returns true when the packet is filtered out (do not deliver),
 * false when it may be queued to the socket.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the socket's type mask: drop. */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code. */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* For CMD_COMPLETE/CMD_STATUS only deliver events matching the
	 * opcode the socket filtered on; the opcode sits at different
	 * offsets in the two event layouts (3 vs 4).
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
151
/* Send frame to RAW socket.
 *
 * Deliver @skb (coming from controller @hdev) to every bound HCI socket
 * interested in it. A single private copy with the packet-type byte
 * pushed in front is made lazily on first match and then cloned per
 * socket. Runs under the socket-list read lock; allocations are atomic.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets only see the four core packet types,
			 * further restricted by the per-socket filter.
			 */
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sockets only see incoming traffic. */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue ownership passes on success; free on failure. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
214
/* Send frame to sockets with specific channel.
 *
 * Clone @skb to every bound socket on @channel that has per-socket
 * flag @flag set, skipping @skip_sk (may be NULL). Caller keeps
 * ownership of @skb.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
252
/* Send frame to monitor socket.
 *
 * Wrap @skb in an hci_mon_hdr (monitor opcode derived from the packet
 * type and direction) and broadcast it on HCI_CHANNEL_MONITOR. Cheap
 * early-out when no monitor socket is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (+ direction for data packets) to monitor opcode;
	 * unknown types are silently ignored.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
306
/* Build a monitor-channel control event for device lifecycle @event on
 * @hdev (register/unregister/open/close/up). Returns a freshly
 * allocated, timestamped skb with an hci_mon_hdr pushed in front, or
 * NULL on allocation failure or unknown event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		/* NEW_INDEX carries type, bus, address and short name. */
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_UP:
		/* INDEX_INFO carries address and manufacturer id. */
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
379
/* Replay the current controller state to a newly bound monitor socket:
 * for every registered device queue a REG event, then OPEN if the
 * device is running, then UP if it is up. Allocation or queueing
 * failures for one device skip the rest of that device's events.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_UP, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_UP);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
419
/* Generate internal stack event.
 *
 * Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data of subtype @type and feed it through hci_send_to_sock() as if
 * it were an incoming event from @hdev (may be NULL for global events).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so channel delivery rules treat it like an event. */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
446
/* Fan out a device lifecycle @event for @hdev: to monitor sockets (if
 * any are promiscuous), to RAW sockets as a stack-internal event for
 * events up to HCI_DEV_DOWN, and on HCI_DEV_UNREG detach every socket
 * still bound to the disappearing device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				/* Wake the socket with EPIPE and drop the
				 * device reference it held since bind.
				 */
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
492
Johan Hedberg801c1e82015-03-06 21:08:50 +0200493static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
494{
495 struct hci_mgmt_chan *c;
496
497 list_for_each_entry(c, &mgmt_chan_list, list) {
498 if (c->channel == channel)
499 return c;
500 }
501
502 return NULL;
503}
504
505static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
506{
507 struct hci_mgmt_chan *c;
508
509 mutex_lock(&mgmt_chan_list_lock);
510 c = __hci_mgmt_chan_find(channel);
511 mutex_unlock(&mgmt_chan_list_lock);
512
513 return c;
514}
515
516int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
517{
518 if (c->channel < HCI_CHANNEL_CONTROL)
519 return -EINVAL;
520
521 mutex_lock(&mgmt_chan_list_lock);
522 if (__hci_mgmt_chan_find(c->channel)) {
523 mutex_unlock(&mgmt_chan_list_lock);
524 return -EALREADY;
525 }
526
527 list_add_tail(&c->list, &mgmt_chan_list);
528
529 mutex_unlock(&mgmt_chan_list_lock);
530
531 return 0;
532}
533EXPORT_SYMBOL(hci_mgmt_chan_register);
534
/* Remove management channel @c from the global channel list. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
542
/* Release an HCI socket: drop monitor promiscuity, unlink from the
 * global socket list, tear down a USER channel's exclusive device
 * access, release the device reference and purge the queues.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
588
Antti Julkub2a66aa2011-06-15 12:01:14 +0300589static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200590{
591 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300592 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200593
594 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
595 return -EFAULT;
596
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300597 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300598
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300599 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300600
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300601 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300602
603 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200604}
605
Antti Julkub2a66aa2011-06-15 12:01:14 +0300606static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200607{
608 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300609 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200610
611 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
612 return -EFAULT;
613
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300614 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300615
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300616 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300617
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300618 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300619
620 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200621}
622
/* Ioctls that require bound socket.
 *
 * Handle per-device ioctls for a socket already bound to @hdev.
 * Rejected when the device is claimed by a user channel, is
 * unconfigured, or is not a BR/EDR controller. Returns -ENOIOCTLCMD
 * for commands not handled here so the caller can report failure.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Historical command: privileged callers get -EOPNOTSUPP,
		 * unprivileged ones -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
666
/* Top-level ioctl handler for HCI sockets.
 *
 * Only RAW channel sockets accept ioctls. Global commands (device
 * lists, up/down, etc.) are dispatched with the socket lock released
 * and return directly; anything else falls through to
 * hci_sock_bound_ioctl() with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the lock for the global commands below; they return
	 * straight out of the switch without touching socket state.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need a bound device; re-take the lock. */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
739
/* Bind an HCI socket to a channel (and, for RAW/USER, a device).
 *
 * RAW: optional device, bumps the device promisc count.
 * USER: exclusive device access; requires CAP_NET_ADMIN, device must
 *       not be in setup/config or already up (unless auto-off), and
 *       HCI_USER_CHANNEL acts as the exclusivity flag.
 * MONITOR: requires CAP_NET_RAW; replays existing device state.
 * Other channels: must be a registered management channel; trust and
 * default event flags are assigned here.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses are zero-padded. */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the device is initializing,
		 * being set up/configured, or already up for normal use
		 * (an auto-off device is still up for grabs).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* test-and-set makes HCI_USER_CHANNEL the exclusivity lock. */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
917
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300918static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
919 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920{
921 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
922 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700923 struct hci_dev *hdev;
924 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925
926 BT_DBG("sock %p sk %p", sock, sk);
927
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700928 if (peer)
929 return -EOPNOTSUPP;
930
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931 lock_sock(sk);
932
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700933 hdev = hci_pi(sk)->hdev;
934 if (!hdev) {
935 err = -EBADFD;
936 goto done;
937 }
938
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939 *addr_len = sizeof(*haddr);
940 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100941 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700942 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700944done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700946 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947}
948
/* Attach ancillary data for a received HCI frame to the user message,
 * according to the per-socket control-message mask configured through
 * the HCI_DATA_DIR and HCI_TIME_STAMP socket options.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* Frame direction: incoming (from controller) or outgoing */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* A 32-bit task on a 64-bit kernel expects the compat
		 * timeval layout, unless 64-bit time is in effect.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900985
Ying Xue1b784142015-03-02 15:37:48 +0800986static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
987 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988{
989 int noblock = flags & MSG_DONTWAIT;
990 struct sock *sk = sock->sk;
991 struct sk_buff *skb;
992 int copied, err;
993
994 BT_DBG("sock %p, sk %p", sock, sk);
995
996 if (flags & (MSG_OOB))
997 return -EOPNOTSUPP;
998
999 if (sk->sk_state == BT_CLOSED)
1000 return 0;
1001
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001002 skb = skb_recv_datagram(sk, flags, noblock, &err);
1003 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 return err;
1005
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 copied = skb->len;
1007 if (len < copied) {
1008 msg->msg_flags |= MSG_TRUNC;
1009 copied = len;
1010 }
1011
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001012 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -05001013 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014
Marcel Holtmann3a208622012-02-20 14:50:34 +01001015 switch (hci_pi(sk)->channel) {
1016 case HCI_CHANNEL_RAW:
1017 hci_sock_cmsg(sk, msg, skb);
1018 break;
Marcel Holtmann23500182013-08-26 21:40:52 -07001019 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001020 case HCI_CHANNEL_MONITOR:
1021 sock_recv_timestamp(msg, sk, skb);
1022 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001023 default:
1024 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1025 sock_recv_timestamp(msg, sk, skb);
1026 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +01001027 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028
1029 skb_free_datagram(sk, skb);
1030
1031 return err ? : copied;
1032}
1033
/* Parse and dispatch one management command received on a mgmt channel.
 *
 * The message is copied in full, validated against the channel's handler
 * table (opcode range, trust level, device index state, payload length)
 * and then handed to the per-opcode handler. Validation failures are
 * reported back to the sender as mgmt status events; the return value is
 * the consumed length on success or a negative errno.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	/* Header fields are little-endian on the wire */
	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Declared payload length must match what was actually sent */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only invoke explicitly untrusted commands */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		/* Takes a reference; dropped in the done path */
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config, or claimed by a user
		 * channel, are not addressable through mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept commands marked as
		 * safe for that state.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* A command either requires a device index or forbids one */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands set a minimum; fixed ones an exact size */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Give the channel a chance to do per-device setup first */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1144
/* Send one frame on an HCI socket.
 *
 * Management channels are dispatched to hci_mgmt_cmd(); the monitor
 * channel is read-only. For raw and user channels the frame is copied
 * from user space, its first byte consumed as the packet type, and it
 * is queued either to the device's command queue or raw queue depending
 * on type, channel and the sender's capabilities.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum of packet-type byte plus a 3 byte header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Anything else must be a registered mgmt channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte of the frame carries the H:4 packet type */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the unprivileged security filter
		 * require CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and go straight out through the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets always require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1275
/* Set raw-channel socket options: cmsg mask bits (direction and
 * timestamp reporting) and the per-socket HCI event filter. Only
 * HCI_CHANNEL_RAW sockets may use these options.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			/* Seed uf with the current filter so a short
			 * copy from user space leaves the remaining
			 * fields at their present values.
			 */
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets are restricted to the security
		 * filter's packet types and events.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1358
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001359static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1360 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361{
1362 struct hci_ufilter uf;
1363 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001364 int len, opt, err = 0;
1365
1366 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
1368 if (get_user(len, optlen))
1369 return -EFAULT;
1370
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001371 lock_sock(sk);
1372
1373 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001374 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001375 goto done;
1376 }
1377
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 switch (optname) {
1379 case HCI_DATA_DIR:
1380 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1381 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001382 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 opt = 0;
1384
1385 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001386 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 break;
1388
1389 case HCI_TIME_STAMP:
1390 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1391 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001392 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 opt = 0;
1394
1395 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001396 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 break;
1398
1399 case HCI_FILTER:
1400 {
1401 struct hci_filter *f = &hci_pi(sk)->filter;
1402
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001403 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 uf.type_mask = f->type_mask;
1405 uf.opcode = f->opcode;
1406 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1407 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1408 }
1409
1410 len = min_t(unsigned int, len, sizeof(uf));
1411 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001412 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 break;
1414
1415 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001416 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 break;
1418 }
1419
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001420done:
1421 release_sock(sk);
1422 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423}
1424
/* Socket operations for HCI sockets. Connection-oriented calls
 * (listen, connect, accept, ...) are stubbed with the sock_no_*
 * helpers since HCI sockets are datagram based.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1444
/* Protocol definition; obj_size makes sk_alloc() reserve room for the
 * HCI-specific per-socket state (struct hci_pinfo).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1450
Eric Paris3f378b62009-11-05 22:18:14 -08001451static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1452 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453{
1454 struct sock *sk;
1455
1456 BT_DBG("sock %p", sock);
1457
1458 if (sock->type != SOCK_RAW)
1459 return -ESOCKTNOSUPPORT;
1460
1461 sock->ops = &hci_sock_ops;
1462
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001463 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 if (!sk)
1465 return -ENOMEM;
1466
1467 sock_init_data(sock, sk);
1468
1469 sock_reset_flag(sk, SOCK_ZAPPED);
1470
1471 sk->sk_protocol = protocol;
1472
1473 sock->state = SS_UNCONNECTED;
1474 sk->sk_state = BT_OPEN;
1475
1476 bt_sock_link(&hci_sk_list, sk);
1477 return 0;
1478}
1479
/* Registered with the Bluetooth core so BTPROTO_HCI socket() calls are
 * routed to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1485
/* Register the HCI socket layer: the protocol, the BTPROTO_HCI socket
 * family and the /proc listing. Each step unwinds the previous ones on
 * failure.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit in the generic sockaddr storage */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the socket registration before the common unwind */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1517
/* Tear down the HCI socket layer in reverse order of hci_sock_init():
 * proc entry first, then the socket family, then the protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}