blob: 9a100c1fd7b5ec13cdec4b852e70ca769700a067 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020033#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
Johan Hedberg801c1e82015-03-06 21:08:50 +020037static LIST_HEAD(mgmt_chan_list);
38static DEFINE_MUTEX(mgmt_chan_list_lock);
39
Marcel Holtmanncd82e612012-02-20 20:34:38 +010040static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* ----- HCI socket interface ----- */
43
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket protocol state. The bt_sock member must remain first so
 * that the hci_pi() cast from struct sock is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;        /* common Bluetooth socket state; must be first */
	struct hci_dev    *hdev;     /* bound controller, or NULL when unbound */
	struct hci_filter filter;    /* per-socket packet filter (RAW channel) */
	__u32             cmsg_mask; /* which ancillary (cmsg) data to deliver */
	unsigned short    channel;   /* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;     /* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
55
Marcel Holtmann6befc642015-03-14 19:27:53 -070056void hci_sock_set_flag(struct sock *sk, int nr)
57{
58 set_bit(nr, &hci_pi(sk)->flags);
59}
60
61void hci_sock_clear_flag(struct sock *sk, int nr)
62{
63 clear_bit(nr, &hci_pi(sk)->flags);
64}
65
Marcel Holtmannc85be542015-03-14 19:28:00 -070066int hci_sock_test_flag(struct sock *sk, int nr)
67{
68 return test_bit(nr, &hci_pi(sk)->flags);
69}
70
Johan Hedbergd0f172b2015-03-17 13:48:46 +020071unsigned short hci_sock_get_channel(struct sock *sk)
72{
73 return hci_pi(sk)->channel;
74}
75
Jiri Slaby93919762015-02-19 15:20:43 +010076static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070077{
Jiri Slaby93919762015-02-19 15:20:43 +010078 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

/* Allow-lists applied to unprivileged RAW sockets: a set bit permits
 * the corresponding packet type, event, or command (indexed by OGF,
 * then OCF bit position).
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types */
	__u32 event_mask[2];	/* allowed HCI events (64-bit bitmap) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
89
Marcel Holtmann7e67c112014-07-11 05:36:40 +020090static const struct hci_sec_filter hci_sec_filter = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 /* Packet types */
92 0x10,
93 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020094 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 /* Commands */
96 {
97 { 0x0 },
98 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020099 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200101 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200103 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200105 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200107 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 }
109};
110
/* Global list of all open HCI sockets, protected by its embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
/* Decide whether @skb must be withheld from RAW socket @sk according to
 * the socket's packet filter.  Returns true when the packet is filtered
 * out (must NOT be delivered), false when it may be queued.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	/* Vendor packets are mapped to bit 0 of the type mask */
	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First payload byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* For Command Complete/Status also match the opcode of the
	 * command that triggered the event (unaligned LE16 in payload).
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
154
/* Send frame to RAW socket.
 *
 * Delivers @skb to every bound socket attached to @hdev, honouring the
 * RAW-channel packet filter and the USER-channel restrictions.  A single
 * private copy with the packet-type byte pushed in front is created
 * lazily (only when at least one recipient exists) and then cloned per
 * socket, so the common no-listener case allocates nothing.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only sees incoming event/ACL/SCO */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes ownership on success */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference; clones queued above keep the data alive */
	kfree_skb(skb_copy);
}
212
/* Send frame to sockets with specific channel.
 *
 * Clones @skb to every BT_BOUND socket on @channel that has socket
 * flag @flag set, skipping @skip_sk (the originator, may be NULL).
 * The caller keeps ownership of @skb.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes ownership on success */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
250
/* Send frame to monitor socket.
 *
 * Wraps @skb in a hci_mon_hdr (opcode derived from packet type and
 * direction) and broadcasts it on HCI_CHANNEL_MONITOR.  Cheap early
 * exit when no monitor socket is open (monitor_promisc == 0).
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (+ direction for data packets) to monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not forwarded to the monitor */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
304
/* Build a monitor control event (NEW/DEL/OPEN/CLOSE/INDEX_INFO) for
 * @hdev.  Returns a freshly allocated, timestamped skb with the
 * hci_mon_hdr prepended, or NULL for unknown events or on allocation
 * failure.  The caller owns (and must free or queue) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): copies exactly 8 bytes of the name;
		 * assumes consumers of HCI_MON_NEW_INDEX treat the field
		 * as fixed-width, not NUL-terminated — confirm.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Header-only event, no payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		/* Header-only event, no payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		/* Header-only event, no payload */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
377
/* Replay the current controller state to a freshly bound monitor
 * socket: for each registered hdev queue a NEW_INDEX event, then an
 * OPEN event if the transport is running, then an INDEX_INFO event if
 * the device is up.  Allocation or queueing failures skip to the next
 * device (best effort).
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_UP, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_UP);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
417
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data, marks it as incoming, and delivers it to bound sockets via
 * hci_send_to_sock().  @hdev may be NULL for device-independent events.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the synthetic event as if received from a controller */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
444
/* Notify HCI sockets about a device lifecycle event (HCI_DEV_*):
 * forward it to monitor sockets, synthesize a stack-internal event for
 * RAW sockets (up/down/reg/unreg only), and on unregistration detach
 * every socket still bound to the disappearing device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Wake readers with EPIPE; socket reverts
				 * to unbound state.
				 */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
490
Johan Hedberg801c1e82015-03-06 21:08:50 +0200491static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
492{
493 struct hci_mgmt_chan *c;
494
495 list_for_each_entry(c, &mgmt_chan_list, list) {
496 if (c->channel == channel)
497 return c;
498 }
499
500 return NULL;
501}
502
503static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
504{
505 struct hci_mgmt_chan *c;
506
507 mutex_lock(&mgmt_chan_list_lock);
508 c = __hci_mgmt_chan_find(channel);
509 mutex_unlock(&mgmt_chan_list_lock);
510
511 return c;
512}
513
514int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
515{
516 if (c->channel < HCI_CHANNEL_CONTROL)
517 return -EINVAL;
518
519 mutex_lock(&mgmt_chan_list_lock);
520 if (__hci_mgmt_chan_find(c->channel)) {
521 mutex_unlock(&mgmt_chan_list_lock);
522 return -EALREADY;
523 }
524
525 list_add_tail(&c->list, &mgmt_chan_list);
526
527 mutex_unlock(&mgmt_chan_list_lock);
528
529 return 0;
530}
531EXPORT_SYMBOL(hci_mgmt_chan_register);
532
/* Remove a management channel previously added with
 * hci_mgmt_chan_register().
 */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
540
/* Release an HCI socket: undo monitor promiscuity accounting, unlink
 * from the global socket list, give back any bound device (shutting it
 * down first for USER channel exclusive access), and free queued skbs.
 * Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			/* Re-announce the index to the management interface */
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
586
Antti Julkub2a66aa2011-06-15 12:01:14 +0300587static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200588{
589 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300590 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200591
592 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
593 return -EFAULT;
594
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300595 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300596
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300597 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300598
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300599 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300600
601 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200602}
603
Antti Julkub2a66aa2011-06-15 12:01:14 +0300604static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200605{
606 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300607 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200608
609 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
610 return -EFAULT;
611
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300612 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300613
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300614 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300615
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300616 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300617
618 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200619}
620
/* Ioctls that require bound socket.
 *
 * Rejects the call when the socket has no bound device, the device is
 * held by a user channel, is unconfigured, or is not a BR/EDR
 * controller.  Returns -ENOIOCTLCMD for commands not handled here.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * capability check so unprivileged callers get -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
664
/* Top-level ioctl handler for HCI sockets (HCI_CHANNEL_RAW only).
 *
 * Device-independent commands are dispatched with the socket lock
 * dropped; anything unhandled falls through to hci_sock_bound_ioctl()
 * under the re-taken lock.
 *
 * NOTE(review): the channel check is done under lock_sock() but the
 * lock is released before dispatch — presumably the channel cannot
 * change after a successful bind; confirm no rebind race is possible.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Not a device-independent command: retake the lock and try the
	 * bound-socket ioctls.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
737
/* Bind an HCI socket to a channel (and, for RAW/USER, a controller).
 *
 * RAW: optional device binding, takes a promisc + device reference.
 * USER: exclusive controller access — requires CAP_NET_ADMIN, removes
 *       the index from mgmt and powers the device on.
 * MONITOR: requires CAP_NET_RAW; replays current controller state.
 * Other channels: must be a registered management channel; trust and
 *       default event flags are set based on capabilities.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* USER channel requires a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the device is being set
		 * up, configured, or already in regular (non-AUTO_OFF)
		 * use.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
915
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300916static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
917 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918{
919 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
920 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700921 struct hci_dev *hdev;
922 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923
924 BT_DBG("sock %p sk %p", sock, sk);
925
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700926 if (peer)
927 return -EOPNOTSUPP;
928
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 lock_sock(sk);
930
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700931 hdev = hci_pi(sk)->hdev;
932 if (!hdev) {
933 err = -EBADFD;
934 goto done;
935 }
936
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937 *addr_len = sizeof(*haddr);
938 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100939 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700940 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700942done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700944 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945}
946
/* Attach ancillary data to a received HCI message according to the
 * socket's cmsg mask: the packet direction (HCI_CMSG_DIR) and/or the
 * receive timestamp (HCI_CMSG_TSTAMP).
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		/* Default to the native timeval layout. */
		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit user space on a 64-bit kernel expects the
		 * compat timeval layout, except when the compat ABI
		 * already uses 64-bit time values.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900983
Ying Xue1b784142015-03-02 15:37:48 +0800984static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
985 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986{
987 int noblock = flags & MSG_DONTWAIT;
988 struct sock *sk = sock->sk;
989 struct sk_buff *skb;
990 int copied, err;
991
992 BT_DBG("sock %p, sk %p", sock, sk);
993
994 if (flags & (MSG_OOB))
995 return -EOPNOTSUPP;
996
997 if (sk->sk_state == BT_CLOSED)
998 return 0;
999
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001000 skb = skb_recv_datagram(sk, flags, noblock, &err);
1001 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002 return err;
1003
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 copied = skb->len;
1005 if (len < copied) {
1006 msg->msg_flags |= MSG_TRUNC;
1007 copied = len;
1008 }
1009
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001010 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -05001011 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
Marcel Holtmann3a208622012-02-20 14:50:34 +01001013 switch (hci_pi(sk)->channel) {
1014 case HCI_CHANNEL_RAW:
1015 hci_sock_cmsg(sk, msg, skb);
1016 break;
Marcel Holtmann23500182013-08-26 21:40:52 -07001017 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001018 case HCI_CHANNEL_MONITOR:
1019 sock_recv_timestamp(msg, sk, skb);
1020 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001021 default:
1022 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1023 sock_recv_timestamp(msg, sk, skb);
1024 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +01001025 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
1027 skb_free_datagram(sk, skb);
1028
1029 return err ? : copied;
1030}
1031
/* Parse and dispatch one management command received on a mgmt channel
 * socket.
 *
 * The message consists of a struct mgmt_hdr (opcode, controller index,
 * payload length) followed by the command payload. The command is
 * validated against the channel's handler table and the socket's trust
 * level before the handler is invoked. Validation failures are reported
 * back to user space as mgmt command status events; in that case the
 * (non-negative) return of mgmt_cmd_status() becomes the return value.
 *
 * Returns @msglen on success, a negative errno on transport failures,
 * or the mgmt_cmd_status() result for rejected commands.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	/* Header fields are little endian on the wire. */
	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload size. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		/* Takes a reference on hdev; dropped at "done". */
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config or claimed by a
		 * user channel socket are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only commands that
		 * opt in via HCI_MGMT_UNCONFIGURED.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands either require a controller index or forbid one;
	 * reject any mismatch between handler flags and the message.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable length commands need at least data_len bytes,
	 * fixed length commands exactly data_len bytes.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Give the channel a chance to do per-device setup first. */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the full message as consumed. */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1142
/* sendmsg() handler for HCI sockets.
 *
 * Routing depends on the socket's channel:
 *  - raw/user channel: the message is a complete HCI packet (1 byte
 *    packet type indicator followed by the frame) queued towards the
 *    controller;
 *  - monitor channel: write is not supported;
 *  - anything else: treated as a management channel and handed to
 *    hci_mgmt_cmd() if the channel is registered.
 *
 * Returns the number of bytes consumed or a negative error.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum of 4 bytes guarantees the packet type indicator plus
	 * a 3 byte HCI command header can be read safely below.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Management channels are looked up under the channel
		 * list lock and dispatched to hci_mgmt_cmd().
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type indicator; strip it from the
	 * frame and remember it in the skb control buffer.
	 */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor specific commands (OGF 0x3f) bypass the
		 * command queue and go out through the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw channel data packets require CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1267
/* setsockopt() handler for HCI sockets. Only defined for the raw
 * channel; supports toggling direction/timestamp ancillary data and
 * installing the event filter (HCI_FILTER).
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	/* Socket options only apply to raw channel sockets. */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the currently installed filter so a
		 * short copy_from_user() below leaves the remaining
		 * fields at their current values rather than zero.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets are restricted to the security
		 * filter's packet types and events.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		/* Install the (possibly restricted) filter. */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1350
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001351static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1352 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353{
1354 struct hci_ufilter uf;
1355 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001356 int len, opt, err = 0;
1357
1358 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
1360 if (get_user(len, optlen))
1361 return -EFAULT;
1362
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001363 lock_sock(sk);
1364
1365 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001366 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001367 goto done;
1368 }
1369
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 switch (optname) {
1371 case HCI_DATA_DIR:
1372 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1373 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001374 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 opt = 0;
1376
1377 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001378 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 break;
1380
1381 case HCI_TIME_STAMP:
1382 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1383 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001384 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 opt = 0;
1386
1387 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001388 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 break;
1390
1391 case HCI_FILTER:
1392 {
1393 struct hci_filter *f = &hci_pi(sk)->filter;
1394
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001395 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 uf.type_mask = f->type_mask;
1397 uf.opcode = f->opcode;
1398 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1399 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1400 }
1401
1402 len = min_t(unsigned int, len, sizeof(uf));
1403 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001404 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 break;
1406
1407 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001408 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 break;
1410 }
1411
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001412done:
1413 release_sock(sk);
1414 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415}
1416
/* Protocol operations for HCI sockets. HCI is datagram based, so all
 * connection oriented primitives are wired to the generic sock_no_*
 * stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1436
/* Protocol descriptor for HCI sockets; obj_size reserves room for the
 * HCI specific per-socket state (struct hci_pinfo).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1442
Eric Paris3f378b62009-11-05 22:18:14 -08001443static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1444 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
1446 struct sock *sk;
1447
1448 BT_DBG("sock %p", sock);
1449
1450 if (sock->type != SOCK_RAW)
1451 return -ESOCKTNOSUPPORT;
1452
1453 sock->ops = &hci_sock_ops;
1454
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001455 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 if (!sk)
1457 return -ENOMEM;
1458
1459 sock_init_data(sock, sk);
1460
1461 sock_reset_flag(sk, SOCK_ZAPPED);
1462
1463 sk->sk_protocol = protocol;
1464
1465 sock->state = SS_UNCONNECTED;
1466 sk->sk_state = BT_OPEN;
1467
1468 bt_sock_link(&hci_sk_list, sk);
1469 return 0;
1470}
1471
/* Family handler the AF_BLUETOOTH core uses to create HCI sockets. */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1477
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478int __init hci_sock_init(void)
1479{
1480 int err;
1481
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001482 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1483
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 err = proto_register(&hci_sk_proto, 0);
1485 if (err < 0)
1486 return err;
1487
1488 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001489 if (err < 0) {
1490 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001492 }
1493
Al Virob0316612013-04-04 19:14:33 -04001494 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001495 if (err < 0) {
1496 BT_ERR("Failed to create HCI proc file");
1497 bt_sock_unregister(BTPROTO_HCI);
1498 goto error;
1499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 BT_INFO("HCI socket layer initialized");
1502
1503 return 0;
1504
1505error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 proto_unregister(&hci_sk_proto);
1507 return err;
1508}
1509
/* Tear down the HCI socket layer. The order is the reverse of
 * hci_sock_init(): proc entry first, then the BTPROTO_HCI socket
 * family, and finally the protocol itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}