/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                if (!skb_copy) {
                        struct hci_mon_hdr *hdr;

                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
                                                      GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put header before the data */
                        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
                        hdr->opcode = opcode;
                        hdr->index = cpu_to_le16(hdev->id);
                        hdr->len = cpu_to_le16(skb->len);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

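/* Build a monitor channel event for a controller being registered or
 * unregistered. HCI_DEV_REG produces a HCI_MON_NEW_INDEX event carrying
 * the controller type, bus, address and name; HCI_DEV_UNREG produces an
 * empty HCI_MON_DEL_INDEX event. Any other event yields NULL.
 */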
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

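/* Propagate HCI_DEV_* events: mirror them to the monitor channel when a
 * monitor is open, deliver them as stack internal events to raw sockets,
 * and on HCI_DEV_UNREG detach every socket still bound to the device.
 */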
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        send_monitor_event(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                return -EOPNOTSUPP;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

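/* Bind a socket to one of the HCI channels. HCI_CHANNEL_RAW may bind to
 * a specific device or to HCI_DEV_NONE. HCI_CHANNEL_USER takes exclusive
 * control of a single, currently closed device and requires CAP_NET_ADMIN.
 * HCI_CHANNEL_CONTROL requires CAP_NET_ADMIN and HCI_CHANNEL_MONITOR
 * requires CAP_NET_RAW; neither is tied to a device.
 */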
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    test_bit(HCI_SETUP, &hdev->dev_flags) ||
                    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

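/* Transmit path for raw and user channel sockets. User channel frames
 * bypass the security filter and go straight to the driver queue. On the
 * raw channel, commands are checked against hci_sec_filter unless the
 * sender has CAP_NET_RAW, vendor commands (OGF 0x3f) are queued raw, and
 * all other packet types require CAP_NET_RAW. Control channel messages
 * are handed to mgmt_control().
 */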
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

int __init hci_sock_init(void)
{
        int err;

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253}